branch_name
stringclasses
149 values
text
stringlengths
23
89.3M
directory_id
stringlengths
40
40
languages
listlengths
1
19
num_files
int64
1
11.8k
repo_language
stringclasses
38 values
repo_name
stringlengths
6
114
revision_id
stringlengths
40
40
snapshot_id
stringlengths
40
40
refs/heads/master
<file_sep>/** * CustumerController * * @description :: Server-side logic for managing custumers * @help :: See http://sailsjs.org/#!/documentation/concepts/Controllers */ module.exports = { 'new': function (req, res) { res.view(); }, 'create': function (req, res, next) { Custumer.create(req.params.all(), function (err, custumer) { if (err) return next(err); res.redirect('/custumer/show/' + custumer.id); }); }, 'show': function (req, res, next) { Custumer.findOne(req.param('id')).populateAll().exec(function (err, custumer) { if (err) return next(); if (!custumer) return next(); res.view({ custumer: custumer }); }); }, 'index': function (req, res, next) { Custumer.find(function AllCustumers(err, custumers) { if (err) return next(err); if (!custumers) return next(); res.view({ custumers: custumers }); }) }, 'edite': function (req, res, next) { Custumer.findOne(req.param('id'), function custumerToEdite(err, custumer) { if (err) return next(err); if (!custumer) return next(); res.view({ custumer: custumer }); }); }, 'update': function (req, res, next) { Custumer.update(req.param('id'), req.params.all(), function custumerUpdated(err) { if (err) return res.redirect('/custumer/edite' + req.param('id')); res.redirect('/custumer/show/' + req.param('id')); }); }, 'destroy': function (req, res, next) { Custumer.destroy(req.param('id')).exec(function () { res.redirect('/custumer'); }); } }; <file_sep>/** * StockController * * @description :: Server-side logic for managing stocks * @help :: See http://sailsjs.org/#!/documentation/concepts/Controllers */ module.exports = { 'new': function (req, res) { Custumer.findOne(req.param('id') , function (err , custumer) { if (err) return next(err) ; res.view({ custumer : custumer }) }); }, 'create': function (req, res, next) { Stock.create(req.params.all(), function (err, stock) { if (err) return next(err); res.redirect('custumer/show/' + stock.entity ) }); }, };
0be0aea0a867e15df1b0e4a14833279ce22a7623
[ "JavaScript" ]
2
JavaScript
devadil/HADDAD_Projet_sails_Teste
cad20a6f9a630cbedbdb6f597aa9f1ce37a753a8
0f677cc2fe78fcb95f5e8491479a96b6bcb7002c
refs/heads/master
<repo_name>vince1396/Utilisateurs<file_sep>/app/src/main/java/com/example/vincent/utilisateurs/Adapter.java package com.example.vincent.utilisateurs; import android.net.Uri; import android.support.v7.widget.RecyclerView; import android.util.Pair; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ImageView; import android.widget.TextView; import com.bumptech.glide.Glide; import java.util.ArrayList; public class Adapter extends RecyclerView.Adapter<Adapter.MyViewHolder> { private ArrayList<User> personne; public Adapter(ArrayList<User> pPersonne){ this.personne = pPersonne; } @Override public int getItemCount() { return personne.size(); } @Override public MyViewHolder onCreateViewHolder(ViewGroup parent, int viewType) { LayoutInflater inflater = LayoutInflater.from(parent.getContext()); View view = inflater.inflate(R.layout.listcell, parent, false); return new MyViewHolder(view); } @Override public void onBindViewHolder(MyViewHolder holder, int position) { holder.display(personne.get(position), holder); } public class MyViewHolder extends RecyclerView.ViewHolder { private final TextView name; private final TextView email; private final ImageView img; //private Pair<String, String> currentPair; public MyViewHolder(final View itemView) { super(itemView); name = ((TextView) itemView.findViewById(R.id.name)); email = ((TextView) itemView.findViewById(R.id.email)); img = ((ImageView) itemView.findViewById(R.id.img)); /*itemView.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { new AlertDialog.Builder(itemView.getContext()) .setTitle(currentPair.first) .setMessage(currentPair.second) .show(); } }); */ } public void display(User pUser, MyViewHolder holder) { name.setText(pUser.getNom()); email.setText(pUser.getEmail()); Uri url = Uri.parse(pUser.getImg()); Glide .with(holder.img.getContext()) .load(url) .into(img); } } }<file_sep>/app/src/main/java/com/example/vincent/utilisateurs/MainFragment.java package com.example.vincent.utilisateurs; import android.os.Bundle; import android.support.annotation.Nullable; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.RecyclerView; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import com.squareup.otto.Subscribe; import java.util.ArrayList; import butterknife.BindView; import butterknife.ButterKnife; public class MainFragment extends android.support.v4.app.Fragment { @BindView(R.id.rvUser) RecyclerView myRecyclerView; ArrayList<User> list = new ArrayList<User>(); @Nullable @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View view = inflater.inflate(R.layout.fragment_main, container, false); ButterKnife.bind(this, view); MyBus.getBus().register(this); myRecyclerView.setLayoutManager(new LinearLayoutManager(getActivity().getApplicationContext())); myRecyclerView.setAdapter(new Adapter(list)); return view; } @Override public void onViewCreated(View view, @Nullable Bundle savedInstanceState) { super.onViewCreated(view, savedInstanceState); } @Subscribe public void onEvent(Refresh refresh){ list = refresh.getList(); } } <file_sep>/app/src/main/java/com/example/vincent/utilisateurs/MainActivity.java package com.example.vincent.utilisateurs; import android.support.v7.app.AppCompatActivity; import android.os.Bundle; import android.view.View; import android.widget.Button; import android.widget.EditText; import android.widget.LinearLayout; import android.widget.Toast; import java.util.ArrayList; import butterknife.BindView; import butterknife.ButterKnife; import butterknife.OnClick; public class MainActivity extends AppCompatActivity { //===================================== VARIABLES GLOBALES ===================================== @BindView(R.id.create) Button create; @BindView(R.id.valider) Button valider; @BindView(R.id.name) EditText name; @BindView(R.id.email) EditText email; @BindView(R.id.urlimg) EditText urlimg; @BindView(R.id.form) LinearLayout form; private ArrayList<User> alRep = new ArrayList<>(); private boolean ifForm = false; //======================================== ON CREATE =========================================== @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main); ButterKnife.bind(this); create.setText(getString(R.string.create)); } //===================================== CLICK SUR CREER ======================================== @OnClick(R.id.create) public void onClickCreate() { form.setVisibility(View.VISIBLE); create.setVisibility(View.INVISIBLE); ifForm = true; } //===================================== CLICK SUR VALIDER ====================================== @OnClick(R.id.valider) public void onClickValider(){ if(name.getText().toString().equals("") || email.getText().toString().equals("")) { Toast.makeText(this, getString(R.string.empty), Toast.LENGTH_SHORT).show(); } else { String pEmail = email.getText().toString(); if (!isValidEmail((pEmail))) { Toast.makeText(this, getString(R.string.emailError), Toast.LENGTH_SHORT).show(); email.setText(""); } else { form.setVisibility(View.INVISIBLE); create.setVisibility(View.VISIBLE); ifForm = false; String pNom = name.getText().toString(); String pUrl = urlimg.getText().toString(); email.setText(""); name.setText(""); urlimg.setText(""); User user = new User(); user.setNom(pNom); user.setEmail(pEmail); user.setImg(pUrl); alRep.add(user); MyBus.getBus().post(new Refresh(alRep)); } } } //===================================== VERIF EMAIL ============================================ public static boolean isValidEmail(CharSequence target) { if(null == target) { return false; } else { return android.util.Patterns.EMAIL_ADDRESS.matcher(target).matches(); } } }
a014ca53d2974632e62246f8e21629b6a704997d
[ "Java" ]
3
Java
vince1396/Utilisateurs
5a69370af561c7d477ab033d21f83d9d9e9edc3f
e6bbe3769adce8b05d048ba14543648194de80cb
refs/heads/master
<file_sep>var React = require('react'); var ReactDOM = require('react-dom'); module.exports = React.createClass({ getInitialState: function () { return { text: '' } }, componentDidMount: function() { ReactDOM.findDOMNode(this.refs.nameInput).focus(); }, render: function () { return <div className="input-group"> <input value={this.state.text} onChange={this.InputChange} onKeyDown={this.AddClick} type="text" className="form-control" ref="nameInput" /> <span className="input-group-btn"> <input onClick={this.AddClick} className="btn btn-default" type="button" value="Add" /> </span> </div> }, AddClick: function (e) { if (e.keyCode == undefined || e.keyCode == 13) { this.props.itemsStore.push({ text: this.state.text, done: false }); this.setState({ text: '' }); } }, InputChange: function (event) { this.setState({ text: event.target.value }); } }); <file_sep>##WhitespaceToDo This is how you configure WhitespaceToDo to work - You will need first is npm -> https://nodejs.org/en/ - Then you will need gulp for that you can type: "npm install --global gulp-cli" in your console ``` > Download the .zip file. Extract the contents of the zip file, then open your terminal, change to the project directory > cd WhitespaceToDo > npm install > gulp ``` I desided to go with react for client-side code and firebase for storage, i also used bootsrap for design. The things that i am most proud of is: - It is easy to use. You can use the enter key on you keybord to add and save items. - I did it easy to add items on startup with the focus keyword. - When you edit the input on a item it adds two buttons for the save and undo functions and is not there all the time. - As soon as you add something it is added to all browsers. - The code is simple and easy to understand. - You do not have to write any backside code with firebase, you can change to any other firebase page and everything will just work. ###Instructions how to use: - To add a item type with your keybord and push enter - To set a item as done click the checkbox on the left - To delete an item click the delete button on the right - To edit a item click on the item and enter the desided text and click the save button, - you can also undo undesired edits with the undo button. <file_sep>var React = require('react'); var Firebase = require('firebase'); var rootUrl = 'https://whitespacetodo.firebaseio.com/'; module.exports = React.createClass({ getInitialState: function() { return { text: this.props.item.text, done: this.props.item.done, textChanged: false } }, componentWillMount: function() { this.fb = new Firebase(rootUrl + 'items/' + this.props.item.key); }, render: function() { return <div className="input-group"> <span className="input-group-addon"> <input type="checkbox" checked={this.state.done} onChange={this.DoneChange} /> </span> <input type="text" disabled={this.state.done} className="form-control" value={this.state.text} onChange={this.TextChange} onKeyDown={this.SaveClick} /> <span className="input-group-btn"> {this.changesButtons()} <input type="button" className="btn btn-default" onClick={this.DeleteClick} value= "Delete"/> </span> </div> }, changesButtons: function() { if(!this.state.textChanged) { return null } else { return [ <input type="button" className="btn btn-default" onClick={this.SaveClick} value="Save"/>, <input type="button" onClick={this.UndoClick} className="btn btn-default" value="Undo" /> ] } }, SaveClick: function (e) { if (e.keyCode == undefined || e.keyCode == 13) { this.fb.update({ text: this.state.text }); this.setState({ textChanged: false }); } }, UndoClick: function() { this.setState({ text: this.props.item.text, textChanged: false }); }, TextChange: function(event) { this.setState({ text: event.target.value, textChanged: true }); }, DoneChange: function(event) { var update = {done: event.target.checked} this.setState(update); this.fb.update(update); }, DeleteClick: function() { this.fb.remove(); } });
f7d960de09f453c7700c342d6aa0c4f50529edda
[ "JavaScript", "Markdown" ]
3
JavaScript
Qualmeru/WhitespaceToDo
3f082ab78075f776cccec8e9c0b0d92dfe01bff8
efa9a672d594d5ee28f25a1c78bbf38d2e04d74b
refs/heads/master
<repo_name>kunny/android-CircleRefreshLayout<file_sep>/circlerefreshlayout-sample/src/main/java/com/androidhuman/circlerefreshlayout/sample/RefreshLayoutActivity.java package com.androidhuman.circlerefreshlayout.sample; import com.androidhuman.circlerefreshlayout.SwipeRefreshLayout; import android.os.Bundle; import android.support.annotation.Nullable; import android.support.v7.app.AppCompatActivity; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.RecyclerView; import android.view.Gravity; import android.view.LayoutInflater; import android.view.ViewGroup; import android.widget.TextView; import android.widget.Toast; public class RefreshLayoutActivity extends AppCompatActivity implements SwipeRefreshLayout.OnRefreshListener { public static final String KEY_TITLE = "title"; public static final String KEY_REFRESH_DRAWABLE_STYLE = "refresh_drawable_style"; public static final String KEY_PULL_POSITION = "pull_position"; SwipeRefreshLayout swlRefresh; RecyclerView rlList; @Override protected void onCreate(@Nullable Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_refreshlayout); String title = getIntent().getStringExtra(KEY_TITLE); if (null != title) { setTitle(title); } swlRefresh = (SwipeRefreshLayout) findViewById(R.id.srl_activity_refreshlayout_refresh); rlList = (RecyclerView) findViewById(R.id.rv_list); setupSwipeRefreshLayout(swlRefresh, getIntent().getExtras()); rlList.setLayoutManager(new LinearLayoutManager(this)); rlList.setAdapter(new ItemAdapter()); } private void setupSwipeRefreshLayout(SwipeRefreshLayout view, @Nullable Bundle extras) { view.setColorSchemeResources(R.color.colorAccent); view.setOnRefreshListener(this); if (null == extras) { return; } @SwipeRefreshLayout.RefreshDrawableStyle int style = extras.getInt(KEY_REFRESH_DRAWABLE_STYLE, SwipeRefreshLayout.CIRCLE); view.setRefreshDrawableStyle(style); @SwipeRefreshLayout.PullPosition int position = extras.getInt(KEY_PULL_POSITION, Gravity.TOP); view.setPullPosition(position); } @Override public void onRefresh() { Toast.makeText(getApplicationContext(), "Refreshing...", Toast.LENGTH_SHORT).show(); } class ItemAdapter extends RecyclerView.Adapter<ItemHolder> { @Override public ItemHolder onCreateViewHolder(ViewGroup parent, int viewType) { return new ItemHolder(parent); } @Override public void onBindViewHolder(ItemHolder holder, int position) { holder.text.setText("Item " + position); } @Override public int getItemCount() { return 30; } } class ItemHolder extends RecyclerView.ViewHolder { public TextView text; public ItemHolder(ViewGroup parent) { super(LayoutInflater.from(parent.getContext()) .inflate(R.layout.item_simple, parent, false)); text = (TextView) itemView.findViewById(R.id.tv_item_simple); } } } <file_sep>/settings.gradle include ':circlerefreshlayout', ':circlerefreshlayout-rxbinding', ':circlerefreshlayout-sample' <file_sep>/dependencies.gradle ext { minSdkVersion = 9 sampleMinSdkVersion = 14 compileSdkVersion = 23 targetSdkVersion = 23 buildToolsVersion = '23.0.3' appcompat = 'com.android.support:appcompat-v7:23.3.0' recyclerview = 'com.android.support:recyclerview-v7:23.3.0' rxAndroid = 'io.reactivex:rxandroid:1.2.0' rxJava = 'io.reactivex:rxjava:1.1.5' supportV4 = 'com.android.support:support-v4:23.3.0' junit4 = 'junit:junit:4.12' }<file_sep>/build.gradle // Top-level build file where you can add configuration options common to all sub-projects/modules. buildscript { repositories { jcenter() } dependencies { classpath 'com.android.tools.build:gradle:2.1.0' // NOTE: Do not place your application dependencies here; they belong // in the individual module build.gradle files } } allprojects { repositories { jcenter() } apply plugin: 'checkstyle' checkstyle { toolVersion = "6.6" configFile = new File(rootProject.rootDir, 'config/checkstyle/checkstyle.xml') configProperties.checkStyleConfigDir = rootProject.file('config/checkstyle') } task('checkstyle', type: Checkstyle) { group = "Verification" description = "Runs checkstyle task." source 'src' ignoreFailures false showViolations true include '**/*.java' exclude '**/gen/**' exclude '**/**Test.java' exclude '**/test/**' classpath = files() } afterEvaluate { if (tasks.findByName('check')) { check.dependsOn('checkstyle') } } } task clean(type: Delete) { delete rootProject.buildDir } apply from: file('dependencies.gradle') <file_sep>/CHANGELOG.md # Change Log ## Version 0.1.0 *(2016-05-12)* Initial release. <file_sep>/circlerefreshlayout-sample/src/main/java/com/androidhuman/circlerefreshlayout/sample/WebViewActivity.java package com.androidhuman.circlerefreshlayout.sample; import com.androidhuman.circlerefreshlayout.SwipeRefreshLayout; import android.graphics.Bitmap; import android.os.Bundle; import android.support.annotation.Nullable; import android.support.v7.app.AppCompatActivity; import android.text.TextUtils; import android.view.Gravity; import android.webkit.WebView; import android.webkit.WebViewClient; public class WebViewActivity extends AppCompatActivity implements SwipeRefreshLayout.OnRefreshListener { public static final String KEY_PULL_POSITION = "pull_position"; SwipeRefreshLayout srlRefresh; WebView wvWebView; @Override protected void onCreate(@Nullable Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_webview); srlRefresh = (SwipeRefreshLayout) findViewById(R.id.srl_activity_webview_refresh); srlRefresh.setOnRefreshListener(this); @SwipeRefreshLayout.PullPosition int pullPosition = getIntent().getIntExtra(KEY_PULL_POSITION, Gravity.TOP); srlRefresh.setPullPosition(pullPosition); wvWebView = (WebView) findViewById(R.id.wv_activity_webview); wvWebView.getSettings().setJavaScriptEnabled(true); wvWebView.setWebViewClient(new WebViewClient() { @Override public void onPageStarted(WebView view, String url, Bitmap favicon) { setTitle("Loading..."); } @Override public void onPageFinished(WebView view, String url) { String pageTitle = view.getTitle(); if (!TextUtils.isEmpty(pageTitle)) { setTitle(pageTitle); } if (srlRefresh.isRefreshing()) { srlRefresh.setRefreshing(false); } } }); wvWebView.loadUrl("http://d.android.com"); } @Override public void onBackPressed() { if (wvWebView.canGoBack()) { wvWebView.goBack(); } else { super.onBackPressed(); } } @Override public void onRefresh() { wvWebView.reload(); } } <file_sep>/circlerefreshlayout-rxbinding/gradle.properties VERSION_NAME=0.1.0-SNAPSHOT VERSION_CODE=1 GROUP=com.androidhuman.circlerefreshlayout POM_NAME=android-CircleRefreshLayout-rxbinding POM_DESCRIPTION=RxJava binding APIs for CircleRefreshLayout. POM_ARTIFACT_ID=circlerefreshlayout-rxbinding POM_PACKAGING=aar POM_URL=https://github.com/kunny/android-CircleRefreshLayout POM_SCM_URL=https://github.com/kunny/android-CircleRefreshLayout POM_SCM_CONNECTION=scm:[email protected]:kunny/android-CircleRefreshLayout.git POM_SCM_DEV_CONNECTION=scm:[email protected]:kunny/android-CircleRefreshLayout.git POM_LICENCE_NAME=The Apache Software License, Version 2.0 POM_LICENCE_URL=http://www.apache.org/licenses/LICENSE-2.0.txt POM_LICENCE_DIST=repo POM_DEVELOPER_ID=kunny POM_DEVELOPER_NAME=<NAME> POM_DEVELOPER_EMAIL=<EMAIL><file_sep>/README.md # android-CircleRefreshLayout [![Build status](https://travis-ci.org/kunny/android-CircleRefreshLayout.svg?branch=master)](https://travis-ci.org/kunny/android-CircleRefreshLayout) A customized SwipeRefreshLayout that supports circular refresh indicator and pull to refresh from the bottom. ## Screenshots ### Circle Refresh indicator (No arrow) ![Circle refresh indicator](assets/circle_refresh_indicator.gif) ### Arrow Refresh indicator (Same as original SwipeRefreshLayout) ![Arrow refresh indicator](assets/arrow_refresh_indicator.gif) ### Pull from bottom ![Arrow refresh indicator](assets/pull_from_bottom.gif) ## Usage ### Change Refresh drawable style ```java SwipeRefreshLayout layout = (SwipeRefreshLayout) findViewById(/* Resource id */); // Circle (which is default) layout.setRefreshDrawableStyle(SwipeRefreshLayout.CIRCLE); // Or with arrow (which is default in original SwipeRefreshLayout in Support Library) layout.setRefreshDrawableStyle(SwipeRefreshLayout.ARROW); ``` ### Change Pull position ```java SwipeRefreshLayout layout = (SwipeRefreshLayout) findViewById(/* Resource id */); // Pull from bottom layout.setPullPosition(Gravity.BOTTOM); // Or from top, which is default layout.setPullPosition(Gravity.TOP); ``` ## Download ### Stable Core: ```groovy compile 'com.androidhuman.circlerefreshlayout:circlerefreshlayout:0.1.0' ``` RxBinding support: ```groovy compile 'com.androidhuman.circlerefreshlayout:circlerefreshlayout-rxbinding:0.1.0' ``` ### Development Snapshot Snapshots of the development version are available in [Sonatype's `snapshots` repository](https://oss.sonatype.org/content/repositories/snapshots/). Core: ```groovy compile 'com.androidhuman.circlerefreshlayout:circlerefreshlayout:0.1.0-SNAPSHOT' ``` RxBinding support: ```groovy compile 'com.androidhuman.circlerefreshlayout:circlerefreshlayout-rxbinding:0.1.0-SNAPSHOT' ``` ## License ``` Copyright 2016 <NAME> <<EMAIL>> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ``` <file_sep>/circlerefreshlayout/gradle.properties VERSION_NAME=0.1.0-SNAPSHOT VERSION_CODE=1 GROUP=com.androidhuman.circlerefreshlayout POM_NAME=android-CircleRefreshLayout POM_DESCRIPTION=A customized SwipeRefreshLayout that supports circular refresh indicator and pull to refresh from the bottom. POM_ARTIFACT_ID=circlerefreshlayout POM_PACKAGING=aar POM_URL=https://github.com/kunny/android-CircleRefreshLayout POM_SCM_URL=https://github.com/kunny/android-CircleRefreshLayout POM_SCM_CONNECTION=scm:[email protected]:kunny/android-CircleRefreshLayout.git POM_SCM_DEV_CONNECTION=scm:[email protected]:kunny/android-CircleRefreshLayout.git POM_LICENCE_NAME=The Apache Software License, Version 2.0 POM_LICENCE_URL=http://www.apache.org/licenses/LICENSE-2.0.txt POM_LICENCE_DIST=repo POM_DEVELOPER_ID=kunny POM_DEVELOPER_NAME=<NAME> POM_DEVELOPER_EMAIL=<EMAIL><file_sep>/circlerefreshlayout-sample/src/main/java/com/androidhuman/circlerefreshlayout/sample/MainActivity.java package com.androidhuman.circlerefreshlayout.sample; import com.androidhuman.circlerefreshlayout.SwipeRefreshLayout; import android.content.Intent; import android.os.Bundle; import android.support.annotation.Nullable; import android.support.v7.app.AppCompatActivity; import android.view.Gravity; import android.view.View; import android.widget.Button; public class MainActivity extends AppCompatActivity { Button btnCircleRefreshIndicator; Button btnArrowRefreshIndicator; Button btnPullFromBottom; Button btnWithWebViewTop; Button btnWithWebViewBottom; @Override protected void onCreate(@Nullable Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main); btnCircleRefreshIndicator = (Button) findViewById( R.id.btn_activity_main_circle_refresh_indicator); btnArrowRefreshIndicator = (Button) findViewById( R.id.btn_activity_main_arrow_refresh_indicator); btnPullFromBottom = (Button) findViewById( R.id.btn_activity_main_pull_from_bottom); btnWithWebViewTop = (Button) findViewById( R.id.btn_activity_main_webview_top); btnWithWebViewBottom = (Button) findViewById( R.id.btn_activity_main_webview_bottom); btnCircleRefreshIndicator.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { startActivity(new Intent(MainActivity.this, RefreshLayoutActivity.class) .putExtra(RefreshLayoutActivity.KEY_REFRESH_DRAWABLE_STYLE, SwipeRefreshLayout.CIRCLE) .putExtra(RefreshLayoutActivity.KEY_TITLE, "Circle Refresh indicator")); } }); btnArrowRefreshIndicator.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { startActivity(new Intent(MainActivity.this, RefreshLayoutActivity.class) .putExtra(RefreshLayoutActivity.KEY_REFRESH_DRAWABLE_STYLE, SwipeRefreshLayout.ARROW) .putExtra(RefreshLayoutActivity.KEY_TITLE, "Arrow Refresh indicator")); } }); btnPullFromBottom.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { startActivity(new Intent(MainActivity.this, RefreshLayoutActivity.class) .putExtra(RefreshLayoutActivity.KEY_PULL_POSITION, Gravity.BOTTOM) .putExtra(RefreshLayoutActivity.KEY_TITLE, "Pull from bottom")); } }); btnWithWebViewTop.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { startActivity(new Intent(MainActivity.this, WebViewActivity.class) .putExtra(WebViewActivity.KEY_PULL_POSITION, Gravity.TOP)); } }); btnWithWebViewBottom.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { startActivity(new Intent(MainActivity.this, WebViewActivity.class) .putExtra(WebViewActivity.KEY_PULL_POSITION, Gravity.BOTTOM)); } }); } }
e04528ca34167c2a40d4ae689214b416efdf990a
[ "Markdown", "Java", "INI", "Gradle" ]
10
Java
kunny/android-CircleRefreshLayout
698c2a75a3a129ca9c27dc5416147206f9478cef
07948b7f1922772e8f333a71d0f3b6e0a708f10b
refs/heads/master
<repo_name>bulokisda/tappxDemo<file_sep>/tappxDemo/DemoController.swift import UIKit import GoogleMobileAds class DemoController: UIViewController { @IBOutlet weak var button: UIButton! @IBAction func press(_ sender: UIButton) { self.performSegue(withIdentifier: "Interstitial", sender: self) } @IBAction func pressedRefreshBanner(_ sender: UIButton) { bannerViewController?.refreshBanner() } var bannerViewController : BannerViewController? override func prepare(for segue: UIStoryboardSegue, sender: Any?) { super.prepare(for: segue, sender: sender) if let vc = segue.destination as? BannerViewController { bannerViewController = vc } } } <file_sep>/tappxDemo/InterstitialViewController.swift import UIKit import WebKit import GoogleMobileAds class InterstitialViewController: UIViewController, TappxInterstitialViewControllerDelegate { @IBOutlet weak var loadingLabel: UILabel! private var tappxInterstitial: TappxInterstitialViewController! = nil private var loadText = "Loading Advert..." private var closed: Bool = false private var transitioning: Bool = false override func viewDidLoad() { super.viewDidLoad() loadingLabel.text = loadText } override func viewDidLayoutSubviews() { super.viewDidLayoutSubviews() if !closed && !transitioning{ resetInterstitial() } else if transitioning { transitionInterstitial() } } override func viewWillTransition(to size: CGSize, with coordinator: UIViewControllerTransitionCoordinator) { super.viewWillTransition(to: size, with: coordinator) transitioning = true } private func transitionInterstitial() { tappxInterstitial?.delegate = nil self.presentedViewController?.dismiss(animated: false, completion: { self.tappxInterstitial = nil self.closed = true self.transitioning = false self.createInterstitial() }) } private func closeInterstitial() { closed = true tappxInterstitial?.delegate = nil self.presentedViewController?.dismiss(animated: false, completion: { self.navigationController?.popViewController(animated: false) }) tappxInterstitial = nil } private func createInterstitial() { tappxInterstitial = TappxInterstitialViewController(delegate: self) tappxInterstitial.setAutoShowWhenReady(false) tappxInterstitial.load() } func resetInterstitial() { loadText = "Loading Tappx Advert..." if tappxInterstitial != nil { closeInterstitial() } createInterstitial() } // for TappX func presentViewController() -> UIViewController! { return self } func tappxInterstitialViewControllerDidFinishLoad(_ viewController: TappxInterstitialViewController!) { if viewController.isReady { viewController.show() } else { loadingLabel.text = "Advert not available" closeInterstitial() } } func tappxInterstitialViewControllerDidClose(_ viewController: TappxInterstitialViewController!) { loadingLabel.text = "Closing Advert" if !transitioning { closeInterstitial() } } func tappxInterstitialViewControllerDidFail(_ viewController: TappxInterstitialViewController!, withError error: TappxErrorAd!) { loadingLabel.text = "Advert not provided" closeInterstitial() } } <file_sep>/README.md # Tappx Advert Provisioning Demonstration for iOS Portrait | Landscape :-------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------: ![Simulator Screen Shot - iPhone 8 - 2019-11-13 at 14 31 46](https://user-images.githubusercontent.com/57699501/68739668-4ede4400-0624-11ea-9372-6b44d70866f1.png) | ![Simulator Screen Shot - iPhone 8 - 2019-11-13 at 14 56 49](https://user-images.githubusercontent.com/57699501/68740474-1dff0e80-0626-11ea-915d-2295bec198e5.png) Demonstrates how to use Tappx for Ad provisioning in iOS apps (both banner and interstitial). Copes with landscape and portrait transitions. The repository was built on MacOS X - Mojave using Xcode 11.2.1 (11B53) The Demo was written in Swift 5 and targets iOS 12.2 iPhone and iPads via the simulator Requires the Google Ad Framework and Tappx Frameworks (see www.tappx.com for details) Note: You must get your own app key from Tappx and set this in your $tappxKey environment variable. This value will be used to overwrite the privateData.plist property at build time (as part of a run script build phase). ![Screen Shot 2019-11-13 at 14 50 08](https://user-images.githubusercontent.com/57699501/68739977-10955480-0625-11ea-8887-93d8a163fee5.png) <file_sep>/tappxDemo/BannerViewController.swift class BannerViewController: UIViewController, TappxBannerViewControllerDelegate { private var tappxBanner: TappxBannerViewController! = nil @IBOutlet weak var bannerLabel: UILabel! func presentViewController() -> UIViewController! { return self } func tappxBannerViewControllerDidFinishLoad(_ vc: TappxBannerViewController!) { bannerLabel.text = "Loaded Ad" } func tappxBannerViewControllerDidPress(_ vc: TappxBannerViewController!) { } func tappxBannerViewControllerDidClose(_ vc: TappxBannerViewController!) { bannerLabel.text = "Ad Closed" } func tappxBannerViewControllerDidFail(_ vc: TappxBannerViewController!, withError error: TappxErrorAd?) { bannerLabel.text = "Ad Failed" cleanTappxBanner() } override func viewWillDisappear(_ animated: Bool) { super.viewWillDisappear(animated) cleanTappxBanner() } override func viewWillTransition(to size: CGSize, with coordinator: UIViewControllerTransitionCoordinator) { super.viewWillTransition(to: size, with: coordinator) cleanTappxBanner() } override func viewDidLayoutSubviews() { super.viewDidLayoutSubviews() resetTappxBanner(size: CGSize(width: self.view.bounds.width, height: self.view.bounds.height)) } private func cleanTappxBanner() { if self.tappxBanner != nil { tappxBanner.delegate = nil self.tappxBanner?.removeBanner() self.tappxBanner = nil } } var bannerSize = TappxBannerSize.size320x50 private func resetTappxBanner(size: CGSize) { if self.tappxBanner == nil { let width = size.width //self.view.bounds.width let height = size.height //self.view.bounds.height var location = CGPoint(x: (width - 320)/2, y: (height - 50)) if width >= 728 && height >= 90 { bannerSize = TappxBannerSize.size728x90 location = CGPoint(x: (width - 728)/2, y: (height - 90)) // } else if width >= 300 && height >= 250 { // bannerSize = TappxBannerSize.size300x250 // location = CGPoint(x: (width - 300)/2, y: (height - 250)) } self.tappxBanner = TappxBannerViewController( delegate: self, andSize: bannerSize, andLocation: location ) self.tappxBanner?.load() } } func refreshBanner() { bannerLabel.text = "Ad Refreshing" switch bannerSize { case TappxBannerSize.size320x50: bannerLabel.text! += "(size320x50)" case TappxBannerSize.size728x90: bannerLabel.text! += "(size728x90)" case TappxBannerSize.size300x250: bannerLabel.text! += "(size300x250)" default: bannerLabel.text! += "(size unknown)" } self.tappxBanner?.load() } } <file_sep>/tappxDemo/AppDelegate.swift import GoogleMobileAds import UIKit @UIApplicationMain class AppDelegate: UIResponder, UIApplicationDelegate { var window: UIWindow? var restrictRotation:UIInterfaceOrientationMask = .all // .portrait func application(_ application: UIApplication, willFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey : Any]? = nil) -> Bool { return true } func application(_ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?) -> Bool { var nsDictionary: NSDictionary? if let path = Bundle.main.path(forResource: "privateData", ofType: "plist") { nsDictionary = NSDictionary(contentsOfFile: path) if let tappxKey = nsDictionary!["tappxKey"] as? String { TappxFramework.addTappxKey(tappxKey) } else { fatalError("You must set an environment variable 'tappxKey' with your own Tappx key which will be set into the privateData.plist at build time") } } return true } func applicationWillResignActive(_ application: UIApplication) { } func applicationDidEnterBackground(_ application: UIApplication) { } func applicationWillEnterForeground(_ application: UIApplication) { } func applicationDidBecomeActive(_ application: UIApplication) { } func applicationWillTerminate(_ application: UIApplication) { } func application(_ application: UIApplication, supportedInterfaceOrientationsFor window: UIWindow?) -> UIInterfaceOrientationMask { return restrictRotation } }
a9e1044e7d79e575ee644dece179dbfe0ec5922e
[ "Swift", "Markdown" ]
5
Swift
bulokisda/tappxDemo
433782df2ef809015e9043cb4a128b43e851acce
c9822a9f7eef332505e3114e2f8191aaa839f710
refs/heads/master
<file_sep># Django imports # from django.conf.urls import patterns, include, url # 3rd party imports # from rest_framework_nested import routers # asblog_dev imports # from views import BlogViewSet, PostViewSet router = routers.SimpleRouter() router.register(r'blogs', BlogViewSet) blogs_router = routers.NestedSimpleRouter(router, r'blogs', lookup='blog') blogs_router.register(r'posts', PostViewSet) urlpatterns = patterns( '', url(r'^', include(router.urls)), url(r'^', include(blogs_router.urls)), ) <file_sep>#!/usr/bin/env python # # File: $Id$ # """ The Django models for our asblog Django app """ # system imports # # Django imports # from django.conf import settings from django.db import models from django.utils.translation import ugettext_lazy as _ from django.core.urlresolvers import reverse # 3rd party module imports # ######################################################################## ######################################################################## # class Blog(models.Model): """ One blog.. it has an owner, posts, meta-data and permissions """ title = models.CharField(_('title'), max_length=2048) slug = models.SlugField(_('slug'), unique=True, db_index=True, max_length=255) description = models.CharField(_('description'), max_length=2048) created = models.DateTimeField(_('created'), auto_now_add=True) updated = models.DateTimeField(_('updated'), auto_now=True) owner = models.ForeignKey(settings.AUTH_USER_MODEL, null=False) class Meta: ordering = ['title'] get_latest_by = "created" #################################################################### # def __unicode__(self): return u'%s' % self.title #################################################################### # def get_absolute_url(self): return reverse('asblog.views.blog', args=[self.slug]) ######################################################################## ######################################################################## # class Post(models.Model): """ Blogs have posts... """ blog = models.ForeignKey(Blog, null=False, related_name="posts") title = models.CharField(_('title'), max_length=2048) slug = models.SlugField(_('slug'), max_length=255) content = models.TextField(_('content')) published = models.BooleanField(_('published'), default=False) created = models.DateTimeField(_('created'), auto_now_add=True) updated = models.DateTimeField(_('updated'), auto_now=True) author = models.ForeignKey(settings.AUTH_USER_MODEL, null=False) class Meta: ordering = ['-created'] order_with_respect_to = "blog" get_latest_by = "created" unique_together = (("blog", "slug"),) index_together = (("blog", "slug"),) #################################################################### # def __unicode__(self): return u'%s' % self.title #################################################################### # def get_absolute_url(self): return reverse('asblog.views.post', args=[self.slug]) <file_sep>asblog ====== The django based blog system I use for my own sites (because none of the others work the way I want them to)<file_sep>Django==1.6 South==0.8.2 pytz==2013.8 arrow==0.4.2 <file_sep># Django imports # from django.shortcuts import render # 3rd party imports # from rest_framework import viewsets from rest_framework import permissions # ASBlog imports # from models import Blog, Post from serializers import BlogSerializer, PostSerializer from permissions import IsOwnerOrReadOnly ######################################################################## ######################################################################## # class BlogViewSet(viewsets.ModelViewSet): """ This viewset automatically provides `list`, `create`, `retrieve`, `update` and `destroy` actions. """ queryset = Blog.objects.all() serializer_class = BlogSerializer permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly,) # XXX From our snippets REST tutorial.. left in here as a model for # something we might want to do. # # @link(renderer_classes=[renderers.StaticHTMLRenderer]) # def highlight(self, request, *args, **kwargs): # snippet = self.get_object() # return Response(snippet.highlighted) #################################################################### # def pre_save(self, obj): """ Automatically set the owner of a blog when it is created. """ obj.owner = self.request.user ######################################################################## ######################################################################## # class PostViewSet(viewsets.ModelViewSet): queryset = Post.objects.all() # XXX not linked to a blog? serializer_class = PostSerializer permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly,) #################################################################### # def pre_save(self, obj): """ Automatically set the owner of a blog when it is created. """ obj.author = self.request.user <file_sep>#!/usr/bin/env python # # File: $Id$ # """ Serializers.. modeled very much like django-forms but are a way to reprent your models and such data collections so the REST framework can serialize them for callers. """ # system imports # # Django imports # from django.conf import settings # 3rd party imports # from rest_framework import serializers # asblog imports # from asblog.models import Blog, Post ######################################################################## ######################################################################## # class BlogSerializer(serializers.HyperlinkedModelSerializer): owner = serializers.RelatedField() owner_id = serializers.Field(source='owner.id') posts = serializers.HyperlinkedIdentityField(view_name='post-list') class Meta: model = Blog fields = ('url', 'title', 'description', 'created', 'updated', 'owner', 'owner_id', 'posts') ######################################################################## ######################################################################## # class PostSerializer(serializers.HyperlinkedModelSerializer): blog = serializers.HyperlinkedRelatedField(read_only=True, view_name='blog-detail') author = serializers.Field(source='author') class Meta: model = Post fields = ('title', 'content', 'published', 'created', 'updated', 'author') <file_sep>#!/usr/bin/env python # # File: $Id$ # """ ASBlog test cases. """ # system imports # # django imports # from django.test import TestCase from django.contrib.auth.models import User # app imports # from asblog.models import Blog, Post # Create your tests here. ######################################################################## ######################################################################## # class BlogCreationTests(TestCase): #################################################################### # def setUp(self): """ We need users for our tests.. """ self.user_1 = User.objects.create_user('<NAME>', '<EMAIL>', '<PASSWORD>') return #################################################################### # def test_create(self): """ Test the basics of a blog """ blog = Blog(title = "Test", description = "No description", owner = self.user_1 ) blog.save()
c21835630e6bc92a6813efcdc4f54b10612e14eb
[ "Markdown", "Python", "Text" ]
7
Python
scanner/asblog
0a5b23375c4ef7711ef12e9a90e0edcb7fefd008
ec2ff58c4804050e8dcc34e34e634c06e3daabc1
refs/heads/master
<repo_name>techno-celebes/basic-controller<file_sep>/src/MasterCont/ViewController.php <?php namespace TechnoCelebes\BasicController\MasterCont; use Illuminate\Http\Request; use App\Http\Controllers\Controller; class ViewController extends Controller { //tag //om upi private $css_data; private $js_data; private $css_external_data; private $js_external_data; private $data_send; private $title; public function __construct(){ $this->set_data_send("title","My Ordinary Website"); } public function get_css_data(){ return $this->css_data; } public function get_js_data(){ return $this->js_data; } public function get_css_external_data(){ return $this->css_external_data; } public function get_js_external_data(){ return $this->css_external_data; } public function get_data_send(){ return $this->data_send; } public function set_data_send($n="",$data=array()){ if(strlen($n)>0){ $l = $this->get_data_send(); $l[$n] = $data; $this->data_send = $l; } } public function set_array_data_send($data){ foreach($data as $key => $value){ if(strlen($key)>0){ $l = $this->get_data_send(); $l[$key] = $value; $this->data_send = $l; } } } public function set_css_data($data,$prefix=""){ $f_data = array(); if(strlen($prefix)>=1){ foreach($data as $rep_data){ $rep_data = $prefix . $rep_data; $f_data[] = $rep_data; } }else{ $f_data = $data; } $this->css_data = $f_data; $this->data_send['css_data'] = $this->css_data; } public function set_js_data($data,$prefix=""){ $f_data = array(); if(strlen($prefix)>=1){ foreach($data as $rep_data){ $rep_data = $prefix . $rep_data; $f_data[] = $rep_data; } }else{ $f_data = $data; } $this->js_data = $f_data; $this->data_send['js_data'] = $this->js_data; } public function set_css_external_data($data,$prefix=""){ $f_data = array(); if(strlen($prefix)>=1){ foreach($data as $rep_data){ $rep_data = $prefix . $rep_data; $f_data[] = $rep_data; } }else{ $f_data = $data; } $this->css_external_data = $f_data; $this->data_send['css_external_data'] = $this->css_external_data; } public function set_js_external_data($data,$prefix=""){ $f_data = array(); if(strlen($prefix)>=1){ foreach($data as $rep_data){ $rep_data = $prefix . $rep_data; $f_data[] = $rep_data; } }else{ $f_data = $data; } $this->js_external_data = $f_data; $this->data_send['js_external_data'] = $this->js_external_data; } public function combine_css($data,$position="last"){ if(is_array($data)){ if($position == "last"){ $merge = array_merge($this->css_data,$data); }else{ $merge = array_merge($data,$this->css_data); } $this->set_css_data($merge); }else{ if($position == "last"){ array_push($this->css_data,$data); $this->set_css_data($this->css_data); }else{ array_unshift($this->css_data, $data); $this->set_css_data($this->css_data); } } } public function combine_js($data,$position="last"){ if(is_array($data)){ if($position == "last"){ $merge = array_merge($this->js_data,$data); }else{ $merge = array_merge($data,$this->js_data); } $this->set_js_data($merge); }else{ if($position == "last"){ array_push($this->js_data,$data); $this->data_send['js_data'] = $this->js_data; }else{ array_unshift($this->js_data, $data); $this->data_send['js_data'] = $this->js_data; } $this->set_js_data($this->js_data); } } public function combine_css_external($data,$position="last"){ if(is_array($data)){ if($position == "last"){ $merge = array_merge($this->css_external_data,$data); }else{ $merge = array_merge($data,$this->css_external_data); } $this->set_css_external_data($merge); }else{ if($position == "last"){ array_push($this->css_external_data,$data); $this->data_send['css_external_data'] = $this->css_external_data; }else{ array_unshift($this->css_external_data, $data); $this->data_send['css_external_data'] = $this->css_external_data; } $this->set_css_external_data($this->css_external_data); } } public function combine_js_external($data,$position="last"){ if(is_array($data)){ if($position == "last"){ $merge = array_merge($this->js_external_data,$data); }else{ $merge = array_merge($data,$this->js_external_data); } $this->set_js_external_data($merge); }else{ if($position == "last"){ array_push($this->js_external_data,$data); $this->data_send['js_external_data'] = $this->css_external_data; }else{ array_unshift($this->js_external_data, $data); $this->data_send['js_external_data'] = $this->js_external_data; } $this->set_js_external_data($this->js_external_data); } } public function return_view($view_name){ /* m */ return view($view_name,$this->get_data_send()); } }<file_sep>/src/MasterCont/FunctionController.php <?php namespace TechnoCelebes\BasicController\MasterCont; use Illuminate\Http\Request; use App\Http\Controllers\Controller; class FunctionController extends Controller{ public function __construct(){ } }
82105b70a881c0f4a25f8628394f2904b4acae6b
[ "PHP" ]
2
PHP
techno-celebes/basic-controller
643e9940537448676ec7f2706231f7bdf388c66b
19f8bee04dc0e9ae8d0c63b919eee918f149302c
refs/heads/master
<repo_name>yuchia329/linepush<file_sep>/index.js const express = require('express') const bodyParser = require('body-parser') const router = express.Router() const app = express() const line = require('@line/bot-sdk'); const cors = require('cors') app.use(cors()); app.use('/line', router) router.use(bodyParser.json()); const port = 3000; async function linePush(){ const client = new line.Client({ channelAccessToken: '<KEY> }); const message = { type: 'text', text: '你好帥' }; client.pushMessage('Ud3bc63094222812eb3e4b43dcfd6f077', message) .then(() => { console.log('done') }) .catch((err) => { // error handling }); } router.get('/autosend', async (req, res) => { linePush() .then((result) =>{ res.status(200).send(result) }) .catch((e)=>{ console.log(e) res.status(500).send(e.message) }) }) app.listen(port, (error) => { if (error) { console.error(error) } else { console.log(`==> Listening on port ${port}.`) } })
75c13203cb927ec140da011e53a3725eea103c2a
[ "JavaScript" ]
1
JavaScript
yuchia329/linepush
b5408b2d1a4fcba8246f393be9ecff51850b91cd
aa3268ed785f88bc6dc5d8748efa0ebb2bcd4e17
refs/heads/master
<file_sep>import java.util.Scanner; public class fibo { public static void main(String[] args) { Scanner s=new Scanner(System.in); int s1=s.nextInt(); int a=0,b=1; int sum=0; int c=0; for(int i=0;i<s1;i++){ if(s1<=1) sum=1; else{ sum=a+b; a=b; b=sum; } System.out.println(sum); } } }
94bd7e89631c6f9bdc87ed256bae90077ccb6993
[ "Java" ]
1
Java
prabaprabakaran/fibo
ec93db9b5c4d1a2637b678b2f8b5f16fe070ac54
98ebeec032d3255eaad0cca15927d5df52f06659
refs/heads/master
<repo_name>CataCimpean/RobotSendSMSDotNet<file_sep>/RobotSendSMS/utils/SendSMSUtil.cs using RobotSendSMS.model; using System; using System.Collections.Generic; using System.Net; using System.Collections.Specialized; namespace RobotSendSMS.utils { class SendSMSUtil { public static String SendSMSToRecipient(ProgramareVideoproiectiePojo recipient) { String result = ""; using (var wb = new WebClient()) { String messageToSend = getSMSContent(recipient); String number = String.Concat("4", recipient.PhoneNumber); String encoddedMessageToSend = WebUtility.UrlEncode(messageToSend); if (messageToSend != "") { byte[] response = wb.UploadValues("http://api.txtlocal.com/send/", new NameValueCollection() { {"username","<EMAIL>"}, {"hash" , "addMyHashHere"}, {"numbers" , number}, {"message" , encoddedMessageToSend}, {"sender" , "VP"} }); result = System.Text.Encoding.UTF8.GetString(response); Console.WriteLine(result); } } return result; } public static String getSMSContent(ProgramareVideoproiectiePojo recipient) { if (recipient != null) { string message = "Buna "; message = String.Concat(message, recipient.Username); if (recipient.DateProgramming.Equals(DateConverterUtil.GetTomorrowAsString())) { string sunday_morning = recipient.Sunday_morning; string y = "yes"; string n = "no"; if (sunday_morning.ToLower().Equals(y.ToLower())) { message = String.Concat(message, ", iti reamintim ca maine dimineata " + DateConverterUtil.GetTomorrowAsString() + " esti la proiectie.Multumim de implicare, echipa VideoProiectie Speranta."); } else if (sunday_morning.ToLower().Equals(n.ToLower())) { message = String.Concat(message, ", iti reamintim ca maine dupamasa " + DateConverterUtil.GetTomorrowAsString() + " esti la proiectie.Multumim de implicare, echipa VideoProiectie Speranta."); } } else if(recipient.DateProgramming.Equals(DateConverterUtil.GetTodayAsString())) { message = String.Concat(message, ", iti reamintim ca astazi " + DateConverterUtil.GetTodayAsString() + " esti la proiectie.Multumim de implicare, echipa VideoProiectie Speranta."); } return message; } return ""; } } } <file_sep>/RobotSendSMS/utils/HandlerErrorsUtil.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; using System.IO; namespace RobotSendSMS.utils { class HandlerErrorsUtil { public static void handlerWarningFromFileJSON(int type) { if (type == 1) { LogMessage.PrintEventWarning("Nu avem date de incarcat,va rugam actualizati fisierul JSON"); } else if (type == 2) { if (!File.Exists(FileUtil.filePathJSON)) { LogMessage.PrintEventWarning("Nu a fost gasit fisierul JSON in locatie cu proiectul.Va rugam verificati!."); } else { LogMessage.PrintEventWarning("Fisierul JSON este gol/null va rugam verificati continutul acestuia."); } } } public static void handlerErrorFromService(String errorMessage) { int errorCode = 0; string recivedStatus = ""; if (errorMessage.Contains("\"code\":3")) { recivedStatus = "Invalid_Number"; errorCode = 3; } else if (errorMessage.Contains("\"code\":6")) { recivedStatus = "Message_too_long"; errorCode = 6; } else if (errorMessage.Contains("\"code\":7")) { recivedStatus = "Insufficient_Credits"; errorCode = 7; } displayErrorFromService(recivedStatus, errorCode); } public static void displayErrorFromService(String recivedStatusFromService, int errorCode) { LogMessage.PrintEventError("A aparut o eroare la trimiterea SMS-ului in data de " + DateConverterUtil.GetTodayAsString() + " Denumire eroare: " + recivedStatusFromService + " Cod eroare: " + errorCode + " ." + "Pt mai multe detalii consultati siteul https://api.txtlocal.com/docs/sendsms "); } } } <file_sep>/RobotSendSMS/controller/ControllerSMS.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; using RobotSendSMS.model; using RobotSendSMS.utils; namespace RobotSendSMS.controller { class ControllerSMS { private List<ProgramareVideoproiectiePojo> listOfRecipients = null; private List<ProgramareVideoproiectiePojo> listWithAllDataFromJSON = null; public void startApp(){ listWithAllDataFromJSON = FileUtil.GetListFromJSONFile(); if (listWithAllDataFromJSON != null) { listOfRecipients = ContentParserUtil.GetListOfRecipients(listWithAllDataFromJSON); if (listOfRecipients != null && listOfRecipients.Count != 0) { foreach (ProgramareVideoproiectiePojo currentRecipient in listOfRecipients) { String serviceResponse = SendSMSUtil.SendSMSToRecipient(currentRecipient); //mesaje in eventViewer de succes respectiv eroare. if (serviceResponse.Contains("\"status\":\"success\"")) { LogMessage.PrintEventMessage("Mesaj trimis cu succes catre " + currentRecipient.Username + " in data de " + DateTime.Now); } else { HandlerErrorsUtil.handlerErrorFromService(serviceResponse); } } } else { HandlerErrorsUtil.handlerWarningFromFileJSON(1); } Console.ReadLine(); } else { HandlerErrorsUtil.handlerWarningFromFileJSON(2); } } internal List<ProgramareVideoproiectiePojo> ListOfRecipients { get { return listOfRecipients; } set { listOfRecipients = value; } } internal List<ProgramareVideoproiectiePojo> ListWithAllDataFromJSON { get { return listWithAllDataFromJSON; } set { listWithAllDataFromJSON = value; } } } } <file_sep>/RobotSendSMS/model/ProgramareVideoproiectiePojo.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace RobotSendSMS.model { class ProgramareVideoproiectiePojo { #region fields(username,phoneNumber,dateProgramming,sunday_morning,sunday_evening) private string username; private string phoneNumber; private string dateProgramming; private string sunday_morning; private string sunday_evening; public string Username { get { return username; } set { username = value; } } public string PhoneNumber { get { return phoneNumber; } set { phoneNumber = value; } } public string DateProgramming { get { return dateProgramming; } set { dateProgramming = value; } } public string Sunday_morning { get { return sunday_morning; } set { sunday_morning = value; } } public string Sunday_evening { get { return sunday_evening; } set { sunday_evening = value; } } #endregion #region constructor public ProgramareVideoproiectiePojo(string user, string number, string date, string sunday_m, string sunday_e) { username = user; phoneNumber = number; dateProgramming = date; sunday_morning = sunday_m; sunday_evening = sunday_e; } #endregion } } <file_sep>/RobotSendSMS/Program.cs using System; using System.Collections.Generic; using System.Linq; using System.Net; using System.Text; using System.Threading.Tasks; using System.Collections.Specialized; using System.Diagnostics; using RobotSendSMS.model; using RobotSendSMS.utils; using RobotSendSMS.controller; namespace RobotSendSMS { class Program { static void Main(string[] args) { ControllerSMS controllerApp = new ControllerSMS(); controllerApp.startApp(); } } } <file_sep>/RobotSendSMS/utils/DateConverterUtil.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace RobotSendSMS.utils { class DateConverterUtil { public static String GetTomorrowAsString() { DateTime tomorrow = DateTime.Now.AddDays(1); // As DateTime string s_tomorrow = tomorrow.ToString("MM/dd/yyyy"); // As String return s_tomorrow; } public static String GetTodayAsString() { DateTime today = DateTime.Today; // As DateTime string s_today = today.ToString("MM/dd/yyyy"); // As String return s_today; } } } <file_sep>/RobotSendSMS/utils/ContentParserUtils.cs using RobotSendSMS.model; using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace RobotSendSMS.utils { class ContentParserUtil { public static List<ProgramareVideoproiectiePojo> GetListOfRecipients(List<ProgramareVideoproiectiePojo> programmingList) { List<ProgramareVideoproiectiePojo> listOfRecipients =null; if (programmingList != null) { listOfRecipients = new List<ProgramareVideoproiectiePojo>(); foreach (ProgramareVideoproiectiePojo currentPojo in programmingList) { if (currentPojo.DateProgramming.Equals(DateConverterUtil.GetTodayAsString())) { listOfRecipients.Add(currentPojo); }else if (currentPojo.DateProgramming.Equals(DateConverterUtil.GetTomorrowAsString())){ DayOfWeek today = DateTime.Now.DayOfWeek; if ((today == DayOfWeek.Saturday) || (today == DayOfWeek.Sunday)) { listOfRecipients.Add(currentPojo); } } } } return listOfRecipients; } } } <file_sep>/RobotSendSMS/utils/FileUtil.cs using RobotSendSMS.model; using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Text; using System.Threading.Tasks; using Newtonsoft.Json; namespace RobotSendSMS.utils { class FileUtil { #region variables(filenameJSON,filepathJSON) public const string filenameJSON = "VideoProiectieProgramare.json"; public static string filePathJSON = AppDomain.CurrentDomain.BaseDirectory + filenameJSON; #endregion public static List<ProgramareVideoproiectiePojo> GetListFromJSONFile() { List<ProgramareVideoproiectiePojo> list = null; using (StreamReader r = new StreamReader(filePathJSON)) { String json = r.ReadToEnd(); var jsonAsString = JsonConvert.DeserializeObject<List<ProgramareVideoproiectiePojo>>(json); if (jsonAsString != null) { list = new List<ProgramareVideoproiectiePojo>(); foreach (var current in jsonAsString) { list.Add(new ProgramareVideoproiectiePojo(current.Username, current.PhoneNumber, current.DateProgramming, current.Sunday_morning, current.Sunday_evening)); } } } return list; } public static string checkPathFormat(string path) { return (path.Contains(" ")) ? "'" + path + "'" : path; } } }
31f8e24c2e1f34c1d228d5a4bb2de33aeb1b7562
[ "C#" ]
8
C#
CataCimpean/RobotSendSMSDotNet
d91155261ec7ee9929d44b2ba03cb43550a35c70
f6cba8625325c2f7b331d86bf0d37706e0f77f62
refs/heads/master
<repo_name>rsenhuang/back-stage<file_sep>/src/router.js import Vue from 'vue' import Router from 'vue-router' import webEdit from './views/webEdit.vue' import home from './views/home.vue' import storeSetting from './views/storeSetting.vue' import userManage from './views/userManage.vue' Vue.use(Router) export default new Router({ mode: 'history', base: process.env.BASE_URL, routes: [ { path: '/', name: 'home', component: home }, { path: '/1-1', name: 'webEdit', component: webEdit }, { path: '/2-1', name: 'storeSetting', component: storeSetting }, { path: '/2-2', name: 'userManage', component: userManage }, { path: '/about', name: 'about', // route level code-splitting // this generates a separate chunk (about.[hash].js) for this route // which is lazy-loaded when the route is visited. component: () => import(/* webpackChunkName: "about" */ './views/About.vue') } ] })
55ae4200f91bb96d39906c769dfd6eb3881762d5
[ "JavaScript" ]
1
JavaScript
rsenhuang/back-stage
b4d0d828110da67bdf5c4736ebfd2534df2946ab
9c9f63e351d958e7b90f8b68f1a64bc28f708400
refs/heads/master
<file_sep>drop table users; create table users ( userId varchar2(12) primary key, password varchar2(12) not null, name varchar2(12) not null, email varchar2(50) ) insert into users values('javajigi', 'password', '<PASSWORD>', '<EMAIL>');
0238578bac3522b7a0f72ed928a840ee41459b04
[ "SQL" ]
1
SQL
kdw607/Spring_Slipp
d1ce36ae211950cff2dbaaedca402cdc2f617536
b52f4150c43f3bdac0cceeddd445306329d8ba11
refs/heads/master
<file_sep># myfirstsite webpage <file_sep>function myFunction() { var x = document.createElement("INPUT"); x.setAttribute("type", "datetime-local"); x.setAttribute("value", "2014-01"); document.body.appendChild(x); } <file_sep><?php session_start(); $con=mysqli_connect('localhost','root','','students_guide'); $name= $_POST['username']; $email=$_POST['email']; $pass= $_POST['password']; $cpassword = $_POST['confirmpassword']; $s="select * from register where username='$name'"; $result=mysqli_query($con,$s); $num=mysqli_num_rows($result); if($num == 1) { echo "<script>alert('UserName already exists. Please login')</script>"; }else{ $reg= "insert into register(username,email,password,confirmpassword) values ('$name','$email','$pass','$cpassword')"; mysqli_query($con, $reg); echo "<script>alert('Thank you for being connected with our website. Please login to start a tour')</script>"; } ?> <file_sep><?php session_start(); if(!isset($_SESSION['username'])){ header('location:login.php'); } ?> <html> <head> <title>Student Express</title> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <link rel="stylesheet" href="css/course.css"></link> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css"> <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css"> <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script> <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script> </head> <body> <div> <header class="head"> <span><img class="logo" src="img/logo.jpg"></span> <p class="hcontent1"><b>Students Guide</b></p> <p class="hcontent2">Get ready for programming journey</p> <a class="logout" href="logout.php">LOGOUT</a> </header> </div> <div> <h1 class="welcome">WELCOME <?php echo $_SESSION['username']; ?> <h1> </div> <div class="sidebar" id="sidebar1"> <a id="whatsapp" href="https://web.whatsapp.com/"> <span class="space"><i class="fa fa-whatsapp" style="font-size:28px"></i> Whatsapp </a> <a id="facebook" href="https://www.facebook.com/"> <span class="space"><i class="fa fa-facebook"></i> Facebook</span> </a> <a id="skype" href="https://www.skype.com/en/"> <i class="fa fa-skype"></i> <span>Skype</span> </a> <a id="twitter" href="https://twitter.com/login?lang=en"> <i class="fa fa-twitter"></i> <span>Twitter</span> </a> <a id="plus" href="https://twitter.com/login?lang=en"> <i class="fa fa-plus-circle" style="font-size: 30px;"></i> <span></span> </a> </div> <div class="row"> <div class="column"> <h2><b>PYTHON</b></h2> <p> Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms.</p> <button class="button" onclick="window.location='https://www.tutorialspoint.com/python/'"><span>Read more</span></button> </div> <div class="column"> <h2><b>JAVA</b></h2> <p> Java is a programming language and computing platform first released by Sun Microsystems in 1995. There are lots of applications and websites that will not work unless you have Java installed, and more are created every day. Java is fast, secure, and reliable. From laptops to datacenters, game consoles to scientific supercomputers, cell phones to the Internet, Java is everywhere!</p> <button class="button" onclick="window.location='https://www.javatpoint.com/java-tutorial'"><span>Read more</span></button> </div> <div class="column"> <h2><b>JAVASCRIPT</b></h2> <p> JavaScript is a lightweight, interpreted programming language. It is complimentary to and integrated with Java. JavaScript is very easy to implement because it is integrated with HTML. This tutorial has been prepared for JavaScript beginners to help them understand the basic functionality of JavaScript to build dynamic web pages and web applications.</p> <button class="button" onclick="window.location='https://www.geeksforgeeks.org/javascript-tutorial/'"><span>Read more</span></button> </div> </div> <div class="row"> <div class="column"> <h2><b>SWIFT</b></h2> <p> Swift 4 is a new programming language developed by Apple Inc for iOS and OS X development. Swift 4 adopts the best of C and Objective-C, without the constraints of C compatibility. You’ll need Xcode 10. Swift 4 uses the same runtime as the existing Obj-C system on Mac OS and iOS, which enables Swift 4 programs to run on many existing iOS 6 and OS X 10.8 platforms.</p> <button class="button" onclick="window.location='https://www.raywenderlich.com/6338-swift-tutorial-part-1-expressions-variables-and-constants'"><span>Read more</span></button> </div> <div class="column"> <h2><b>C#</b></h2> <p> This tutorial will introduce you to .NET framework using C# language. You will also learn to create a C Sharp based web application using .NET framework. This is a complete online course and covers topics like accessing data, classes & objects, file commands, window forms etc. The first version of the .Net framework was released in the year 2002. </p> <button class="button" onclick="window.location='https://www.guru99.com/c-sharp-tutorial.html'"><span>Read more</span></button> </div> <div class="column"> <h2><b>C/C++</b></h2> <p> C++ is a statically-typed, free-form, (usually) compiled, multi-paradigm, intermediate-level general-purpose middle-level programming language. The main features of C language include low-level access to memory, simple set of keywords, and clean style, these features make C language suitable for system programming like operating system or compiler development.</p> <button class="button" onclick="window.location='https://www.geeksforgeeks.org/c-language-set-1-introduction/'"><span>Read more</span></button> </div> <div class="column"> <h2><b>RUBY</b></h2> <p> Ruby is an object-oriented programming language developed by <NAME>. Ruby is a dynamic programming language with a complex but at the same time expressive grammar. Ruby is inspired by other low level and object oriented programming languages like Lisp, Smalltalk, and Perl and uses syntax that is easy for C and Java programmers to learn. </p> <button class="button" onclick="window.location='https://www.studytonight.com/ruby/introduction-to-ruby'"><span>Read more</span></button> </div> </div> </body> </html>
562658c1511596564b109f2feed87e6f25c62d27
[ "Markdown", "JavaScript", "PHP" ]
4
Markdown
apkousi/myfirstsite
1ff1c5255b2f3097231101ced8aa4b8107e67672
fdc09a3e6f076dd7e8d2729f85812edad17b69b3
refs/heads/master
<repo_name>yt106/ProgrammingAssignment2<file_sep>/cachematrix.R ## Cached Inverse matrix functions ## create a cached matrix with get/set functions makeCacheMatrix <- function(x = matrix()) { inv <- NULL set <- function(y) { x <<- y inv <<- NULL } get <- function() x setinverse <- function(inverse) inv <<- inverse getinverse <- function() inv list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## calculate the inverse of a matrix or return a cached value if available ## used in conjunction with makeCacheMatrix. ## e.g. ## > m1 <- matrix(c(0, 10, 20, 0), nrow = 2, ncol = 2) ## > m <- makeCacheMatrix(m1) ## > cacheSolve(m) ## [,1] [,2] ## [1,] 0.00 0.1 ## [2,] 0.05 0.0 cacheSolve <- function(x, ...) { inv <- x$getinverse() if(!is.null(inv)) { message("getting cached data") return(inv) } data <- x$get() inv <- solve(data, ...) x$setinverse(inv) inv }
f42d660735ce6cc42be0a58c5b67b8f9c36eb3a3
[ "R" ]
1
R
yt106/ProgrammingAssignment2
d841ff8e466a6b58cb3cd8dd657721e8fcc9f9c7
a8410f070a89e203db055be6270612d9882310bc
refs/heads/main
<file_sep># Networking test of Networking with node.js
fcbd756a45e996acc132b9f6305d87b6621c30ad
[ "Markdown" ]
1
Markdown
Tomsovec/Networking
d30bf5685e7ca7289646c0654e1b7869a323b2fd
980b24b2ba43710f22ccfcc1db4a67f74379b7d4
HEAD
<file_sep>$(document).ready( function(){ var player = document.querySelector('.player'); function epicTrigger(){ var music = document.querySelector('.music'); if(music.paused){ player.innerHTML = 'Epic on'; music.play(); } else{ music.pause(); player.innerHTML = 'Epic off'; } } player.addEventListener('click', epicTrigger, false) document.querySelector('.music').addEventListener('ended', function(){ player.innerHTML = 'Epic off'; }, false) function isIE () { var myNav = navigator.userAgent.toLowerCase(); return (myNav.indexOf('msie') != -1) || (myNav.indexOf('trident') != -1); } if(isIE()){ document.querySelector('.presentation').style.visibility = 'hidden'; } if(!isMobile){ oldNode = '.project-image'; newNode = '.project-discription'; setInterval(function(){ for(var i = 0; i < $(oldNode).length; i++){ $($(newNode)[i]).fadeOut(0, function(){ $($(oldNode)[i]).fadeIn(2500).css('display', 'block');; }); } var tmp = oldNode; oldNode = newNode; newNode = tmp; }, 5000); } if(!isMobile){ window.onscroll = function(){ if(window.pageYOffset >= 444){ $('.nick').css('display', 'none'); $('.nick-header').css('display','block'); } if(window.pageYOffset < 444){ $('.nick-header').css('display','none'); $('.nick').css('display', 'block'); } }; $('.ancor-control').each(function(){ $(this).click(function(){ $('html, body').animate({ scrollTop: $( $.attr(this, 'href') ).offset().top -60 },500); }); }); } });
77758c04a3b02c4bd3d243b513c8108c8301d2cf
[ "JavaScript" ]
1
JavaScript
andymost/selfpage
e64efc242b01213b2c1cd8b66b5df174f2a4e55a
e6b7e4f6f27d4f7c334df1ec57489304b64cbea6
refs/heads/master
<repo_name>pauloconnor/terraform-provider-consulacl<file_sep>/CHANGELOG.md # Change Log ## 1.2.0 - 2018-10-XX - Inheritency for rules between ACLs ## 1.1.1 - 2018-08-01 ### Fixed - Mark `token` as computed attribute so that it's really optional ([#1]) ## 1.1.0 - 2018-07-06 ### Added - Import functionality for `consulacl_token` ## 1.0.0 - 2018-07-05 ### Added - Initial implementation for `consulacl_token` resource [#1]: https://github.com/Ashald/terraform-provider-consulacl/issues/1 <file_sep>/README.md # Terraform Consul ACL Provider ## Overview This provider defines a Terraform resource that allows one to manage Consul ACL tokens with Terraform. ## Resources This plugin defines following resources: * `transform_group_by_value` - manages a single Consul ACL token ## Reference Provider is configurable with number of parameters: * `address` - String, host and port used to connect to Consul. Defaults to `localhost:8500`. Can be set via environment variables `CONSUL_ADDRESS` or `CONSUL_HTTP_ADDR`. * `token` - String, ACL token to use for API calls to Consul. Must be a `management` token to manage ACLs. Defaults to empty value. Can be set via environment variables `CONSUL_TOKEN` or `CONSUL_HTTP_TOKEN`. * `scheme` - String, scheme to use to connect to Consul. Defaults to `http`. Can be set via environment variables `CONSUL_SCHEME` or `CONSUL_HTTP_SCHEME`. * `ca_file` - String, path to a certificate of a certification authority (CA) that was used to sign Consul's TLS certificate and therefore should be used for TLS validation. Defaults to system bundle if not specified. Can be set via environment variable `CONSUL_CA_FILE`. * `cert_file` - String, path to a client certificate for client-side TLS authentication, if enabled in Consul. Can be set via environment variable `CONSUL_CERT_FILE`. * `key_file` - String, path to a private key for client certificate provided in `cert_file`. Can be set via environment variable `CONSUL_KEY_FILE`. * `tls_skip_verify` - Bool, whether to skip verification of Consul's TLS certificate. Defaults to `false`. Can be set via environment variable `CONSUL_TLS_SKIP_VERIFY`. ### consulacl_token #### Arguments The following arguments are supported: * `name` - (Required) String defining name for the token * `type` - (Required) String defining type of the token - either `client` or `management` * `token` - (Optional) If set, defines the token ID. If unset - a unique UUID will be generated by Consul and exportedby the resource. It is a sensitive data. * `inherits` - (Optional) An array of resources which this ACL will inherit the rules from. Highest permissions win in the case of duplicate rules. * `rule` - (Optional) Set of rules to assign to the token. Each rule is defined as a map with following fields: * `scope` - (Required) String defining a scope of the rule. One of: `agent`, `event`, `key`, `node`, `query`, `service`, `session`, `keyring` and `operator`. * `policy` - (Required) String defining a policy of the rule. One of: `read`, `write`. * `prefix` - (Optional) String defining a prefix limiting the rule's effect. Not allowed for `keyring` and `operator` scopes. #### Attributes The following attribute is exported: * `token` - String, the ACL token's value. Sensitive. * `id` - String, SHA256 hash of `token` attribute. ## Installation > Terraform automatically discovers the Providers when it parses configuration files. > This only occurs when the init command is executed. Currently Terraform is able to automatically download only [official plugins distributed by HashiCorp](https://github.com/terraform-providers). [All other plugins](https://www.terraform.io/docs/providers/type/community-index.html) should be installed manually. > Terraform will search for matching Providers via a > [Discovery](https://www.terraform.io/docs/extend/how-terraform-works.html#discovery) process, **including the current > local directory**. This means that the plugin should either be placed into current working directory where Terraform will be executed from or it can be [installed system-wide](https://www.terraform.io/docs/configuration/providers.html#third-party-plugins). ## Usage ### main.tf ```hcl resource "consulacl_token" "token" { name = "A demo token" token = "<PASSWORD>" type = "client" rule { scope="key" policy="write" prefix="foo/bar/baz" } rule { scope="service" policy="read" prefix="" } rule { scope="operator" policy="read" } } resource "consulacl_token" "inherited" { name = "Inherited token" type = "client" rule { scope="key" policy="write" prefix="foo/bar" } rule { scope="key" policy="read" prefix="foo/bar/baz" } inherits = [ "${consulacl_token.token.rule}" ] } ``` ### Download ```bash $ wget "https://github.com/ashald/terraform-provider-consulacl/releases/download/v1.1.1/terraform-provider-consulacl_v1.1.1-$(uname -s | tr '[:upper:]' '[:lower:]')-amd64" $ chmod +x ./terraform-provider-transform* ``` ### Init ```bash $ ls -1 main.tf terraform-provider-consulacl_v1.1.1-linux-amd64 $ terraform init Initializing provider plugins... The following providers do not have any version constraints in configuration, so the latest version was installed. To prevent automatic upgrades to new major versions that may contain breaking changes, it is recommended to add version = "..." constraints to the corresponding provider blocks in configuration, with the constraint strings suggested below. * provider.consulacl: version = "~> 1.0" Terraform has been successfully initialized! You may now begin working with Terraform. Try running "terraform plan" to see any changes that are required for your infrastructure. All Terraform commands should now work. If you ever set or change modules or backend configuration for Terraform, rerun this command to reinitialize your working directory. If you forget, other commands will detect it and remind you to do so if necessary. ``` ### Apply ```bash $ terraform apply An execution plan has been generated and is shown below. Resource actions are indicated with the following symbols: + create Terraform will perform the following actions: + consulacl_token.token id: <computed> name: "A demo token" rule.#: "3" rule.1495889372.policy: "write" rule.1495889372.prefix: "foo/bar/baz" rule.1495889372.scope: "key" rule.2015766991.policy: "read" rule.2015766991.prefix: "" rule.2015766991.scope: "service" rule.4269786272.policy: "read" rule.4269786272.prefix: "" rule.4269786272.scope: "operator" token: <sensitive> type: "client" Plan: 1 to add, 0 to change, 0 to destroy. Do you want to perform these actions? Terraform will perform the actions described above. Only 'yes' will be accepted to approve. Enter a value: yes consulacl_token.token: Creating... name: "" => "A demo token" rule.#: "0" => "3" rule.1495889372.policy: "" => "write" rule.1495889372.prefix: "" => "foo/bar/baz" rule.1495889372.scope: "" => "key" rule.2015766991.policy: "" => "read" rule.2015766991.prefix: "" => "" rule.2015766991.scope: "" => "service" rule.4269786272.policy: "" => "read" rule.4269786272.prefix: "" => "" rule.4269786272.scope: "" => "operator" token: "<sensitive>" => "<sensitive>" type: "" => "client" consulacl_token.token: Creation complete after 0s (ID: 929a4284c3<PASSWORD>ba4a96dbbcf<PASSWORD>160258643e4d1beb9a15fff6c6bcd027) Apply complete! Resources: 1 added, 0 changed, 0 destroyed. ``` ### Import ```bash $ terraform import consulacl_token.token "<PASSWORD>" consulacl_token.token: Importing from ID "<PASSWORD>"... consulacl_token.token: Import complete! Imported consulacl_token (ID: <KEY>) consulacl_token.token: Refreshing state... (ID: <KEY>) Import successful! The resources that were imported are shown above. These resources are now in your Terraform state and will henceforth be managed by Terraform. ``` ## Development ### Go In order to work on the provider, [Go](http://www.golang.org) should be installed first (version 1.8+ is *required*). [goenv](https://github.com/syndbg/goenv) and [gvm](https://github.com/moovweb/gvm) are great utilities that can help a lot with that and simplify setup tremendously. [GOPATH](http://golang.org/doc/code.html#GOPATH) should be setup correctly and as long as `$GOPATH/bin` should be added `$PATH`. ### Source Code Source code can be retrieved either with `go get` ```bash $ go get -u -d github.com/ashald/terraform-provider-consulacl ``` or with `git` ```bash $ mkdir -p ${GOPATH}/src/github.com/ashald/terraform-provider-consulacl $ cd ${GOPATH}/src/github.com/ashald/terraform-provider-consulacl $ git clone <EMAIL>:ashald/terraform-provider-consulacl.git . ``` ### Test #### Unit Tests ```bash $ make test go test -v ./... ? github.com/ashald/terraform-provider-consulacl [no test files] === RUN TestProvider --- PASS: TestProvider (0.00s) === RUN TestIntegrationToken --- SKIP: TestIntegrationToken (0.00s) testing.go:427: Acceptance tests skipped unless env 'TF_ACC' set === RUN TestIntegrationTokenImport --- SKIP: TestIntegrationTokenImport (0.00s) testing.go:427: Acceptance tests skipped unless env 'TF_ACC' set PASS ok github.com/ashald/terraform-provider-consulacl/consulacl (cached) go vet ./... ``` #### Integration Tests This requires a running Consul agent locally. ```bash $ make test-integration TF_ACC=1 go test -v ./... -timeout 1m ? github.com/ashald/terraform-provider-consulacl [no test files] === RUN TestProvider --- PASS: TestProvider (0.00s) === RUN TestIntegrationToken --- PASS: TestIntegrationToken (0.30s) === RUN TestIntegrationTokenImport --- PASS: TestIntegrationTokenImport (0.06s) PASS ok github.com/ashald/terraform-provider-consulacl/consulacl 0.391s ``` If you have [Docker](https://docs.docker.com/install/) installed, you can run Consul with the following command: ```bash $ make test-server latest: Pulling from library/consul Digest: sha256:ae2c9409a77533485982c00f5c1eab89c090889318cb2f4276d64a7d125f83f8 Status: Image is up to date for consul:latest docker run --rm -p 127.0.0.1:8500:8500 -e CONSUL_LOCAL_CONFIG='{"acl_datacenter": "dc1", "acl_master_token": "<PASSWORD>", "bootstrap_expect": 1, "server": true, "ui": true}' 'consul:latest' ... ``` By default, this will use the [latest version of Consul based on the latest image in the Docker repository](https://hub.docker.com/_/consul/). You can specify a version using `CONSUL_VERSION` environment variable: ```bash $ CONSUL_VERSION=1.2.0 make test-server ``` This command will run in foreground and will stop Consul when interrupted. Images will be cached locally by Docker so it is quick to restart the server as necessary. This will expose Consul on the default address `127.0.0.1:8500` but this can be changed with `CONSUL_ADDRESS` environment variable. ### Build In order to build plugin for the current platform use [GNU]make: ```bash $ make build go build -o terraform-provider-consulacl_v1.1.1 ``` it will build provider from sources and put it into current working directory. If Terraform was installed (as a binary) or via `go get -u github.com/hashicorp/terraform` it'll pick up the plugin if executed against a configuration in the same directory. ### Release In order to prepare provider binaries for all platforms: ```bash $ make release GOOS=darwin GOARCH=amd64 go build -o './release/terraform-provider-consulacl_v1.1.1-darwin-amd64' GOOS=linux GOARCH=amd64 go build -o './release/terraform-provider-consulacl_v1.1.1-linux-amd64' ``` ### Versioning This project follow [Semantic Versioning](https://semver.org/) ### Changelog This project follows [keep a changelog](https://keepachangelog.com/en/1.0.0/) guidelines for changelog. ### Contributors Please see [CONTRIBUTORS.md](./CONTRIBUTORS.md) ## License This is free and unencumbered software released into the public domain. See [LICENSE](./LICENSE) <file_sep>/consulacl/provider_test.go package consulacl import ( "testing" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" ) var testProviders map[string]terraform.ResourceProvider var aclProvider *schema.Provider func init() { aclProvider = Provider().(*schema.Provider) testProviders = map[string]terraform.ResourceProvider{ "consulacl": aclProvider, } } func TestProvider(t *testing.T) { if err := aclProvider.InternalValidate(); err != nil { t.Fatalf("err: %s", err) } } <file_sep>/Makefile NAME := terraform-provider-consulacl PLATFORMS ?= darwin/amd64 linux/amd64 windows/amd64 VERSION ?= $(shell git describe &>/dev/null && echo "_$$(git describe)") temp = $(subst /, ,$@) os = $(word 1, $(temp)) arch = $(word 2, $(temp)) BASE := $(NAME)$(VERSION) RELEASE_DIR := ./release CONSUL_ADDRESS ?= 127.0.0.1:8500 CONSUL_LOCAL_CONFIG ?= {"acl_datacenter": "dc1", "acl_master_token": "secret", "bootstrap_expect": 1, "server": true, "ui": true} CONSUL_VERSION ?= latest CONSUL_TOKEN ?= secret all: clean format test release clean: rm -rf $(RELEASE_DIR) ./$(NAME)* format: go fmt ./... test: go test -v ./... go vet ./... test-server: @docker pull 'consul:$(CONSUL_VERSION)' docker run --rm -p $(CONSUL_ADDRESS):8500 -e CONSUL_LOCAL_CONFIG='$(CONSUL_LOCAL_CONFIG)' 'consul:$(CONSUL_VERSION)' test-integration: TF_ACC=1 go test -v ./... -timeout 1m build: go build -o $(BASE) release: $(PLATFORMS) $(PLATFORMS): GOOS=$(os) GOARCH=$(arch) go build -o '$(RELEASE_DIR)/$(BASE)-$(os)-$(arch)' .PHONY: $(PLATFORMS) release build test fmt clean all
d20bbc0104da39e8ac447e22a012d5e9024ac220
[ "Markdown", "Go", "Makefile" ]
4
Markdown
pauloconnor/terraform-provider-consulacl
9ce3eea49bedc4d28ebe6a0297fe011a1c33ca86
e62b0a9781c88ae67c9b4e040a79a189eb4ba165
refs/heads/master
<file_sep>/* * carebot-tracker.js is library that checks if an element is visible on the page * and reports it to pym.js. * Check out the readme at README.md for usage. */ /*globals define, attachEvent, addEventListener: true */ /* global module, console */ (function(factory) { if (typeof define === 'function' && define.amd) { define(factory); } else if (typeof module !== 'undefined' && module.exports) { module.exports = factory(); } else { window.CarebotTracker = factory.call(this); } })(function() { var lib = {}; /** * Timer * @param {Function} callback Optional. * Called every time a new time bucket is reached * */ lib.Timer = function(callback) { // Adapted from // https://github.com/nprapps/elections16/blob/master/www/js/app.js#L298-L335 var MAX_SECONDS = 60 * 20 + 1; // 20 minutes 1 second var startTime; var previousTotalSeconds = 0; var previousBucket; var alerter; function getTimeBucket(seconds) { var minutes, timeBucket; if (seconds < 60) { var tensOfSeconds = Math.floor(seconds / 10) * 10; timeBucket = tensOfSeconds.toString() + 's'; } else if (seconds >=60 && seconds < 300) { minutes = Math.floor(seconds / 60); timeBucket = minutes.toString() + 'm'; } else { minutes = Math.floor(seconds / 60); var fivesOfMinutes = Math.floor(minutes / 5) * 5; timeBucket = fivesOfMinutes.toString() + 'm'; } return timeBucket; } function getSecondsSince(startTime) { if (!startTime) { return 0; } var currentTime = new Date(); var totalTime = Math.abs(currentTime - startTime); var seconds = Math.floor(totalTime/1000); return seconds; } function calculateTimeBucket(startTime) { var totalTime = getSecondsSince(startTime) + previousTotalSeconds; var timeBucket = getTimeBucket(totalTime); return { bucket: timeBucket, seconds: totalTime }; } function check() { return calculateTimeBucket(startTime); } function reportBucket() { if (!callback) { return; } var results = calculateTimeBucket(startTime); if (results.bucket !== previousBucket) { // Don't report forever if (results.seconds >= MAX_SECONDS) { return; } callback(results); previousBucket = results.bucket; } } function start() { startTime = new Date(); reportBucket(); if (callback) { alerter = setInterval(reportBucket, 10000); } } function pause() { previousTotalSeconds = getSecondsSince(startTime) + previousTotalSeconds; clearInterval(alerter); startTime = undefined; } return { start: start, pause: pause, check: check }; }.bind(this); /** * Tracks how long an element is visible. * * @class Parent * @param {String} id The id of the element the tracker will watch. * @param {Function} callback Will be called on every new time bucket. * @param {Object} config Configuration to override the default settings. */ lib.VisibilityTracker = function(id, callback, config) { var WAIT_TO_ENSURE_SCROLLING_IS_DONE = 50; var el = document.getElementById(id); var isVisible = false; var timeout; var timer = new lib.Timer(callback); // Ensure a config object config = (config || {}); function isElementInViewport(el) { // Adapted from http://stackoverflow.com/a/15203639/117014 // // Returns true only if the WHOLE element is in the viewport var rect = el.getBoundingClientRect(); var vWidth = window.innerWidth || document.documentElement.clientWidth; var vHeight = window.innerHeight || document.documentElement.clientHeight; // Core tests: are all sides of the rectangle in the viewport? /* var leftIsOffScreen = rect.left < 0; var rightIsOffScreen = rect.right > vWidth; var bottomIsOffScreen = rect.bottom > vHeight; var topIsOffScreen = rect.top < 0; if (leftIsOffScreen || rightIsOffScreen || topIsOffScreen || bottomIsOffScreen) { return false; } */ // Track partial visibility. var leftSideIsToRightOfWindow = rect.left > vWidth; var rightSideIsToLeftOfWindow = rect.right < 0; var topIsBelowVisibleWindow = rect.top > vHeight; var botomIsAboveVisibleWindow = rect.bottom < 0; if (leftSideIsToRightOfWindow || rightSideIsToLeftOfWindow || topIsBelowVisibleWindow || botomIsAboveVisibleWindow) { return false; } return true; } function checkIfVisible () { var newVisibility = isElementInViewport(el); if (isVisible && !newVisibility) { timer.pause(); } if (!isVisible && newVisibility) { timer.start(); } console.log("Checked visibility", newVisibility); isVisible = newVisibility; return newVisibility; } function handler() { // Only register a new event every 1/10 of a second // That way we don't record an absurd number of events if (timeout) { window.clearTimeout(timeout); } timeout = window.setTimeout(checkIfVisible, WAIT_TO_ENSURE_SCROLLING_IS_DONE); } // Listen to different window movement events if (window.addEventListener) { addEventListener('DOMContentLoaded', handler, false); addEventListener('load', handler, false); addEventListener('scroll', handler, false); addEventListener('resize', handler, false); } else if (window.attachEvent) { attachEvent('onDOMContentLoaded', handler); // IE9+ :( attachEvent('onload', handler); attachEvent('onscroll', handler); attachEvent('onresize', handler); } checkIfVisible(); }; /** * Tracks scroll depth */ lib.ScrollTracker = function(id, callback, config) { var WAIT_TO_ENSURE_SCROLLING_IS_DONE = 100; var elt = document.getElementById(id); var lastPosition = -1; var ticking = false; // Detect request animation frame var requestAnimationFrame = window.requestAnimationFrame || window.webkitRequestAnimationFrame || window.mozRequestAnimationFrame || window.msRequestAnimationFrame || window.oRequestAnimationFrame || // IE Fallback, you can even fallback to onscroll function(callback){ window.setTimeout(callback, WAIT_TO_ENSURE_SCROLLING_IS_DONE); }; if (!elt) { return; } // Start tracking the time on page. var timer = new lib.Timer(); timer.start(); var previousBucket = -1; function getPageScroll() { var body = document.body; var docEl = document.documentElement; var scrollTop; var scrollLeft; if (window.hasOwnProperty('pageYOffset')) { scrollTop = window.pageYOffset; scrollLeft = window.pageXOffset; } else if (docEl.hasOwnProperty('scrollTop')) { scrollTop = docEl.scrollTop; scrollLeft = docEl.scrollLeft; } else { scrollTop = body.scrollTop; scrollLeft = body.scrollLeft; } return { scrollTop: scrollTop, scrollLeft: scrollLeft }; } function getCoords(elem) { // via http://stackoverflow.com/a/26230989/117014 var box = elem.getBoundingClientRect(); var body = document.body; var docEl = document.documentElement; var scroll = getPageScroll(); var scrollTop = scroll.scrollTop; var scrollLeft = scroll.scrollLeft; if (window.hasOwnProperty('pageYOffset')) { scrollTop = window.pageYOffset; scrollLeft = window.pageXOffset; } else if (docEl.hasOwnProperty('scrollTop')) { scrollTop = docEl.scrollTop; scrollLeft = docEl.scrollLeft; } else { scrollTop = body.scrollTop; scrollLeft = body.scrollLeft; } var clientTop = docEl.clientTop || body.clientTop || 0; var clientLeft = docEl.clientLeft || body.clientLeft || 0; var top = box.top + scrollTop - clientTop; var left = box.left + scrollLeft - clientLeft; return { top: Math.round(top), left: Math.round(left) }; } function depthPercent() { var eltTopPosition = getCoords(elt).top; var articleHeight = elt.offsetHeight; var eltBottomPosition = eltTopPosition + articleHeight; var scrollTop = getPageScroll().scrollTop; var scrollBottom = scrollTop + window.innerHeight; var percent = (scrollBottom - eltTopPosition) / articleHeight; return percent; } function percentBucket(n) { return Math.round(n * 10) * 10; } function trackDepth() { var percent = depthPercent(); var bucket = percentBucket(percent); if (bucket > previousBucket) { callback(bucket, timer.check().seconds); } previousBucket = bucket; } function update() { trackDepth(); ticking = false; } function requestTick() { if (lastPosition == window.pageYOffset) { return false; } else { lastPosition = window.pageYOffset; } if(!ticking) { requestAnimationFrame(update); ticking = true; } } // only listen for scroll events window.addEventListener('scroll', requestTick, false); }; return lib; }); <file_sep># Carebot Tracker [![Build Status](https://travis-ci.org/thecarebot/carebot-tracker.svg?branch=master)](https://travis-ci.org/thecarebot/carebot-tracker) [Carebot](http://thecarebot.github.io) is an effort in thinking about alternative ways to look at analytics for journalism: both the measures and indicators used to understand story impact, and the way which analytics data is used in the newsroom. This repository has the trackers you'll put on your website to measure interactions with your articles. You might also want to run the [Carebot Slackbot](https://github.com/thecarebot/carebot#carebot-the-slackbot) to report data through notifications. ## Quickstart If you are using Google Analytics, here's how to include the carebot tracker and start tracking how much of an article someone has read. Put this code after the Google Analytics code block. You'll need to change `element-id` to match the CSS ID of the article on your page, and ``` <script type="text/javascript" src="carebot-tracker.min.js"></script> <script type="text/javascript"> var tracker = new CarebotTracker.ScrollTracker('element-id', function(percent, seconds) { var eventData = { 'hitType': 'event', 'eventCategory': 'your-page-slug-here', // something to identify the story later 'eventAction': 'scroll-depth', 'eventLabel': percent, 'eventValue': seconds }; ga('send', eventData); // Assumes GA has already been set up. }); </script> ``` ## The trackers Here are more details on the two trackers available and how to use them: ### Time on Screen Tracker The Visibility Tracker records how long an element is visible on screen. It reports the time in standard buckets: * From zero up to 59 seconds: 10 second intervals (eg `10s`, `20s`, `30s`...) * 60 up to 300 seconds: one-minute intervals (eg `1m`, `2m`...) * More than 300 seconds: five-minute intervals (eg `5m`, `10m`...) Here's how to setup this tracker: ``` <script type="text/javascript" src="carebot-tracker.min.js"></script> <script type="text/javascript"> var tracker = new CarebotTracker.VisibilityTracker('element-id', function(bucket) { console.log("The user has seen the graphic for " + bucket); // eg "10s", "2m" }); </script> ``` ### Scroll Depth Tracker The ScrollTracker measures how much of a given element has been "read" (passed the bottom of the screen). As you scroll down, it'll record every 10% of an an element you read, as well as how long you've spent on the page so far in seconds. If you scroll down, then up, then down again, it'll re-record those percentages with the new time you hit them. Replace `element-id` with the ID of the element you want measure scroll depth for. Usually, this is the ID of the main article on the page. Here's an example of how to add the tracker: ``` <script type="text/javascript" src="carebot-tracker.min.js"></script> <script type="text/javascript"> var tracker = new CarebotTracker.ScrollTracker('element-id', function(percent, seconds) { console.log("The user has gone", percent, "percent down the page after", seconds, "seconds"); }); </script> ``` ### How to send the data to Pym This is a rare edge case (we set it up to meet NPR's specific implementation). If you're using [pym](https://github.com/nprapps/pym.js)_and) your graphic uses a different analytics property than the parent page, you can pass in the bucket values to the pym child using code like this: ``` var tracker = new CarebotTracker.ScrollTracker('element-id', function(percent, seconds) { console.log(" pymParent.sendMessage('scroll-depth', { percent: percent, // Percents as a number: "10", "120" seconds: seconds }); }); ``` ### Timer The timer is a utility class that works like a stopwatch. You probably won't need to use it directly unless you're building a new tracker. #### Time buckets The timer's special feature is that it returns times in the standard NPR time buckets as strings (in addition to a plain `seconds` count). The time buckets are: * From zero up to 59 seconds: 10 second intervals (eg `10s`, `20s`, `30s`...) * 60 up to 300 seconds: one-minute intervals (eg `1m`, `2m`...) * More than 300 seconds: five-minute intervals (eg `5m`, `10m`...) #### Methods ##### Constructor ``` var timer = new CarebotTracker.Timer(); ``` An optional callback will be called on every new bucket: ``` var timer = new CarebotTracker.Timer(function(result) { console.log(result.bucket, result.seconds); }); ``` ##### `start` Starts the timer. ``` var timer = new CarebotTracker.Timer(); timer.start(); ``` ##### `pause` Pauses the timer. Note that this does not zero out the timer value. ``` var timer = new CarebotTracker.Timer(); timer.start(); timer.pause(); ``` ##### `check` Gets the seconds elapsed and current time bucket ``` var timer = new CarebotTracker.Timer(); timer.start(); // wait 300 seconds console.log(timer.check()); // prints { bucket: '5m', seconds: 300 } ``` #### Example ``` var timer = new CarebotTracker.Timer(); timer.start(); // wait 300 seconds timer.pause(); console.log(timer.check()); // prints { bucket: '5m', seconds: 300 } timer.start(); // wait 60 seconds timer.check(); // prints { bucket: '5m', seconds: 360 } ``` ## Development Here's what you need to make Carebot Tracker better. ### Getting started You'll need node and npm to get started. After installing node, you can install the dependencies by running `npm install`. ### Developing Run `grunt watch` from the project root to track changes, automatically lint the JS, and build the minimized and source versions that end up in `/dist`. ### Building Run `grunt` from the project root to lint the files and package them in `/dist`. ### Testing Run `mocha` from the project root to run the test suite. To manually test while developing, start a simple server from the project root: ``` python -m SimpleHTTPServer 8000 ``` And then load load http://localhost:8000/test/index.html This is less than ideal and should be replaced with an automated selenium test rig. ## Alternatives If you're using jquery on the page, these plugins by <NAME> could simplify your life and can act as replacements for pym: * [Scroll Depth](http://scrolldepth.parsnip.io/) * [Riveted](http://riveted.parsnip.io/) for measuring active time on site * [Screentime](http://screentime.parsnip.io/) for measuring time an element is on screen *** ## Contribute to the project Here are a few ways you can help make Carebot more useful: 1. Fix bugs in the Carebot [tracker](https://github.com/thecarebot/carebot-tracker) and [slackbot](https://github.com/thecarebot/carebot). 2. Report new [new issues](https://github.com/thecarebot/carebot/issues/new) you find. 3. Contribute new metrics to the Carebot tracker via pull request. 4. Contribute new notification methods to Carebot via pull request (SMS, Hipchat, etc). 5. Add to or edit [this documentation](https://github.com/thecarebot/carebot-tracker/blob/master/README.md). 6. [Tell people](http://twitter.com) about Carebot and how more meaningful analytics can help them.
8f071130ea4d1130039e3c88b8d9d7c0f7ca3b4b
[ "JavaScript", "Markdown" ]
2
JavaScript
PotterSys/carebot-tracker
270b03f01dfe516f8d4a1dec1d665a9f1f4c6f4c
a33283ebcdca97164a1745d4ac825e9bd03085c7
refs/heads/master
<repo_name>PwnMonkey13/Order.js-Generator<file_sep>/order.js var AllProducts = getProducts(); var Product = function(json){ if(json.REF != "" && json.DES != "" && json.PRIX != "" && json.PIECES != ""){ return {REF : json.REF, DES:json.DES, PRIX:json.PRIX, PIECES:json.PIECES} } }; var Order = function(date,index,MaxProducts,ProdTime,DeliveryTime){ var EndProd = date; EndProd.setDate(EndProd.getDate() + random(ProdTime-2, ProdTime+5)); var deliveryDate = EndProd; deliveryDate.setDate(deliveryDate.getDate() + random(DeliveryTime-2,DeliveryTime+2)); var Products = []; for(var i=0;i<MaxProducts;i++){ Products.push(new Product(AllProducts[random(0,AllProducts.length-1)])); } var ref = "Order_"+date.toLocaleDateString()+"_"+index; return {REF : ref, DATE : date.toLocaleDateString(), ENDPRODDATE : EndProd.toLocaleDateString(), DELIVERYDATE :deliveryDate.toLocaleDateString(), PRODUCTS : Products}; }; var Orders = function(MinMonth,MaxMonth,MaxOrdersPerDay,MinMaxProduits,MaxMaxProduits,ProdTime,DeliveryTime){ this.Orders = []; this.Production = 0; this.AvgOrders = 0; this.AvgProduction = 0; this.GenerateOrders = function() { var date_from = new Date(); this.days = 7; this.inc = 0; var date_to = new Date(); var sum =0; date_from.setMonth(date_from.getMonth()-MinMonth); date_to.setMonth(date_to.getMonth()+MaxMonth); var w=0; var weeks = []; var temp = new Date(); temp.setMonth(temp.getMonth()-MinMonth); while (date_from < date_to){ w++; var ProductionPerWeek = 0; var temp = date_from; for (var i = 0; i < this.days; i++) { temp.setDate(temp.getDate() + 1); //jour de la semaine this.OrdersPerDay = random(2,MaxOrdersPerDay); for (var j = 0; j < this.OrdersPerDay; j++) { //nbr de commandes par jour this.ProductsPerOrder = random(1, random(MinMaxProduits, MaxMaxProduits)); var dateString = temp.toLocaleDateString(); var parts = dateString.split('/'); var newdate = new Date(parseInt(parts[2]),parseInt(parts[1]-1), parseInt(parts[0])); var order = new Order(newdate,j,this.ProductsPerOrder, ProdTime, DeliveryTime); ProductionPerWeek += order.PRODUCTS.length; this.Production += ProductionPerWeek; this.Orders.push(order); } } weeks.push({'prod' : ProductionPerWeek, 'orders': this.OrdersPerDay}); if(weeks.length>1){ var cur = weeks[weeks.length-1]; var last = weeks[weeks.length-2]; var pc = (cur.prod - last.prod) / cur.prod * 100; if (Math.abs(pc) > 15) { console.log('Last week: ' + last.prod); console.log('Curret week =' +cur.prod); console.log('Pourcentage d\'évolution : ' + pc + '%'); console.log('-------- PAS COHERENT ---------'); this.inc++; } } //reattribute of new max Product per week with +- 15% ProductionPerWeek = random(Math.ceil(ProductionPerWeek * (0.85)), Math.ceil(ProductionPerWeek / (0.85))); this.findMultiple(this.ProductsPerOrder, this.OrdersPerDay, ProductionPerWeek); date_from.setDate(date_from.getDate()+7); } weeks.forEach(function(content){ sum += content.orders; }); this.AvgOrders = sum / (weeks.length); this.AvgProduction += this.Production/weeks.length; console.log(weeks.length + ' Semaines'); console.log(this.Production+' Produits'); console.log(this.AvgProduction+' Produits par semaine en moyenne'); console.log(Object.size(this.Orders)+' Commandes'); console.log(this.AvgOrders+' Commandes par semaine en moyenne'); console.log(this.inc+' INCOHERENCES'); console.log(this.Orders); //console.log(JSON.stringify(this.Orders)); }; this.findMultiple = function(a,b,tot){ if(Math.abs(a*b*7 - tot) > 0.2*tot) { console.log('calc ' + a, b, tot, Math.abs(tot - a * b * 7), 0.2 * tot); for (var i = 5; i < MaxMaxProduits; i++) { for (var j = 2; j < MaxOrdersPerDay; j++) { if (Math.abs(i * j * 7 - tot) <= 0.2 * tot) { this.ProductsPerOrder = i; this.OrdersPerDay = j; console.log('res ' + i, j, tot, Math.abs(tot - i * j * 7), 0.2 * tot); return; } } } this.ProductsPerOrder = a; this.OrdersPerDay = b; } }; }; function getProducts(){ var obj=null; $.ajax({ url: 'http://92.222.65.30:8000/api/ware/products', dataType: 'json', async:false, success: function (data) { obj = data; } }); return $.map(obj, function(value) { return [value]; }); } function random(min, max){ return Math.floor((Math.random() * (max - min + 1)) + min); } Object.size = function(obj) { var size = 0, key; for (key in obj) { if (obj.hasOwnProperty(key)) size++; } return size; }; <file_sep>/README.md # Order.js-Generator
8364a71a1fd32c792b93dea3e3f19c602ea519e5
[ "JavaScript", "Markdown" ]
2
JavaScript
PwnMonkey13/Order.js-Generator
9e92e6959b5b34adf366c691aad2c41387ffb5cd
7eb2219613fb3105c6ca2cbabe98e60685065ff1
refs/heads/master
<file_sep>#!/bin/bash -e echo "Copia de archivos para configuracion de la FZT" PLAYBOOK="iiab.yml" INVENTORY="ansible_hosts" ansible -m setup -i $INVENTORY localhost --connection=local >> /dev/null ansible-playbook -i $INVENTORY $PLAYBOOK --connection=local <file_sep>#!/bin/bash sudo systemctl restart calibre-serve <file_sep>#!/bin/bash #Cambio de permiso para ejecutar los scripts chmod 755 /opt/iiab/iiab-admin-console/roles/cmdsrv/files/scripts/* #Descarga de contenido offline cd /opt/iiab/iiab-admin-console/roles/cmdsrv/files/scripts/ #Libreria proyecto Gutenberg wget -c --progress=dot:giga http://download.kiwix.org/portable/gutenberg/kiwix-0.9+gutenberg_es_all_2014-10.zip -O /library/downloads/zims/kiwix-0.9+gutenberg_es_all_2014-10.zip unzip -uo /library/downloads/zims/kiwix-0.9+gutenberg_es_all_2014-10.zip -d /library/working/zims/gutenberg_es_all_2014-10 ./zim_install_step3.sh gutenberg_es_all_2014-10 #Ecured #wget -c --progress=dot:giga http://download.kiwix.org/zim/other/ecured_es_all_2018-04.zim -O /library/working/zims/ecured_es_all_2018-04.zim #./zim_install_move.sh ecured_es_all_2018-04.zim #Phet #wget -c --progress=dot:giga http://download.kiwix.org/zim/phet/phet_es-pe_2018-06.zim -O /library/working/zims/phet_es-pe_2018-06.zim #./zim_install_move.sh phet_es-pe_2018-06.zim wget -c --progress=dot:giga http://download.kiwix.org/zim/phet/phet_es_2018-06.zim -O /library/working/zims/phet_es_2018-06.zim ./zim_install_move.sh phet_es_2018-06.zim #Stack Exchange wget -c --progress=dot:giga http://download.kiwix.org/zim/stack_exchange/es.stackoverflow.com_es_all_2018-03.zim -O /library/working/zims/es.stackoverflow.com_es_all_2018-03.zim ./zim_install_move.sh es.stackoverflow.com_es_all_2018-03.zim #Vikidia wget -c --progress=dot:giga http://download.kiwix.org/zim/vikidia/vikidia_es_all_2018-06.zim -O /library/working/zims/vikidia_es_all_2018-06.zim ./zim_install_move.sh vikidia_es_all_2018-06.zim wget -c --progress=dot:giga http://download.kiwix.org/zim/vikidia/vikidia_es_all_nopic_2018-06.zim -O /library/working/zims/vikidia_es_all_nopic_2018-06.zim ./zim_install_move.sh vikidia_es_all_nopic_2018-06.zim wget -c --progress=dot:giga http://download.kiwix.org/zim/vikidia/vikidia_es_all_novid_2018-06.zim -O /library/working/zims/vikidia_es_all_novid_2018-06.zim ./zim_install_move.sh vikidia_es_all_novid_2018-06.zim #Wikilibros wget -c --progress=dot:giga http://download.kiwix.org/zim/wikibooks/wikibooks_es_all_novid_2018-06.zim -O /library/working/zims/wikibooks_es_all_novid_2018-06.zim ./zim_install_move.sh wikibooks_es_all_novid_2018-06.zim wget -c --progress=dot:giga http://download.kiwix.org/zim/wikibooks/wikibooks_es_all_nopic_2018-06.zim -O /library/working/zims/wikibooks_es_all_nopic_2018-06.zim ./zim_install_move.sh wikibooks_es_all_nopic_2018-06.zim #wikinews wget -c --progress=dot:giga http://download.kiwix.org/zim/wikinews/wikinews_es_all_novid_2018-06.zim -O /library/working/zims/wikinews_es_all_novid_2018-06.zim ./zim_install_move.sh wikinews_es_all_novid_2018-06.zim wget -c --progress=dot:giga http://download.kiwix.org/zim/wikinews/wikinews_es_all_nopic_2018-06.zim -O /library/working/zims/wikinews_es_all_nopic_2018-06.zim ./zim_install_move.sh wikinews_es_all_nopic_2018-06.zim #Wikiquotes wget -c --progress=dot:giga http://download.kiwix.org/zim/wikiquote/wikiquote_es_all_nopic_2018-06.zim -O /library/working/zims/wikiquote_es_all_nopic_2018-06.zim ./zim_install_move.sh wikiquote_es_all_nopic_2018-06.zim wget -c --progress=dot:giga http://download.kiwix.org/zim/wikiquote/wikiquote_es_all_novid_2018-06.zim -O /library/working/zims/wikiquote_es_all_novid_2018-06.zim ./zim_install_move.sh wikiquote_es_all_novid_2018-06.zim #Wikisources wget -c --progress=dot:giga http://download.kiwix.org/zim/wikisource/wikisource_es_all_novid_2018-06.zim -O /library/working/zims/wikisource_es_all_novid_2018-06.zim ./zim_install_move.sh wikisource_es_all_novid_2018-06.zim wget -c --progress=dot:giga http://download.kiwix.org/zim/wikisource/wikisource_es_all_nopic_2018-06.zim -O /library/working/zims/wikisource_es_all_nopic_2018-06.zim ./zim_install_move.sh wikisource_es_all_nopic_2018-06.zim #Wikiversity wget -c --progress=dot:giga http://download.kiwix.org/zim/wikiversity/wikiversity_es_all_novid_2018-06.zim -O /library/working/zims/wikiversity_es_all_novid_2018-06.zim ./zim_install_move.sh wikiversity_es_all_novid_2018-06.zim wget -c --progress=dot:giga http://download.kiwix.org/zim/wikiversity/wikiversity_es_all_nopic_2018-06.zim -O /library/working/zims/wikiversity_es_all_nopic_2018-06.zim ./zim_install_move.sh wikiversity_es_all_nopic_2018-06.zim #Wikivoyaje wget -c --progress=dot:giga http://download.kiwix.org/zim/wikivoyage/wikivoyage_es_all_novid_2018-06.zim -O /library/working/zims/wikivoyage_es_all_novid_2018-06.zim ./zim_install_move.sh wikivoyage_es_all_novid_2018-06.zim wget -c --progress=dot:giga http://download.kiwix.org/zim/wikivoyage/wikivoyage_es_all_nopic_2018-06.zim -O /library/working/zims/wikivoyage_es_all_nopic_2018-06.zim ./zim_install_move.sh wikivoyage_es_all_nopic_2018-06.zim #Wiktionary wget -c --progress=dot:giga http://download.kiwix.org/portable/wiktionary/kiwix-0.9+wiktionary_es_all_novid_2017-08.zip -O /library/downloads/zims/kiwix-0.9+wiktionary_es_all_novid_2017-08.zip unzip -uo /library/downloads/zims/kiwix-0.9+wiktionary_es_all_novid_2017-08.zip -d /library/working/zims/wiktionary_es_all_novid_2017-08 ./zim_install_step3.sh wiktionary_es_all_novid_2017-08 wget -c --progress=dot:giga http://download.kiwix.org/portable/wiktionary/kiwix-0.9+wiktionary_es_all_nopic_2017-08.zip -O /library/downloads/zims/kiwix-0.9+wiktionary_es_all_nopic_2017-08.zip unzip -uo /library/downloads/zims/kiwix-0.9+wiktionary_es_all_nopic_2017-08.zip -d /library/working/zims/wiktionary_es_all_nopic_2017-08 ./zim_install_step3.sh wiktionary_es_all_nopic_2017-08 #Wikimedicina wget -c --progress=dot:giga http://download.kiwix.org/zim/wikipedia/wikipedia_es_medicine_novid_2018-06.zim -O /library/working/zims/wikipedia_es_medicine_novid_2018-06.zim ./zim_install_move.sh wikipedia_es_medicine_novid_2018-06.zim wget -c --progress=dot:giga http://download.kiwix.org/zim/wikipedia/wikipedia_es_medicine_nopic_2018-06.zim -O /library/working/zims/wikipedia_es_medicine_nopic_2018-06.zim ./zim_install_move.sh wikipedia_es_medicine_nopic_2018-06.zim wget -c --progress=dot:giga http://download.kiwix.org/zim/wikipedia/wikipedia_es_medicine_2018-06.zim -O /library/working/zims/wikipedia_es_medicine_2018-06.zim ./zim_install_move.sh wikipedia_es_medicine_2018-06.zim #Wikipedia wget -c --progress=dot:giga http://download.kiwix.org/zim/wikipedia/wikipedia_es_all_novid_2018-04.zim -O /library/working/zims/wikipedia_es_all_novid_2018-04.zim ./zim_install_move.sh wikipedia_es_all_novid_2018-04.zim wget -c --progress=dot:giga http://download.kiwix.org/zim/wikipedia/wikipedia_es_all_nopic_2018-04.zim -O /library/working/zims/wikipedia_es_all_nopic_2018-04.zim ./zim_install_move.sh wikipedia_es_all_nopic_2018-04.zim #####Modulos Rachel##### #Bibliofilo #rsync -Pavz --size-only rsync://dev.worldpossible.org/rachelmods/es-bibliofilo /library/www/html/modules/ #Biblioteca #rsync -Pavz --size-only rsync://dev.worldpossible.org/rachelmods/es-biblioteca /library/www/html/modules/ #Blocky Games rsync -Pavz --size-only rsync://dev.worldpossible.org/rachelmods/es-blockly-games /library/www/html/modules/ #Grandes libros del mundo #rsync -Pavz --size-only rsync://dev.worldpossible.org/rachelmods/es-ebooks /library/www/html/modules/ #Educalab #rsync -Pavz --size-only rsync://dev.worldpossible.org/rachelmods/es-educalab /library/www/html/modules/ #Guia de la salud Hesperian #rsync -Pavz --size-only rsync://dev.worldpossible.org/rachelmods/es-hesperian_health /library/www/html/modules/ #rsync -P /home/fzt-iiab/hesperian/index.html /library/www/html/modules/es-hesperian_health/ #Enciclopedia de la salud #rsync -Pavz --size-only rsync://dev.worldpossible.org/rachelmods/es-medline_plus /library/www/html/modules/ #Soluciones practicas #rsync -Pavz --size-only rsync://dev.worldpossible.org/rachelmods/es-soluciones /library/www/html/modules/ #OpenStreetMap rsync -Pavz --size-only rsync://dev.worldpossible.org/rachelmods/en-worldmap-10 /library/www/html/modules/ #File Share rsync -Pavz rsync://dev.worldpossible.org/rachelmods/es-file_share /library/www/html/modules/ #PhET #rsync -Pavz rsync://dev.worldpossible.org/rachelmods/es-phet-zim /library/www/html/modules/ #GCF2015 #rsync -Pavz rsync://dev.worldpossible.org/rachelmods/es-GCF2015 /library/www/html/modules/ <file_sep><!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <title>Hesperian Health Guides</title> <link rel="stylesheet" href="search/css/normalize-1.1.3.css"> <link rel="stylesheet" href="search/css/style.css"> <link rel="stylesheet" href="search/css/ui-lightness/jquery-ui-1.10.4.custom.min.css"> <script src="search/js/jquery-1.10.2.min.js"></script> <script src="search/js/jquery-ui-1.10.4.custom.min.js"></script> <script> $(function() { $("#searchform").autocomplete({ source: "search/suggest.php", }); }); </script> </head> <body> <!-- <img src="hesperianlogo.png" alt="Hesperian" style="float: left; margin-top: 5px;"> --> <div style="margin: 0 0 40px 140px;"> <h2>Hesperian Health Guides</h2> <p>Guías de temas de salud fáciles de entender, prácticas, precisas y con mucha ilustración para áreas donde existe poco o limitado acceso a médicos o puestos de salud.</p> <form action="search/search.php"> <input name="query" id="searchform" size="40"> <input type="submit" value="Search"> </form> </div> <ul> <li><a href="agua_para_vivir.pdf">Agua para vivir</a> <li><a href="apoyar_ninos_ciegos.pdf">Apoyar niños ciegos</a> <li><a href="apoyar_ninos_sordos.pdf">Apoyar niños sordos</a> <li><a href="donde_no_hay_dentista.pdf">Donde no hay dentista</a> <li><a href="donde_no_hay_doctor.pdf">Donde no hay doctor</a> <li><a href="saneamiento.pdf">Saneamiento</a> </ul> <p> <small> Estos materiales no proporcionan recomendaciones médicas y es meramente informativo. No se pretende que el contenido sustituya chequeos, diagnósticos o tratamiento profesional médico. Si presenta alguna condición o enfermedad y tiene dudas, busque consejo o ayuda de parte de doctores. No se debe ignorar la atención médica profesional ni alargar su examinación por algo leído en esta página. </small> </p> </body> </html> <file_sep>#!/bin/bash #Configuracion de calibre server systemctl stop calibre-serve cp /home/fzt-iiab/calibre-serve.service /etc/systemd/system/calibre-serve.service cp /home/fzt-iiab/users.sqlite /library/calibre/users.sqlite chmod 777 /library/calibre systemctl daemon-reload systemctl start calibre-serve <file_sep><?php $delete = exec("rm /library/calibre/users.sqlite"); $copy = exec("cp /home/fzt-iiab/users.sqlite /library/calibre/"); $reload = exec("sudo /home/fzt-iiab/./rp.sh"); print "$delete"; print "$copy"; print "$reload"; echo "<a href= /home/>Regreso a menu principal</a>"; ?> <file_sep>#!/bin/bash -x # Installs IIAB. "bash -x" (xtrace) is like -v (verbose) but expands commands. # 1. RUN: "sudo su -" then "raspi-config" to set "Localisation Options" # 2. OPTIONAL: "passwd pi; touch /boot/ssh; reboot" to ssh in immediately # 3. RUN THIS SCRIPT: curl download.iiab.io/6.5/load-big.txt | bash # 4. REBOOTS AUTOMATICALLY WHEN DONE (about 2-3 hours later) which # sets the hostname, while improving RTC settings + memory mgmt set -e # to exit on error (avoids snowballing) export DEBIAN_FRONTEND=noninteractive apt update apt -y dist-upgrade apt -y clean # Above updates OS for security especially mkdir -p /opt/iiab cd /opt/iiab/ git clone https://github.com/iiab/iiab -b release-6.5 --depth 1 git clone https://github.com/iiab/iiab-admin-console --depth 1 git clone https://github.com/iiab/iiab-menu --depth 1 git clone https://github.com/iiab/iiab-factory --depth 1 cd /opt/iiab/iiab/scripts/ ./ansible # Installs latest Ansible from PPA # ./ansible-2.4.x # ./ansible-2.5.x cd /home/fzt-iiab/ ./config.sh # In general please examine local_vars.yml carefully (and modify as nec) # before running Ansible (below, which can take ~2 hours the first time!) # NOTE: you can change many/most settings after install too, using the # Admin Console (http://box/admin) as documented at: http://FAQ.IIAB.IO cd /opt/iiab/iiab/ ./iiab-install # TRY TO RERUN THE ABOVE LINE IF IT FAILS (if networking glitches etc?) cd /opt/iiab/iiab-admin-console/ ./install # Installs Admin Console; runs iiab-get-kiwix-cat to d/l Kiwix catalog cd /opt/iiab/iiab-menu/ ./cp-menus # Installs Dynamic Menuing for /library/www/html/home/index.html iiab-make-kiwix-lib # Rebuilds local library.xml in case ZIM files added to /library/zims/content export KALITE_HOME=/library/ka-lite kalite manage generate_zone # Register with KA Lite - just the anonymous registration kalite manage retrievecontentpack download es kalite manage retrievecontentpack download en # Get KA Lite English language pack (slow download!) #Agregando contenido /home/fzt-iiab/./contenido.sh #Configuracion de calibre server /home/fzt-iiab/./calibre.sh #Agregando permiso para reinicio de password de calibre server chmod 777 /etc/sudoers echo "www-data ALL=(ALL) NOPASSWD: /home/fzt-iiab/rp.sh" >> /etc/sudoers chmod 444 /etc/sudoers #Copiando php para reinicio de password cp /home/fzt-iiab/rp.php /library/www/html/iiab-menu/menu-files/html/ reboot <file_sep>#!/bin/bash -x source /etc/iiab/iiab.env {% if is_debuntu %} IPTABLES=/sbin/iptables IPTABLES_DATA=/etc/iptables.up.rules {% else %} IPTABLES=/usr/sbin/iptables IPTABLES_DATA=/etc/sysconfig/iptables {% endif %} LANIF=$IIAB_LAN_DEVICE WANIF=$IIAB_WAN_DEVICE MODE=`grep iiab_network_mode_applied /etc/iiab/iiab.ini | gawk '{print $3}'` clear_fw() { $IPTABLES -F $IPTABLES -t nat -F $IPTABLES -X # first match wins # Always accept loopback traffic $IPTABLES -A INPUT -i lo -j ACCEPT # Always drop rpc $IPTABLES -A INPUT -p tcp --dport 111 -j DROP $IPTABLES -A INPUT -p udp --dport 111 -j DROP # mysql $IPTABLES -A INPUT -p tcp --dport 3306 -j DROP $IPTABLES -A INPUT -p udp --dport 3306 -j DROP # postgre - not needed listens on lo only $IPTABLES -A INPUT -p tcp --dport 5432 -j DROP $IPTABLES -A INPUT -p udp --dport 5432 -j DROP # couchdb $IPTABLES -A INPUT -p tcp --dport 5984 -j DROP $IPTABLES -A INPUT -p udp --dport 5984 -j DROP } if [ "x$WANIF" == "xnone" ] || [ "$MODE" == 'Appliance' ]; then clear_fw # save the rule set {% if is_debuntu %} netfilter-persistent save {% else %} iptables-save > $IPTABLES_DATA {% endif %} exit 0 fi lan=$LANIF wan=$WANIF # Good thing we replace this file should be treated like squid below gw_block_https={{ gw_block_https }} ssh_port={{ ssh_port }} gui_wan={{ gui_wan }} gui_port={{ gui_port }} iiab_gateway_enabled={{ iiab_gateway_enabled }} services_externally_visible={{ services_externally_visible }} calibre_port={{ calibre_port }} kiwix_port={{ kiwix_port }} kalite_server_port={{ kalite_server_port }} block_DNS={{ block_DNS }} captive_portal_enabled={{ captive_portal_enabled }} echo "Lan is $lan and WAN is $wan" # # delete all existing rules. # /sbin/modprobe ip_tables /sbin/modprobe iptable_filter /sbin/modprobe ip_conntrack /sbin/modprobe iptable_nat clear_fw # Allow established connections, and those not coming from the outside $IPTABLES -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT $IPTABLES -A INPUT -m state --state NEW -i $lan -j ACCEPT # Allow mDNS $IPTABLES -A INPUT -p udp --dport 5353 -j ACCEPT #when run as gateway $IPTABLES -A INPUT -p tcp --dport $ssh_port -m state --state NEW -i $wan -j ACCEPT if [ "$gui_wan" == "True" ]; then $IPTABLES -A INPUT -p tcp --dport $gui_port -m state --state NEW -i $wan -j ACCEPT fi if [ "$services_externally_visible" == "True" ]; then $IPTABLES -A INPUT -p tcp --dport $kiwix_port -m state --state NEW -i $wan -j ACCEPT $IPTABLES -A INPUT -p tcp --dport $kalite_server_port -m state --state NEW -i $wan -j ACCEPT $IPTABLES -A INPUT -p tcp --dport $calibre_port -m state --state NEW -i $wan -j ACCEPT fi if [ "$iiab_gateway_enabled" == "True" ]; then $IPTABLES -A POSTROUTING -t nat -o $wan -j MASQUERADE fi $IPTABLES -A FORWARD -i $wan -o $lan -m state --state ESTABLISHED,RELATED -j ACCEPT # drop connections from other (172.18.125.0/24) #i$IPTABLES -A FORWARD -o $wan -s 172.18.125.0/24 -j DROP #Block https traffic except if directed at server if [ "$gw_block_https" == "True" ]; then $IPTABLES -A FORWARD -p tcp ! -d 172.18.96.1 --dport 443 -j DROP fi # Allow outgoing connections from the LAN side. $IPTABLES -A FORWARD -i $lan -o $wan -j ACCEPT # drop connections from other (172.18.125.0/24) $IPTABLES -A FORWARD -o $wan -s 172.18.125.0/24 -j DROP # Don't forward from the outside to the inside. $IPTABLES -A FORWARD -i $wan -o $lan -j DROP $IPTABLES -A INPUT -i $wan -j DROP if [ "$block_DNS" == "True" ];then $IPTABLES -t nat -A PREROUTING -i $lan -p tcp --dport 53 ! -d {{ lan_ip }} -j DNAT --to {{ lan_ip }}:53 $IPTABLES -t nat -A PREROUTING -i $lan -p udp --dport 53 ! -d {{ lan_ip }} -j DNAT --to {{ lan_ip }}:53 fi if [ "$captive_portal_enabled" == "True" ];then $IPTABLES -t mangle -N internet $IPTABLES -t mangle -A PREROUTING -i {{ iiab_lan_iface }} -p tcp -m tcp --dport 80 -j internet $IPTABLES -t mangle -A internet -j MARK --set-mark 99 $IPTABLES -t nat -A PREROUTING -i {{ iiab_lan_iface }} -p tcp -m mark --mark 99 -m tcp --dport 80 -j DNAT --to-destination {{ lan_ip }} elif [ "$HTTPCACHE_ON" == "True" ]; then $IPTABLES -t nat -A PREROUTING -i $lan -p tcp --dport 80 ! -d 172.18.96.1 -j DNAT --to 172.18.96.1:3128 fi # German #if [ -f /etc/sysconfig/xs_httpcache_on ]; then # $IPTABLES -t nat -A PREROUTING -i $lan -p tcp --dport 80 ! -d 172.18.96.1 -j DNAT --to 172.18.96.1:3128 # $IPTABLES -t nat -A PREROUTING -i $lan -p tcp --dport 80 ! -s 172.18.125.0/24 -j DNAT --to 172.18.96.1:3128 #fi # Enable routing. echo 1 > /proc/sys/net/ipv4/ip_forward # save the whole rule set now {% if is_debuntu %} netfilter-persistent save {% else %} iptables-save > $IPTABLES_DATA {% endif %} exit 0
f637825faade5892c42296910a1bb024cab15f45
[ "HTML", "PHP", "Shell" ]
8
Shell
lagm777/fzt-iiab
2577f001f5b28ceae87646f092e3be420050ca51
993368385844478342b090c5ee89a85307fa6d07
refs/heads/master
<file_sep>using System; using System.Collections.Generic; using System.ComponentModel; using System.Data; using System.Drawing; using System.Linq; using System.Text; using System.Threading.Tasks; using System.Windows.Forms; using System.Data.SQLite; namespace WindowsFormsApp4 { public partial class Form1 : Form { //Fix this! String cons; String unitUpdate; String unitFill = @"SELECT DISTINCT Unit FROM InvList"; String customerFill = @"SELECT DISTINCT Customer FROM InvList WHERE Project NOT LIKE '' AND Customer NOT LIKE '';"; String projectFill = @"SELECT DISTINCT Project FROM InvList"; String unitCond = "Unit"; string cond = ""; String project = ""; String customer = ""; String unit = ""; public Form1() { InitializeComponent(); DataClass data = new DataClass(); // Fill the comboboxes initially data.comboFill(unitFill, comboBox1); data.comboFill(customerFill, comboBox2); data.comboFill(projectFill, comboBox3); } private void dataGridView1_CellContentClick(object sender, DataGridViewCellEventArgs e) { } public class DataClass { private SQLiteConnection sqlite; public DataClass() { sqlite = new SQLiteConnection("Data Source=C:/sqlite/NSM20.sqlite;Version=3"); } public DataSet selectQuery(String query, String unit, String customer, String project) { SQLiteDataAdapter ad; DataSet dt = new DataSet(); SQLiteCommand cmd; sqlite.Open(); //Initiate connection to the db cmd = sqlite.CreateCommand(); //Adding SQL parameters cmd.Parameters.AddWithValue("@VarUnit", unit); cmd.Parameters.AddWithValue("@VarCustomer", customer); cmd.Parameters.AddWithValue("@VarProject", project); cmd.CommandText = query; //set the passed query ad = new SQLiteDataAdapter(cmd); ad.Fill(dt); //fill the datasource //MessageBox.Show(dt.Tables[0].Select("Unit is not NULL").Length.ToString()); sqlite.Close(); return dt; } public void comboFill(String query, ComboBox comboBox) { sqlite.Open(); SQLiteCommand cmd = new SQLiteCommand(query, sqlite); SQLiteDataReader DR = cmd.ExecuteReader(); while (DR.Read()) { comboBox.Items.Add(DR[0]); } sqlite.Close(); } public void comboConnect(String query, String unit, String customer, String project, ComboBox comboBox) { sqlite.Open(); SQLiteCommand cmd; cmd = sqlite.CreateCommand(); comboBox.Items.Clear(); cmd = new SQLiteCommand(query, sqlite); //Adding SQL parameters cmd.Parameters.AddWithValue("@varUnit", unit); cmd.Parameters.AddWithValue("@varCustomer", customer); cmd.Parameters.AddWithValue("@varProject", project); cmd.CommandText = query; //set the passed query SQLiteDataReader DR = cmd.ExecuteReader(); while (DR.Read()) { comboBox.Items.Add(DR[0]); } sqlite.Close(); } } private void button1_Click(object sender, EventArgs e) { project = ""; customer = ""; unit = ""; if(comboBox1.SelectedItem != null) { unit = comboBox1.Text; } if (comboBox2.SelectedItem != null) { customer = comboBox2.Text; } if (comboBox3.SelectedItem != null) { project = comboBox3.Text; } cons = @"SELECT * FROM InvList WHERE Unit LIKE @varUnit || '%' AND Customer LIKE @varCustomer || '%' AND Project LIKE @varProject || '%';"; //string addText = textBox1.Text; //comboBox1.Items.Add(addText); DataClass test1 = new DataClass(); dataGridView1.DataSource = test1.selectQuery(cons, unit, customer, project).Tables[0]; } private void comboBox1_SelectedIndexChanged(object sender, EventArgs e) { if (comboBox1.SelectedItem != null) { DataClass combo = new DataClass(); project = comboBox3.Text; customer = comboBox2.Text; unit = comboBox1.Text; String query1 = ""; String query2 = ""; if (comboBox2.SelectedItem == null && comboBox3.SelectedItem == null) { query1 = @"SELECT Project FROM Console WHERE ConUnitNo = @varUnit UNION SELECT Project FROM MainUnit WHERE MainUnitNo = @varUnit UNION SELECT Project FROM SubUnit WHERE SubUnitNo = @varUnit;"; query2 = @"SELECT Customer FROM Console WHERE ConUnitNo = @varUnit UNION SELECT Customer FROM MainUnit WHERE MainUnitNo = @varUnit UNION SELECT Customer FROM SubUnit WHERE SubUnitNo = @varUnit;"; combo.comboConnect(query2, unit, customer, project, comboBox2); combo.comboConnect(query1, unit, customer, project, comboBox3); } else if(comboBox2.SelectedItem != null && comboBox3.SelectedItem == null) { query1 = @"SELECT Project FROM Console WHERE ConUnitNo = @varUnit AND Customer = @varCustomer UNION SELECT Project FROM MainUnit WHERE MainUnitNo = @varUnit AND Customer = @varCustomer UNION SELECT Project FROM SubUnit WHERE SubUnitNo = @varUnit AND Customer = @varCustomer;"; combo.comboConnect(query1, unit, customer, project, comboBox3); } else if(comboBox3.SelectedItem != null && comboBox2.SelectedItem == null) { query1 = @"SELECT Customer FROM Console WHERE ConUnitNo = @varUnit AND Project = @varProject UNION SELECT Customer FROM MainUnit WHERE MainUnitNo = @varUnit AND Project = @varProject UNION SELECT Customer FROM SubUnit WHERE SubUnitNo = @varUnit AND Project = @varProject;"; combo.comboConnect(query1, unit, customer, project, comboBox2); } } } private void comboBox2_SelectedIndexChanged(object sender, EventArgs e) { if (comboBox2.SelectedItem != null) { DataClass combo = new DataClass(); project = comboBox3.Text; customer = comboBox2.Text; unit = comboBox1.Text; String query1 = ""; String query2 = ""; string cond = ""; if (comboBox1.SelectedItem == null && comboBox3.SelectedItem == null) { cond = "Customer"; if (unitCond != "Unit") { query1 = unitBuild(/*cond*/); } else { query1 = @"SELECT ConUnitNo FROM Console WHERE Customer = @varCustomer UNION SELECT MainUnitNo FROM MainUnit WHERE Customer = @varCustomer UNION SELECT SubUnitNo FROM SubUnit WHERE Customer = @varCustomer;"; } query2 = @"SELECT Project FROM Console WHERE Customer = @varCustomer UNION SELECT Project FROM MainUnit WHERE Customer = @varCustomer UNION SELECT Project FROM SubUnit WHERE Customer = @varCustomer;"; combo.comboConnect(query2, unit, customer, project, comboBox3); combo.comboConnect(query1, unit, customer, project, comboBox1); } else if (comboBox1.SelectedItem != null && comboBox3.SelectedItem == null) { query1 = @"SELECT Project FROM Console WHERE ConUnitNo = @varUnit AND Customer = @varCustomer UNION SELECT Project FROM MainUnit WHERE MainUnitNo = @varUnit AND Customer = @varCustomer UNION SELECT Project FROM SubUnit WHERE SubUnitNo = @varUnit AND Customer = @varCustomer;"; combo.comboConnect(query1, unit, customer, project, comboBox3); } else if (comboBox3.SelectedItem != null && comboBox1.SelectedItem == null) { cond = "Project,Customer"; if (unitCond != "Unit") { query1 = unitBuild(/*cond*/); } else { query1 = @"SELECT ConUnitNo FROM Console WHERE Customer = @varCustomer AND Project = @varProject UNION SELECT MainUnitNo FROM MainUnit WHERE Customer = @varCustomer AND Project = @varProject UNION SELECT SubUnitNo FROM SubUnit WHERE Customer = @varCustomer AND Project = @varProject;"; } combo.comboConnect(query1, unit, customer, project, comboBox1); } } } private void comboBox3_SelectedIndexChanged(object sender, EventArgs e) { if (comboBox3.SelectedItem != null) { DataClass combo = new DataClass(); String project = comboBox3.Text; String customer = comboBox2.Text; String unit = comboBox1.Text; String query1 = ""; String query2 = ""; //string cond = ""; if (comboBox1.SelectedItem == null && comboBox2.SelectedItem == null) { cond = "Project"; if (unitCond != "Unit") { query1 = unitBuild(/*cond*/); } else { query1 = @"SELECT ConUnitNo FROM Console WHERE Project = @varProject UNION SELECT MainUnitNo FROM MainUnit WHERE Project = @varProject UNION SELECT SubUnitNo FROM SubUnit WHERE Project = @varProject;"; } query2 = @"SELECT Customer FROM Console WHERE Project = @varProject UNION SELECT Customer FROM MainUnit WHERE Project = @varProject UNION SELECT Customer FROM SubUnit WHERE Project = @varProject;"; combo.comboConnect(query2, unit, customer, project, comboBox2); combo.comboConnect(query1, unit, customer, project, comboBox1); } else if (comboBox1.SelectedItem != null && comboBox2.SelectedItem == null) { query1 = @"SELECT Customer FROM Console WHERE ConUnitNo = @varUnit AND Project = varProject UNION SELECT Customer FROM MainUnit WHERE MainUnitNo = @varUnit AND Project = varProject UNION SELECT Customer FROM SubUnit WHERE SubUnitNo = @varUnit AND Project = varProject;"; combo.comboConnect(query1, unit, customer, project, comboBox2); } else if (comboBox2.SelectedItem != null && comboBox1.SelectedItem == null) { if (unitCond != "Unit") { query1 = unitBuild(/*cond*/); } else { query1 = @"SELECT ConUnitNo FROM Console WHERE Customer = @varCustomer AND Project = @varProject UNION SELECT MainUnitNo FROM MainUnit WHERE Customer = @varCustomer AND Project = @varProject UNION SELECT SubUnitNo FROM SubUnit WHERE Customer = varCustomer AND Project = @varProject;"; } combo.comboConnect(query1, unit, customer, project, comboBox1); } } } private void label3_Click(object sender, EventArgs e) { } private void radioButton1_CheckedChanged(object sender, EventArgs e) { if (radioButton1.Checked) { unitCond = "Unit"; unitUpdate = @"SELECT ConUnitNo FROM Console UNION SELECT MainUnitNo FROM MainUnit UNION SELECT SubUnitNo FROM SubUnit;"; // Må også innom unitBuilder! updateCombo(unitUpdate); } } private void radioButton2_CheckedChanged(object sender, EventArgs e) { DataClass combo = new DataClass(); project = comboBox3.Text; customer = comboBox2.Text; unit = comboBox1.Text; if (radioButton2.Checked) { unitCond = "ConUnitNo"; unitUpdate = unitBuild(); //unitUpdate = @"SELECT DISTINCT ConUnitNo FROM Console"; combo.comboConnect(unitUpdate, unit, customer, project, comboBox1); //updateCombo(unitUpdate); } } private void radioButton3_CheckedChanged(object sender, EventArgs e) { DataClass combo = new DataClass(); project = comboBox3.Text; customer = comboBox2.Text; unit = comboBox1.Text; if (radioButton3.Checked) { unitCond = "MainUnitNo"; unitUpdate = unitBuild(); //unitUpdate = @"SELECT DISTINCT MainUnitNo FROM MainUnit"; //updateCombo(unitUpdate); combo.comboConnect(unitUpdate, unit, customer, project, comboBox1); } } private void radioButton4_CheckedChanged(object sender, EventArgs e) { DataClass combo = new DataClass(); project = comboBox3.Text; customer = comboBox2.Text; unit = comboBox1.Text; if (radioButton4.Checked) { unitCond = "SubUnitNo"; unitUpdate = unitBuild(); //unitUpdate = @"SELECT DISTINCT SubUnitNo FROM SubUnit"; //updateCombo(unitUpdate); combo.comboConnect(unitUpdate, unit, customer, project, comboBox1); } } private void dataGridView1_CellMouseDoubleClick(object sender, DataGridViewCellMouseEventArgs e) { } public void updateCombo(String query) { DataClass data = new DataClass(); unitFill = query; comboBox1.Items.Clear(); data.comboFill(unitFill, comboBox1); //MessageBox.Show(query); } public String unitBuild(/*string cond*/) { string par1 = ""; string par2 = ""; string unitSql = ""; string[] sqlParts = cond.Split(','); if(sqlParts.Length < 2) { par1 = sqlParts[0]; if (!unitCond.Equals("ConUnitNo")) { unitSql = @"SELECT " + unitCond + " FROM " + unitCond.Replace("No", string.Empty) + " WHERE " + par1 + " = @var" + par1 + ";"; } else { unitSql = @"SELECT " + unitCond + " FROM Console WHERE " + par1 + " = @var" + par1 + ";"; } } else { par1 = sqlParts[0]; par2 = sqlParts[1]; if (!unitCond.Equals("ConUnitNo")) { unitSql = @"SELECT " + unitCond + " FROM " + unitCond.Replace("No", string.Empty) + " WHERE " + par1 + " = @var" + par1 + " AND " + par2 + " = @var" + par2 + ";"; } else { unitSql = @"SELECT " + unitCond + " FROM Console WHERE " + par1 + " = @var" + par1 + " AND " + par2 + " = @var" + par2 + ";"; } } if(sqlParts[0] == "") { if (!unitCond.Equals("ConUnitNo")) { unitSql = @"SELECT " + unitCond + " FROM " + unitCond.Replace("No", string.Empty) + ""; } else { unitSql = @"SELECT " + unitCond + " FROM Console"; } } return unitSql; } private void Form1_Load(object sender, EventArgs e) { } } }
695541ea05ea1ced63994ed0cae386807ab915a1
[ "C#" ]
1
C#
AmuBog/NSM-SerieNumberList
178f0d34b17bf0e019533baf666798822e3ce94c
40b5b427479d31f10e187e6da18b8b4024652c94
refs/heads/master
<repo_name>aitonjiaotong/BaMinEverything2016-4-8<file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Ddaibanpaotui/fragment_dabanpaotui/DaiBanPaoTuiFragment.java package com.aiton.bamin.changtukepiao.Ddaibanpaotui.fragment_dabanpaotui; import android.os.Bundle; import android.support.v4.app.Fragment; import android.support.v4.app.FragmentManager; import android.support.v4.app.FragmentPagerAdapter; import android.support.v4.view.ViewPager; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.BaseAdapter; import android.widget.GridView; import android.widget.ImageView; import android.widget.TextView; import com.aiton.bamin.changtukepiao.Bchangtukepiao.constant.ConstantTicket; import com.android.volley.VolleyError; import com.aiton.administrator.shane_library.shane.utils.GsonUtils; import com.aiton.administrator.shane_library.shane.utils.HTTPUtils; import com.aiton.administrator.shane_library.shane.utils.VolleyListener; import com.aiton.bamin.changtukepiao.Zeverything.everything_fragment.BannerFragment; import com.aiton.bamin.changtukepiao.Bchangtukepiao.models.about_banner.BannerInfo; import com.aiton.bamin.changtukepiao.Ddaibanpaotui.model.DaBanPaoTuiGridViewItemInfo; import com.aiton.bamin.changtukepiao.R; import com.aiton.bamin.changtukepiao.ZcustomView.ViewPagerIndicator; import com.google.gson.reflect.TypeToken; import java.lang.reflect.Type; import java.util.ArrayList; import java.util.List; public class DaiBanPaoTuiFragment extends Fragment implements View.OnClickListener { private List<BannerInfo> bannerData = new ArrayList<BannerInfo>(); private View mLayout; private ViewPager mViewPager_banner; private int mPagerCount = Integer.MAX_VALUE / 3; private boolean mDragging; private boolean isFrist = true; private ViewPagerIndicator mViewPagerIndicator; private ImageView mIv_dabanpaotui_back; private GridView mGv_dabanpaogui_classify; private List<DaBanPaoTuiGridViewItemInfo> mGridViewItemInfo = new ArrayList<DaBanPaoTuiGridViewItemInfo>(); private DaiBanPaoTuiGridViewAdapter mDaiBanPaoTuiGridViewAdapter; public DaiBanPaoTuiFragment() { } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { mLayout = inflater.inflate(R.layout.fragment_main_dai_ban_pao_tui, null); initBannerData(); initGridViewItemData(); findViewID(); initUI(); setListener(); return mLayout; } private void initGridViewItemData() { mGridViewItemInfo.clear(); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("跑腿", R.mipmap.daibanpaotui_01paotui)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("代驾", R.mipmap.daibanpaotui_02daijia)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("缴费罚款", R.mipmap.daibanpaotui_03jifeifakuan)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("送餐", R.mipmap.daibanpaotui_04songcan)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("家政保洁", R.mipmap.daibanpaotui_05jiazheng)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("维修疏通", R.mipmap.daibanpaotui_06weixiushutong)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("月嫂保姆", R.mipmap.daibanpaotui_07yuesaobaomu)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("开锁换锁", R.mipmap.daibanpaotui_08kaisuo)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("租房租赁", R.mipmap.daibanpaotui_09zufang)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("家教托管", R.mipmap.daibanpaotui_10jiajiao)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("鲜花蛋糕", R.mipmap.daibanpaotui_11xianhua)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("新房二手房", R.mipmap.daibanpaotui_12ershoufang)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("代买彩票", R.mipmap.daibanpaotui_13daimaicaipiao)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("除四害\n测甲醛", R.mipmap.daibanpaotui_14cejiaquan)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("罚款", R.mipmap.daibanpaotui_15fakuan)); mGridViewItemInfo.add(new DaBanPaoTuiGridViewItemInfo("更多", R.mipmap.daibanpaotui_16gengduo)); } private void setListener() { mIv_dabanpaotui_back.setOnClickListener(this); } private void initUI() { initBanner(); initGridView(); } private void initGridView() { mDaiBanPaoTuiGridViewAdapter = new DaiBanPaoTuiGridViewAdapter(); mGv_dabanpaogui_classify.setAdapter(mDaiBanPaoTuiGridViewAdapter); } private void findViewID() { mIv_dabanpaotui_back = (ImageView) mLayout.findViewById(R.id.iv_dabanpaotui_back); mViewPager_banner = (ViewPager) mLayout.findViewById(R.id.vp_headerview_pager); mViewPagerIndicator = (ViewPagerIndicator) mLayout.findViewById(R.id.ViewPagerIndicator); mGv_dabanpaogui_classify = (GridView) mLayout.findViewById(R.id.gv_dabanpaogui_classify); } private void initBannerData() { HTTPUtils.get(getActivity(), ConstantTicket.URL.GET_BANNER_IMG, new VolleyListener() { @Override public void onErrorResponse(VolleyError volleyError) { } @Override public void onResponse(String s) { Type type = new TypeToken<ArrayList<BannerInfo>>() { }.getType(); bannerData = GsonUtils.parseJSONArray(s, type); mViewPager_banner.setAdapter(new MyPagerAdapter(getChildFragmentManager())); } }); } /** * 设置广告条 */ private void initBanner() { mViewPager_banner.addOnPageChangeListener(new BannerOnPageChangeListener()); if (isFrist) { autoScroll(); } } private void autoScroll() { mViewPager_banner.setCurrentItem(mPagerCount / 2); mViewPager_banner.postDelayed(new Runnable() { public void run() { int position = mViewPager_banner.getCurrentItem() + 1; if (!mDragging) { isFrist = false; mViewPager_banner.setCurrentItem(position); } mViewPager_banner.postDelayed(this, 3000); } }, 3000); } @Override public void onClick(View v) { switch (v.getId()) { case R.id.iv_dabanpaotui_back: getActivity().finish(); break; } } class MyPagerAdapter extends FragmentPagerAdapter { public MyPagerAdapter(FragmentManager fm) { super(fm); } @Override public Fragment getItem(int position) { int pager_index = position % bannerData.size(); return new BannerFragment(pager_index, bannerData.get(pager_index).getUrl(), bannerData.get(pager_index).getUrl2()); } @Override public int getCount() { return mPagerCount; } } class BannerOnPageChangeListener implements ViewPager.OnPageChangeListener { public void onPageScrollStateChanged(int state) { switch (state) { case ViewPager.SCROLL_STATE_IDLE: mDragging = false; break; case ViewPager.SCROLL_STATE_DRAGGING: mDragging = true; break; case ViewPager.SCROLL_STATE_SETTLING: mDragging = false; break; default: break; } } public void onPageScrolled(int position, float arg1, int arg2) { position = position % 3; mViewPagerIndicator.move(arg1, position); } public void onPageSelected(int arg0) { } } class DaiBanPaoTuiGridViewAdapter extends BaseAdapter { @Override public int getCount() { return mGridViewItemInfo.size(); } @Override public Object getItem(int position) { return null; } @Override public long getItemId(int position) { return 0; } @Override public View getView(int position, View convertView, ViewGroup parent) { View layout = getActivity().getLayoutInflater().inflate(R.layout.layout_dabanpaotui_gridview_item, null); ImageView iv_item_img = (ImageView) layout.findViewById(R.id.iv_dabanpaotui_grid_item_img); TextView tv_item_name = (TextView) layout.findViewById(R.id.tv_dabanpaotui_grid_item_name); if (mGridViewItemInfo != null && mGridViewItemInfo.size() > 0) { iv_item_img.setImageResource(mGridViewItemInfo.get(position).getIconId()); tv_item_name.setText(mGridViewItemInfo.get(position).getName()); } return layout; } } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Cdachezuche/models/ChooseFristInfo.java package com.aiton.bamin.changtukepiao.Cdachezuche.models; import java.io.Serializable; /** * Created by Administrator on 2016/3/22. */ public class ChooseFristInfo implements Serializable { private String unitOfAccount; private String cityName; private long getCarTime; private long returnCarTime; private int hasDriver; private int driverID; private int carType; private int carID; private TypeCarListInfo.ContainsEntity.CarEntity car; private TypeCarListInfo.ContainsEntity.PlanEntity plan; public ChooseFristInfo(String unitOfAccount, String cityName, long getCarTime, long returnCarTime, int hasDriver, int driverID, int carType, int carID,TypeCarListInfo.ContainsEntity.CarEntity car,TypeCarListInfo.ContainsEntity.PlanEntity plan) { this.unitOfAccount = unitOfAccount; this.cityName = cityName; this.getCarTime = getCarTime; this.returnCarTime = returnCarTime; this.hasDriver = hasDriver; this.driverID = driverID; this.carType = carType; this.carID = carID; this.car = car; this.plan = plan; } public String getUnitOfAccount() { return unitOfAccount; } public void setUnitOfAccount(String unitOfAccount) { this.unitOfAccount = unitOfAccount; } public String getCityName() { return cityName; } public void setCityName(String cityName) { this.cityName = cityName; } public long getGetCarTime() { return getCarTime; } public void setGetCarTime(long getCarTime) { this.getCarTime = getCarTime; } public long getReturnCarTime() { return returnCarTime; } public void setReturnCarTime(long returnCarTime) { this.returnCarTime = returnCarTime; } public int getHasDriver() { return hasDriver; } public void setHasDriver(int hasDriver) { this.hasDriver = hasDriver; } public int getDriverID() { return driverID; } public void setDriverID(int driverID) { this.driverID = driverID; } public int getCarType() { return carType; } public void setCarType(int carType) { this.carType = carType; } public int getCarID() { return carID; } public void setCarID(int carID) { this.carID = carID; } public TypeCarListInfo.ContainsEntity.CarEntity getCar() { return car; } public void setCar(TypeCarListInfo.ContainsEntity.CarEntity car) { this.car = car; } public TypeCarListInfo.ContainsEntity.PlanEntity getPlan() { return plan; } public void setPlan(TypeCarListInfo.ContainsEntity.PlanEntity plan) { this.plan = plan; } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Gkuaidibao/activity/KuaiDiDetailActivity.java package com.aiton.bamin.changtukepiao.Gkuaidibao.activity; import android.content.Intent; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import android.view.KeyEvent; import android.view.View; import android.view.ViewGroup; import android.widget.BaseAdapter; import android.widget.ListView; import android.widget.TextView; import com.aiton.bamin.changtukepiao.Gkuaidibao.model.KuaiDiInfo; import com.aiton.bamin.changtukepiao.R; import java.text.SimpleDateFormat; import java.util.List; public class KuaiDiDetailActivity extends AppCompatActivity { private ListView mListView_kuaidi; private KuaiDiInfo mKuaiDiInfo; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_kuai_di_detail); initIntent(); findID(); } private void initIntent() { Intent intent = getIntent(); SimpleDateFormat sdf =new SimpleDateFormat("yyyy-MM-dd HH-mm-ss"); mKuaiDiInfo = (KuaiDiInfo) intent.getSerializableExtra("kuaiDiInfo"); List<KuaiDiInfo.TracesEntity> traces = mKuaiDiInfo.getTraces(); if (traces.size()==0){ traces.add(new KuaiDiInfo.TracesEntity(sdf.format(System.currentTimeMillis()),"此单无物流信息")); } } private void findID() { mListView_kuaidi = (ListView) findViewById(R.id.listView_kuaidi); mListView_kuaidi.setAdapter(new MyAdapter()); } class MyAdapter extends BaseAdapter { @Override public int getCount() { return mKuaiDiInfo.getTraces().size(); } @Override public Object getItem(int position) { return null; } @Override public long getItemId(int position) { return 0; } @Override public View getView(int position, View convertView, ViewGroup parent) { View inflate = getLayoutInflater().inflate(R.layout.kuaidi_item, null); TextView textView_detial = (TextView) inflate.findViewById(R.id.textView_detial); TextView textView_time = (TextView) inflate.findViewById(R.id.textView_time); List<KuaiDiInfo.TracesEntity> traces = mKuaiDiInfo.getTraces(); textView_detial.setText(traces.get(position).getAcceptStation()); textView_time.setText(traces.get(position).getAcceptTime()); return inflate; } } public void back(View view) { finish(); AnimFromRightToLeftOUT(); } /** * 从右往左结束动画 */ private void AnimFromRightToLeftOUT() { overridePendingTransition(R.anim.fade_in, R.anim.push_left_out); } public boolean onKeyDown(int keyCode, android.view.KeyEvent event) { if (keyCode == KeyEvent.KEYCODE_BACK) { finish(); AnimFromRightToLeftOUT(); } return super.onKeyDown(keyCode, event); } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Dchihewanle/BActivity.java package com.aiton.bamin.changtukepiao.Dchihewanle; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import android.view.View; import android.widget.Toast; import com.aiton.bamin.changtukepiao.R; public class BActivity extends AppCompatActivity { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_b); } public void reyingyingpian(View v) { Toast.makeText(BActivity.this, "跳转到该影片详情页面", Toast.LENGTH_SHORT).show(); } public void items_yingpian(View v) { Toast.makeText(BActivity.this, "跳转到该影片的团购详情页面", Toast.LENGTH_SHORT).show(); } public void back(View v) { finish(); } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Cdachezuche/DaCheZuCheFragment/OrderViewPagerFagment/DaiJiaOrderFragment.java package com.aiton.bamin.changtukepiao.Cdachezuche.DaCheZuCheFragment.OrderViewPagerFagment; import android.os.Bundle; import android.support.v4.app.Fragment; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.BaseAdapter; import android.widget.ListView; import com.aiton.bamin.changtukepiao.R; /** * A simple {@link Fragment} subclass. */ public class DaiJiaOrderFragment extends Fragment { private View mInflate; private ListView listView_daijia; public DaiJiaOrderFragment() { // Required empty public constructor } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { // Inflate the layout for this fragment if (mInflate==null){ mInflate = inflater.inflate(R.layout.fragment_dai_jia, null); findID(); initUI(); } //缓存的rootView需要判断是否已经被加过parent, 如果有parent需要从parent删除,要不然会发生这个rootview已经有parent的错误。 ViewGroup parent = (ViewGroup) mInflate.getParent(); if (parent != null) { parent.removeView(mInflate); } return mInflate; } private void initUI() { listView_daijia.setAdapter(new MyAdapter()); } private void findID() { listView_daijia = (ListView) mInflate.findViewById(R.id.listView_daijia); } class MyAdapter extends BaseAdapter { @Override public int getCount() { return 8; } @Override public Object getItem(int position) { return null; } @Override public long getItemId(int position) { return 0; } @Override public View getView(int position, View convertView, ViewGroup parent) { View inflate = getLayoutInflater(getArguments()).inflate(R.layout.dache_order_listitem, null); return inflate; } } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Ddaibanpaotui/activity_daibanpaotui/DaiJiaActivity.java package com.aiton.bamin.changtukepiao.Ddaibanpaotui.activity_daibanpaotui; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import com.aiton.bamin.changtukepiao.R; public class DaiJiaActivity extends AppCompatActivity { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_dai_jia); } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Cdachezuche/ZiJiaZuChe/ZiJiaZuCheActivity.java package com.aiton.bamin.changtukepiao.Cdachezuche.ZiJiaZuChe; import android.content.Intent; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import android.view.View; import android.widget.TextView; import android.widget.Toast; import com.aiton.bamin.changtukepiao.Cdachezuche.DaCheZhuCheActivity.StoresMapActivity; import com.aiton.bamin.changtukepiao.Cdachezuche.DaCheZhuCheActivity.ZuCheChooseCityActivity; import com.aiton.bamin.changtukepiao.Cdachezuche.constant_dachezuche.ConstantDaCheZuChe; import com.aiton.bamin.changtukepiao.Cdachezuche.models.ZiJiaZuCheChooseCityDate; import com.aiton.bamin.changtukepiao.R; import com.github.jjobes.slidedatetimepicker.SlideDateTimeListener; import com.github.jjobes.slidedatetimepicker.SlideDateTimePicker; import java.text.SimpleDateFormat; import java.util.Date; public class ZiJiaZuCheActivity extends AppCompatActivity implements View.OnClickListener { private TextView mTextView_take_car_city; private TextView textView_returnCarCity; private TextView textView_takeCarStore; private TextView textView_returnCarStore; private SimpleDateFormat mDateFormat = new SimpleDateFormat("MM-dd"); private SimpleDateFormat mTimeFormat = new SimpleDateFormat("EE HH:mm"); //取车position private int takeCarCityPosition = 1; //还车position private int returnCarCityPosition = 1; private SlideDateTimeListener StartDateTimePickerListener = new SlideDateTimeListener() { @Override public void onDateTimeSet(Date date) { boolean before = date.before(mCurrentDate); if (!before) { mCurrentDate = date; mStartDate = mDateFormat.format(date); textView_startDate.setText(mStartDate); mStartTime = mTimeFormat.format(date); textView_startTime.setText(mStartTime); } else { Toast.makeText(ZiJiaZuCheActivity.this, "预留两小时取车", Toast.LENGTH_SHORT).show(); } } }; private SlideDateTimeListener EndDateTimePickerListener = new SlideDateTimeListener() { @Override public void onDateTimeSet(Date date) { boolean before = date.before(OneDayDate); if (!before) { mReturnDate = date; mEndDate = mDateFormat.format(date); textView_endDate.setText(mEndDate); mEndTime = mTimeFormat.format(date); textView_endTime.setText(mEndTime); mDayCounts = (int) ((date.getTime() - mCurrentDate.getTime()) / (24L * 3600L * 1000L)); long leftTime = (date.getTime() - mCurrentDate.getTime()) % (24L * 3600L * 1000L); int leftHour = (int) (leftTime / (3600L * 1000L)); // 多出一个小时外才多算一天 if (leftHour < 1) { } else { mDayCounts = mDayCounts + 1; } textView_dayCounts.setText((mDayCounts) + "天"); } else { Toast.makeText(ZiJiaZuCheActivity.this, "至少租车一天", Toast.LENGTH_SHORT).show(); } } }; private TextView textView_startDate; private String mStartDate; private String mStartTime; private String mEndDate; private String mEndTime; private TextView textView_startTime; private Date mCurrentDate; private Date mReturnDate; private TextView textView_endDate; private TextView textView_endTime; private TextView textView_dayCounts; private Date OneDayDate; //默认租车两天 private int mDayCounts = 2; private String mTakeCarStore = "三明客运中心店"; private String mReturenCarStore = "三明客运中心店"; private String mTakeCarCity = "三明"; private String mReturnCarCity = "三明"; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_zi_jia_zu_che); findID(); initTime(); initUI(); setListener(); } private void initTime() { // 默认取车时间推迟两小时 long currentTimeMillis = System.currentTimeMillis() + 2 * 3600 * 1000; mCurrentDate = new Date(currentTimeMillis); mStartDate = mDateFormat.format(mCurrentDate); mStartTime = mTimeFormat.format(mCurrentDate); mReturnDate = new Date(currentTimeMillis + 48 * 3600 * 1000); OneDayDate = new Date(currentTimeMillis + 24 * 3600 * 1000); mEndDate = mDateFormat.format(mReturnDate); mEndTime = mTimeFormat.format(mReturnDate); } private void initUI() { textView_startDate.setText(mStartDate); textView_startTime.setText(mStartTime); textView_endDate.setText(mEndDate); textView_endTime.setText(mEndTime); mTextView_take_car_city.setText(mTakeCarCity); textView_returnCarCity.setText(mReturnCarCity); textView_takeCarStore.setText(mTakeCarStore); textView_returnCarStore.setText(mReturenCarStore); } private void findID() { mTextView_take_car_city = (TextView) findViewById(R.id.textView_take_car_city); textView_returnCarCity = (TextView) findViewById(R.id.textView_returnCarCity); textView_takeCarStore = (TextView) findViewById(R.id.textView_takeCarStore); textView_returnCarStore = (TextView) findViewById(R.id.textView_returnCarStore); textView_startDate = (TextView) findViewById(R.id.textView_startDate); textView_startTime = (TextView) findViewById(R.id.textView_startTime); textView_endDate = (TextView) findViewById(R.id.textView_endDate); textView_endTime = (TextView) findViewById(R.id.textView_endTime); textView_dayCounts = (TextView) findViewById(R.id.textView_dayCounts); } private void setListener() { findViewById(R.id.button_lijixuanche).setOnClickListener(this); findViewById(R.id.imageView_back).setOnClickListener(this); findViewById(R.id.rela_take_car_city).setOnClickListener(this); findViewById(R.id.rela_return_car_city).setOnClickListener(this); findViewById(R.id.rela_zijiazuche_takecar_store).setOnClickListener(this); findViewById(R.id.rela_zijiazuche_returncar_store).setOnClickListener(this); findViewById(R.id.linear_startTime).setOnClickListener(this); findViewById(R.id.linear_endDate).setOnClickListener(this); } @Override protected void onActivityResult(int requestCode, int resultCode, Intent data) { super.onActivityResult(requestCode, resultCode, data); if (data != null) { if (requestCode == ConstantDaCheZuChe.RequestCode.ZIJIAZUCHE_TAKE_CAR_CITY && resultCode == ConstantDaCheZuChe.ResultCode.CHOOSE_CITY) { mTakeCarCity = data.getStringExtra(ConstantDaCheZuChe.IntentKey.CHOOSE_CITY); mTextView_take_car_city.setText(mTakeCarCity); } if (requestCode == ConstantDaCheZuChe.RequestCode.ZIJIAZUCHE_RETURN_CAR_CITY && resultCode == ConstantDaCheZuChe.ResultCode.CHOOSE_CITY) { mReturnCarCity = data.getStringExtra(ConstantDaCheZuChe.IntentKey.CHOOSE_CITY); textView_returnCarCity.setText(mReturnCarCity); } if (requestCode == ConstantDaCheZuChe.RequestCode.JIGOUZUCHE_TAKE_CAR_MAP && resultCode == ConstantDaCheZuChe.ResultCode.CHOOSE_STORE) { mTakeCarStore = data.getStringExtra(ConstantDaCheZuChe.IntentKey.STORES_MAP_KEY); takeCarCityPosition=data.getIntExtra(ConstantDaCheZuChe.IntentKey.STORES_ID_KEY,1); textView_takeCarStore.setText(mTakeCarStore); } if (requestCode == ConstantDaCheZuChe.RequestCode.JIGOUZUCHE_RETURN_CAR_MAP && resultCode == ConstantDaCheZuChe.ResultCode.CHOOSE_STORE) { mReturenCarStore = data.getStringExtra(ConstantDaCheZuChe.IntentKey.STORES_MAP_KEY); returnCarCityPosition=data.getIntExtra(ConstantDaCheZuChe.IntentKey.STORES_ID_KEY,1); textView_returnCarStore.setText(mReturenCarStore); } } } @Override public void onClick(View v) { Intent intent = new Intent(); switch (v.getId()) { case R.id.button_lijixuanche: ZiJiaZuCheChooseCityDate ziJiaZuCheChooseCityDate = new ZiJiaZuCheChooseCityDate(mCurrentDate.getTime(), mReturnDate.getTime(), takeCarCityPosition, returnCarCityPosition, mTakeCarStore, mReturenCarStore, mDayCounts, mStartDate, mStartTime, mEndDate, mEndTime); intent.setClass(this, ZuCheChooseCarTypeActivity.class); intent.putExtra("ZiJiaZuCheChooseCityDate", ziJiaZuCheChooseCityDate); startActivity(intent); break; case R.id.linear_startTime: //默认推迟两小时 mCurrentDate = new Date(System.currentTimeMillis() + 2 * 3600 * 1000); new SlideDateTimePicker.Builder(getSupportFragmentManager()).setListener(StartDateTimePickerListener).setInitialDate(mCurrentDate) // .setMinDate(minDate) // .setMaxDate(maxDate) .setIs24HourTime(true) // .setTheme(SlideDateTimePicker.HOLO_DARK) // .setIndicatorColor(Color.parseColor("#990000")) .build().show(); break; case R.id.linear_endDate: new SlideDateTimePicker.Builder(getSupportFragmentManager()).setListener(EndDateTimePickerListener).setInitialDate(mReturnDate) // .setMinDate(minDate) // .setMaxDate(maxDate) .setIs24HourTime(true) // .setTheme(SlideDateTimePicker.HOLO_DARK) // .setIndicatorColor(Color.parseColor("#990000")) .build().show(); break; case R.id.imageView_back: finish(); break; case R.id.rela_take_car_city: //跳转到城市选择列表界面 intent.setClass(this, ZuCheChooseCityActivity.class); startActivityForResult(intent, ConstantDaCheZuChe.RequestCode.ZIJIAZUCHE_TAKE_CAR_CITY); break; case R.id.rela_return_car_city: //跳转到城市选择列表界面 intent.setClass(this, ZuCheChooseCityActivity.class); startActivityForResult(intent, ConstantDaCheZuChe.RequestCode.ZIJIAZUCHE_RETURN_CAR_CITY); break; case R.id.rela_zijiazuche_takecar_store: //跳转到门店地图选择界面 intent.setClass(this, StoresMapActivity.class); intent.putExtra(ConstantDaCheZuChe.IntentKey.CITY, mTakeCarCity); intent.putExtra(ConstantDaCheZuChe.IntentKey.GET_MAP_LOC_KEY,ConstantDaCheZuChe.IntentKey.GET_MAP_LOC_GET); startActivityForResult(intent, ConstantDaCheZuChe.RequestCode.JIGOUZUCHE_TAKE_CAR_MAP); break; case R.id.rela_zijiazuche_returncar_store: //跳转到门店地图选择界面 intent.setClass(this, StoresMapActivity.class); intent.putExtra(ConstantDaCheZuChe.IntentKey.CITY, mReturnCarCity); intent.putExtra(ConstantDaCheZuChe.IntentKey.GET_MAP_LOC_KEY,ConstantDaCheZuChe.IntentKey.GET_MAP_LOC_RETURN); startActivityForResult(intent, ConstantDaCheZuChe.RequestCode.JIGOUZUCHE_RETURN_CAR_MAP); break; } } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Abusline/busline_aiton/MapActivity.java package com.aiton.bamin.changtukepiao.Abusline.busline_aiton; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import android.view.View; import com.baidu.location.BDLocation; import com.baidu.location.BDLocationListener; import com.baidu.location.LocationClient; import com.baidu.location.LocationClientOption; import com.baidu.mapapi.map.BaiduMap; import com.baidu.mapapi.map.BitmapDescriptor; import com.baidu.mapapi.map.BitmapDescriptorFactory; import com.baidu.mapapi.map.MapStatusUpdate; import com.baidu.mapapi.map.MapStatusUpdateFactory; import com.baidu.mapapi.map.MapView; import com.baidu.mapapi.map.MyLocationConfiguration; import com.baidu.mapapi.map.MyLocationData; import com.baidu.mapapi.map.OverlayOptions; import com.baidu.mapapi.map.TextOptions; import com.baidu.mapapi.model.LatLng; import com.aiton.bamin.changtukepiao.R; public class MapActivity extends AppCompatActivity implements View.OnClickListener { private BaiduMap mBaiduMap; private MapView mBmapView; //定位相关 public LocationClient mLocationClient = null; public BDLocationListener myListener = new MyLocationListener(); private boolean isFirstIn = true; private double mLatitude; private double mLongitude; private String addressStr; //自定义定位图标 private BitmapDescriptor mIconLocation; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_map); initUI(); initLocation(); setListener(); } private void setListener() { findViewById(R.id.back).setOnClickListener(this); } private void initUI() { mBmapView = (MapView) findViewById(R.id.bmapView); mBaiduMap = mBmapView.getMap(); //构造一个更新地图的msu对象,然后设置该对象为缩放等级(比例尺),最后设置地图状态。 MapStatusUpdate msu = MapStatusUpdateFactory.zoomTo(18.0f); mBaiduMap.setMapStatus(msu); } private void initLocation() { mLocationClient = new LocationClient(getApplicationContext()); //声明LocationClient类 mLocationClient.registerLocationListener(myListener); //注册监听函数 LocationClientOption option = new LocationClientOption(); option.setLocationMode(LocationClientOption.LocationMode.Hight_Accuracy);//可选,默认高精度,设置定位模式,高精度,低功耗,仅设备 option.setCoorType("bd09ll");//可选,默认gcj02,设置返回的定位结果坐标系 int span = 5000; option.setScanSpan(span);//可选,默认0,即仅定位一次,设置发起定位请求的间隔需要大于等于1000ms才是有效的 option.setIsNeedAddress(true);//可选,设置是否需要地址信息,默认不需要 option.setOpenGps(true);//可选,默认false,设置是否使用gps option.setLocationNotify(true);//可选,默认false,设置是否当gps有效时按照1S1次频率输出GPS结果 option.setIsNeedLocationDescribe(true);//可选,默认false,设置是否需要位置语义化结果,可以在BDLocation.getLocationDescribe里得到,结果类似于“在北京天安门附近” option.setIsNeedLocationPoiList(true);//可选,默认false,设置是否需要POI结果,可以在BDLocation.getPoiList里得到 option.setIgnoreKillProcess(false);//可选,默认false,定位SDK内部是一个SERVICE,并放到了独立进程,设置是否在stop的时候杀死这个进程,默认杀死 option.SetIgnoreCacheException(false);//可选,默认false,设置是否收集CRASH信息,默认收集 option.setEnableSimulateGps(false);//可选,默认false,设置是否需要过滤gps仿真结果,默认需要 mLocationClient.setLocOption(option); //初始化定位的图标 mIconLocation = BitmapDescriptorFactory.fromResource(R.mipmap.ico_location_big_highlight_map); } @Override public void onStart() { super.onStart(); // 开启定位图层 mBaiduMap.setMyLocationEnabled(true); //开启定位 if (!mLocationClient.isStarted()) { mLocationClient.start(); } } @Override public void onStop() { super.onStop(); //停止定位图层 mBaiduMap.setMyLocationEnabled(false); //停止定位 mLocationClient.stop(); } @Override public void onDestroy() { super.onDestroy(); //在activity执行onDestroy时执行mMapView.onDestroy(),实现地图生命周期管理 mBmapView.onDestroy(); } @Override public void onResume() { super.onResume(); //在activity执行onResume时执行mMapView. onResume (),实现地图生命周期管理 mBmapView.onResume(); } @Override public void onPause() { super.onPause(); //在activity执行onPause时执行mMapView. onPause (),实现地图生命周期管理 mBmapView.onPause(); } @Override public void onClick(View v) { switch (v.getId()) { case R.id.back: finish(); break; } } // 定位的监听回调 private class MyLocationListener implements BDLocationListener { @Override public void onReceiveLocation(BDLocation bdLocation) { // 构造定位数据 MyLocationData data = new MyLocationData.Builder() .accuracy(bdLocation.getRadius())//获得半径 .latitude(bdLocation.getLatitude())//获得经度 .longitude(bdLocation.getLongitude())//获得纬度 .build(); //设置定位数据 mBaiduMap.setMyLocationData(data); //设置自定义图标 MyLocationConfiguration config = new MyLocationConfiguration(MyLocationConfiguration.LocationMode.NORMAL, true, mIconLocation); mBaiduMap.setMyLocationConfigeration(config); //初始化经纬度 mLatitude = bdLocation.getLatitude(); mLongitude = bdLocation.getLongitude(); //第一次进入,定位到所在位置 if (isFirstIn) { LatLng latLng = new LatLng(bdLocation.getLatitude(), bdLocation.getLongitude()); MapStatusUpdate msu = MapStatusUpdateFactory.newLatLng(latLng); mBaiduMap.animateMapStatus(msu); isFirstIn = false; addressStr = bdLocation.getAddrStr(); LatLng llText = new LatLng(mLatitude, mLongitude); //构建文字Option对象,用于在地图上添加文字 OverlayOptions textOption = new TextOptions() .fontSize(24) .fontColor(0xFF000000) .text(addressStr) .position(llText); //在地图上添加该文字对象并显示 mBaiduMap.addOverlay(textOption); } } } } <file_sep>/八闽出行/build.gradle apply plugin: 'com.android.application' android { compileSdkVersion 23 buildToolsVersion '23.0.1' defaultConfig { applicationId "com.aiton.bamin.changtukepiao" minSdkVersion 15 targetSdkVersion 23 versionCode 17 versionName "1.1.6" manifestPlaceholders = [ //个人推送相关信息 GETUI_APP_ID : "QGOf2m2g5b8qaAyYHmKZx2", GETUI_APP_KEY : "<KEY>", GETUI_APP_SECRET: "<KEY>", PACKAGE_NAME : applicationId ] } buildTypes { release { minifyEnabled true proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro' } } } buildscript { repositories { mavenCentral() } dependencies { // classpath 'com.android.tools.build:gradle:' + gradle build tool version } } dependencies { compile fileTree(include: ['*.jar'], dir: 'libs') compile project(':SMSSDK') compile files('libs/dom4j-1.6.1.jar') compile project(':circleimageview') compile files('libs/alipaySdk-20160120.jar') compile project(':library') compile files('libs/BaiduLBS_Android.jar') compile project(':lib_slidingmenu') compile project(':CheckBox') compile project(':slideDateTimePicker') compile files('libs/zxing.jar') compile files('libs/libammsdk.jar') compile 'com.android.support:appcompat-v7:23.1.1' compile 'com.android.support:support-v4:23.1.1' compile 'com.umeng.analytics:analytics:latest.integration' compile project(':lib_shane') compile 'com.getui:sdk:2.7.0.0' compile 'com.getui:ext:2.0.3' compile files('libs/alisdk-ut-5.jar') compile(name: 'feedbackSdk', ext: 'aar') //主反馈功能 compile files('libs/securityguard-3.1.27.jar') //提供数据统计支持 compile files('libs/alisdk-ut-5.jar') //提供数据统计支持 compile project(':library_refreash') compile files('libs/butterknife-7.0.1.jar') } allprojects { repositories { jcenter() maven { url "http://mvn.gt.igexin.com/nexus/content/repositories/releases/" } } } repositories { mavenCentral() flatDir { dirs 'libs' } } android { packagingOptions { // exclude 'META-INF/DEPENDENCIES' // exclude 'META-INF/NOTICE' // exclude 'META-INF/LICENSE' exclude 'META-INF/LICENSE.txt' exclude 'META-INF/NOTICE.txt' } // ... } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Hbaoxianchaoshi/model/SlidingMenuListviewInfo.java package com.aiton.bamin.changtukepiao.Hbaoxianchaoshi.model; /** * Created by zjb on 2016/1/29. */ public class SlidingMenuListviewInfo { private int imgRes; private String str; public int getImgRes() { return imgRes; } public void setImgRes(int imgRes) { this.imgRes = imgRes; } public String getStr() { return str; } public void setStr(String str) { this.str = str; } public SlidingMenuListviewInfo(int imgRes, String str) { this.imgRes = imgRes; this.str = str; } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Hbaoxianchaoshi/SetActivity.java package com.aiton.bamin.changtukepiao.Hbaoxianchaoshi; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import android.view.View; import com.aiton.bamin.changtukepiao.R; public class SetActivity extends AppCompatActivity implements View.OnClickListener { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_set); setListener(); } private void setListener() { findViewById(R.id.back).setOnClickListener(this); } @Override public void onClick(View v) { switch (v.getId()){ case R.id.back: finish(); break; } } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Dchihewanle/MainCheHeWanLeActivity.java package com.aiton.bamin.changtukepiao.Dchihewanle; import android.content.Intent; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import android.view.View; import android.widget.LinearLayout; import android.widget.Toast; import com.aiton.bamin.changtukepiao.R; import java.util.ArrayList; import java.util.List; public class MainCheHeWanLeActivity extends AppCompatActivity { private Intent intent = new Intent(); private List<Class> mList = new ArrayList<Class>(); private LinearLayout mLl_bg; private LinearLayout mLl_for_tab01; private LinearLayout mLl_for_tab02_and_tab03; private LinearLayout mLl_for_tab04; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main_chihewanle); mList.add(AActivity.class); mList.add(BActivity.class); mList.add(CActivity.class); mList.add(DActivity.class); mList.add(EActivity.class); mList.add(FActivity.class); mList.add(GActivity.class); mList.add(HActivity.class); mList.add(IActivity.class); mList.add(JActivity.class); mList.add(KActivity.class); mList.add(LActivity.class); mLl_bg = (LinearLayout) findViewById(R.id.ll_bg); mLl_for_tab01 = (LinearLayout) findViewById(R.id.ll_for_tab01); mLl_for_tab02_and_tab03 = (LinearLayout) findViewById(R.id.ll_for_tab02_and_tab03); mLl_for_tab04 = (LinearLayout) findViewById(R.id.ll_for_tab04); mLl_for_tab01.setVisibility(View.VISIBLE); mLl_for_tab02_and_tab03.setVisibility(View.GONE); mLl_for_tab04.setVisibility(View.GONE); } public void meishi(View v) { intent.setClass(MainCheHeWanLeActivity.this, mList.get(0)); startActivity(intent); } public void meiyuan(View v) { intent.setClass(MainCheHeWanLeActivity.this, MeiYuanActivity.class); startActivity(intent); } public void dianying(View v) { intent.setClass(MainCheHeWanLeActivity.this, mList.get(1)); startActivity(intent); } public void jiudian(View v) { intent.setClass(MainCheHeWanLeActivity.this, mList.get(2)); startActivity(intent); } public void waimai(View v) { intent.setClass(MainCheHeWanLeActivity.this, mList.get(3)); startActivity(intent); } public void KTV(View v) { intent.setClass(MainCheHeWanLeActivity.this, mList.get(4)); startActivity(intent); } public void didaomeishi(View v) { intent.setClass(MainCheHeWanLeActivity.this, mList.get(5)); startActivity(intent); } public void meinv(View v) { intent.setClass(MainCheHeWanLeActivity.this, mList.get(6)); startActivity(intent); } public void yundongjianshen(View v) { intent.setClass(MainCheHeWanLeActivity.this, mList.get(7)); startActivity(intent); } public void zuyuanmo(View v) { intent.setClass(MainCheHeWanLeActivity.this, mList.get(8)); startActivity(intent); } public void xianhuadangao(View v) { intent.setClass(MainCheHeWanLeActivity.this, mList.get(9)); startActivity(intent); } public void jingdianmenpiao(View v) { intent.setClass(MainCheHeWanLeActivity.this, mList.get(10)); startActivity(intent); } public void more(View v) { intent.setClass(MainCheHeWanLeActivity.this, mList.get(11)); startActivity(intent); } public void btn_detail(View v) { Toast.makeText(MainCheHeWanLeActivity.this, "跳转到商家详情页面", Toast.LENGTH_SHORT).show(); } public void login_out(View v) { mLl_bg.setBackgroundResource(R.mipmap.a05); } public void my_about(View v) { Toast.makeText(MainCheHeWanLeActivity.this, "跳转到优惠券及常见问题相关页面", Toast.LENGTH_SHORT).show(); } public void my_order(View v) { Toast.makeText(MainCheHeWanLeActivity.this, "跳转到我的订单详情", Toast.LENGTH_SHORT).show(); } public void tab01(View v) { mLl_for_tab01.setVisibility(View.VISIBLE); mLl_for_tab02_and_tab03.setVisibility(View.GONE); mLl_for_tab04.setVisibility(View.GONE); mLl_bg.setBackgroundResource(R.mipmap.a011); } public void tab02(View v) { mLl_for_tab01.setVisibility(View.GONE); mLl_for_tab02_and_tab03.setVisibility(View.VISIBLE); mLl_for_tab04.setVisibility(View.GONE); mLl_bg.setBackgroundResource(R.mipmap.a022); } public void tab03(View v) { mLl_for_tab01.setVisibility(View.GONE); mLl_for_tab02_and_tab03.setVisibility(View.VISIBLE); mLl_for_tab04.setVisibility(View.GONE); mLl_bg.setBackgroundResource(R.mipmap.a03); } public void tab04(View v) { mLl_for_tab01.setVisibility(View.GONE); mLl_for_tab02_and_tab03.setVisibility(View.GONE); mLl_for_tab04.setVisibility(View.VISIBLE); mLl_bg.setBackgroundResource(R.mipmap.a04); } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Abusline/busline_aiton/MainBusLineActivity.java package com.aiton.bamin.changtukepiao.Abusline.busline_aiton; import android.os.Bundle; import android.support.v4.app.FragmentTabHost; import android.support.v7.app.AppCompatActivity; import android.view.View; import com.aiton.bamin.changtukepiao.R; import com.aiton.bamin.changtukepiao.Abusline.busline_aition_fragment.OffRemindFragment; import com.aiton.bamin.changtukepiao.Abusline.busline_aition_fragment.RealTimeRemoteFragment; import com.aiton.bamin.changtukepiao.Abusline.busline_aition_fragment.RechargeFragment; import com.aiton.bamin.changtukepiao.Abusline.busline_aition_fragment.RoutePlaneFragment; public class MainBusLineActivity extends AppCompatActivity { private FragmentTabHost mTabHost; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main_busline); mTabHost = (FragmentTabHost) findViewById(android.R.id.tabhost); mTabHost.setup(this, getSupportFragmentManager(), R.id.realtabcontent); View tab_menu_layout_nearby = getLayoutInflater().inflate(R.layout.tab_menu_layout_transfer, null); View tab_menu_layout_collection = getLayoutInflater().inflate(R.layout.tab_menu_layout_nearby, null); View tab_menu_layout_transfer = getLayoutInflater().inflate(R.layout.tab_menu_layout_collection, null); View tab_menu_layout_me = getLayoutInflater().inflate(R.layout.tab_menu_layout_me, null); mTabHost.addTab(mTabHost.newTabSpec("simple").setIndicator(tab_menu_layout_nearby), RoutePlaneFragment.class, null); mTabHost.addTab(mTabHost.newTabSpec("contacts").setIndicator(tab_menu_layout_collection), RealTimeRemoteFragment.class, null); mTabHost.addTab(mTabHost.newTabSpec("custom").setIndicator(tab_menu_layout_transfer), RechargeFragment.class, null); mTabHost.addTab(mTabHost.newTabSpec("throttle").setIndicator(tab_menu_layout_me), OffRemindFragment.class, null); } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Ddaibanpaotui/activity_daibanpaotui/PaoTuiActivity.java package com.aiton.bamin.changtukepiao.Ddaibanpaotui.activity_daibanpaotui; import android.content.Intent; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import android.view.View; import com.aiton.bamin.changtukepiao.R; public class PaoTuiActivity extends AppCompatActivity { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_pao_tui); } public void paotuishungou(View view){ Intent intent=new Intent(); intent.setClass(PaoTuiActivity.this, PaoTuiShunGouActivity.class); startActivity(intent); } public void paotuiqusong(View view){ Intent intent=new Intent(); intent.setClass(PaoTuiActivity.this, PaoTuiQuSongActivity.class); startActivity(intent); } public void paotuipaidui(View view){ Intent intent=new Intent(); intent.setClass(PaoTuiActivity.this, PaoTuiPaiDuiActivity.class); startActivity(intent); } public void sirendingzhi(View view){ Intent intent=new Intent(); intent.setClass(PaoTuiActivity.this, PaoTuiQuSongActivity.class); startActivity(intent); } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Ddaibanpaotui/model/DaBanPaoTuiGridViewItemInfo.java package com.aiton.bamin.changtukepiao.Ddaibanpaotui.model; /** * Created by Administrator on 2016/3/18. */ public class DaBanPaoTuiGridViewItemInfo { String name; int iconId; public DaBanPaoTuiGridViewItemInfo() { } public DaBanPaoTuiGridViewItemInfo(String name, int iconId) { this.name = name; this.iconId = iconId; } public String getName() { return name; } public void setName(String name) { this.name = name; } public int getIconId() { return iconId; } public void setIconId(int iconId) { this.iconId = iconId; } @Override public String toString() { return "DaBanPaoTuiGridViewItemInfo{" + "name='" + name + '\'' + ", iconId=" + iconId + '}'; } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Dchihewanle/EActivity.java package com.aiton.bamin.changtukepiao.Dchihewanle; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import android.view.View; import android.widget.Toast; import com.aiton.bamin.changtukepiao.R; public class EActivity extends AppCompatActivity { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_e); } public void btn_back(View v) { finish(); } public void btnbaidu(View v) { Toast.makeText(EActivity.this,"跳转至商品详情页面",Toast.LENGTH_SHORT).show(); } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Abusline/busline_aiton/InputLocActivity.java package com.aiton.bamin.changtukepiao.Abusline.busline_aiton; import android.content.Intent; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import android.view.View; import android.view.ViewGroup; import android.widget.AdapterView; import android.widget.BaseAdapter; import android.widget.EditText; import android.widget.ImageView; import android.widget.LinearLayout; import android.widget.ListView; import android.widget.TextView; import com.aiton.bamin.changtukepiao.Abusline.busline_aition_constants.ConstantBusLine; import com.baidu.mapapi.search.core.PoiInfo; import com.baidu.mapapi.search.core.SearchResult; import com.baidu.mapapi.search.poi.OnGetPoiSearchResultListener; import com.baidu.mapapi.search.poi.PoiCitySearchOption; import com.baidu.mapapi.search.poi.PoiDetailResult; import com.baidu.mapapi.search.poi.PoiResult; import com.baidu.mapapi.search.poi.PoiSearch; import com.aiton.bamin.changtukepiao.R; import java.util.ArrayList; import java.util.List; public class InputLocActivity extends AppCompatActivity implements View.OnClickListener { private TextView mInput_cancle; private ImageView mIv_search; private EditText mIntput_edit; /*----百度公交搜索相关----*/ private PoiSearch mSearch; private LinearLayout mLl_for_loading; private TextView mTv_results_suggest; private ListView mLv_search_result; private List<PoiInfo> mAllPoi = new ArrayList<PoiInfo>(); private MyAdapter mAdapter; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_input_loc); initUI(); setListener(); initBaiDu(); } private void initBaiDu() { mSearch = PoiSearch.newInstance(); } private void setListener() { mInput_cancle.setOnClickListener(this); mIv_search.setOnClickListener(this); } private void initUI() { mInput_cancle = (TextView) findViewById(R.id.input_cancle); mIntput_edit = (EditText) findViewById(R.id.input_edit); Intent intent = getIntent(); String intputType = intent.getStringExtra(ConstantBusLine.IntentKey.INPUT_TYPE_KEY); if ("myLoc".equals(intputType)) { mIntput_edit.setHint("输入起点"); } else if ("end".equals(intputType)) { mIntput_edit.setHint("输入终点"); } mIv_search = (ImageView) findViewById(R.id.imageView_search); mLl_for_loading = (LinearLayout) findViewById(R.id.ll_for_loading); mTv_results_suggest = (TextView) findViewById(R.id.tv_results_suggest); mLv_search_result = (ListView) findViewById(R.id.lv_search_result); mAdapter = new MyAdapter(); mLv_search_result.setAdapter(mAdapter); mLv_search_result.setOnItemClickListener(new AdapterView.OnItemClickListener() { @Override public void onItemClick(AdapterView<?> parent, View view, int position, long id) { if (mAllPoi != null && mAllPoi.size() > 0) { Intent data = new Intent(); if (mAllPoi.get(position).type == PoiInfo.POITYPE.BUS_LINE) { } if (mAllPoi.get(position).type == PoiInfo.POITYPE.BUS_STATION) { //TODO 跳转到本站公交车所经过的所有公交线路 data.putExtra("choosed",mAllPoi.get(position).name); setResult(ConstantBusLine.ResultCode.CHOOSED, data); finish(); } if (mAllPoi.get(position).type == PoiInfo.POITYPE.POINT) { //TODO 跳转到地图界面,并定位当前位置后显示附近的公交站 data.putExtra("choosed",mAllPoi.get(position).name); setResult(ConstantBusLine.ResultCode.CHOOSED, data); finish(); } } } }); } @Override public void onClick(View v) { switch (v.getId()) { case R.id.input_cancle: finish(); break; case R.id.imageView_search: mLl_for_loading.setVisibility(View.VISIBLE);//显示加载数据提示 mTv_results_suggest.setVisibility(View.GONE);//隐藏无查询结果提示文字 mLv_search_result.setVisibility(View.GONE);//显示查询结果的列表 mSearch.searchInCity(new PoiCitySearchOption().city(ConstantBusLine.Str.CITY).keyword(getUseInput())); mSearch.setOnGetPoiSearchResultListener(new OnGetPoiSearchResultListener() { @Override public void onGetPoiResult(PoiResult poiResult) { if (poiResult == null || poiResult.error != SearchResult.ERRORNO.NO_ERROR) { //显示无查询结果提示文字 mTv_results_suggest.setVisibility(View.VISIBLE); mLl_for_loading.setVisibility(View.GONE); mLv_search_result.setVisibility(View.GONE); return; } mTv_results_suggest.setVisibility(View.GONE);//隐藏无查询结果提示文字 mLl_for_loading.setVisibility(View.GONE);//隐藏数据加载提示 mLv_search_result.setVisibility(View.VISIBLE);//显示查询结果的列表 mAllPoi.clear(); //遍历所有POI,找到类型为公交站的POI,将其添加到容器中 for (int i = 0; i < poiResult.getAllPoi().size(); i++) { if (poiResult.getAllPoi().get(i).type == PoiInfo.POITYPE.BUS_STATION) { mAllPoi.add(poiResult.getAllPoi().get(i)); } } //遍历所有POI,找到类型为位置的POI,将其添加到容器中 for (int i = 0; i < poiResult.getAllPoi().size(); i++) { if (poiResult.getAllPoi().get(i).type == PoiInfo.POITYPE.POINT) { mAllPoi.add(poiResult.getAllPoi().get(i)); } } mAdapter.notifyDataSetChanged(); } @Override public void onGetPoiDetailResult(PoiDetailResult poiDetailResult) { } }); break; } } /*----获取用户输入的信息内容----*/ public String getUseInput() { String s = mIntput_edit.getText().toString(); return s; } class MyAdapter extends BaseAdapter { @Override public int getCount() { return mAllPoi.size(); } @Override public Object getItem(int position) { return null; } @Override public long getItemId(int position) { return 0; } @Override public View getView(int position, View convertView, ViewGroup parent) { View layout = getLayoutInflater().inflate(R.layout.busline_search_station, null); ImageView iv_type = (ImageView) layout.findViewById(R.id.iv_input_type); TextView tv_title = (TextView) layout.findViewById(R.id.tv_input_title); TextView tv_subtitle = (TextView) layout.findViewById(R.id.tv_input_subtitle); if (mAllPoi != null && mAllPoi.size() > 0) { if( mAllPoi.get(position).type == PoiInfo.POITYPE.BUS_STATION) { iv_type.setImageResource(R.mipmap.icon_station); tv_title.setText(mAllPoi.get(position).name); tv_subtitle.setVisibility(View.GONE); } if(mAllPoi.get(position).type == PoiInfo.POITYPE.POINT) { iv_type.setImageResource(R.mipmap.icon_poi); tv_title.setText(mAllPoi.get(position).name); tv_subtitle.setVisibility(View.VISIBLE); tv_subtitle.setText(mAllPoi.get(position).address); } } return layout; } } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Cdachezuche/constant_dachezuche/ConstantDaCheZuChe.java package com.aiton.bamin.changtukepiao.Cdachezuche.constant_dachezuche; import com.aiton.bamin.changtukepiao.Zeverything.constant.EverythingConstant; /** * Created by Administrator on 2016/3/14. */ public class ConstantDaCheZuChe { public class URL { //取车城市列表接口地址 传入的参数:page 默认值0 public static final String CITY_LIST = EverythingConstant.HOST + "/bmpw/zc/store/loadcities"; //机构认证服务接口 String code,String password,code为机构编号 password为密码,返回true和false public static final String DACHEZUCHE_COMFIRE_UNIT_INFO = EverythingConstant.HOST + "/bmpw/zc/institutions/checkinstitutions"; //司机列表接口地址 传入的参数:page 默认值0 public static final String DRIVER_LIST = EverythingConstant.HOST + "/bmpw/zc/driver/loadfreedriver"; //获取车辆信息传入参数Integer lei public static final String GET_CAR_INFO = EverythingConstant.HOST + "/bmpw/zc/order/loadcarbylei"; //根据车辆不同类型加载该类型下所有车辆的信息 传入参数 lei=0 page=0 0:公务车 1:商务车 2:执法车 3:越野车 4:皮卡 5:客车 public static final String GET_CAR_LIST = EverythingConstant.HOST + "/bmpw/zc/car/loadcarsbylei"; //获取门店地址Marker显示于地图上 传入String city,城市名 public static final String GET_STORES_MARKERS_LATLNG = EverythingConstant.HOST + "/bmpw/zc/store/loadstorebycity"; //机构租车提交订单 /** * 参数:Integer plan_id;//租赁计划id Timestamp zuchuDate; //租出时间 Timestamp planReturnDate;//计划还车时间 * Double price;//总价 Integer status;//0:企业租车 1;个人租车 Double insurance;//保险金额 Integer getCar;//取车地点 * Integer returnCar;//还车地点 Integer hasDriver;//0:带 1:不带 Integer driverId;//司机的id Integer carId;//汽车的id * Integer lei;//套餐的类型 String institutionsCode;//企业账号 Integer accountId;//用户id */ public static final String COMMIT_ORDER = EverythingConstant.HOST + "/bmpw/zc/order/institutions/addorder"; //企业用车查询订单列表 Integer account_id,Integer page public static final String GET_ORDER_LIST_INSTITUTIONS = EverythingConstant.HOST + "/bmpw/zc/order/institutions/loadbyaccount"; //个人租车查询订单列表 Integer account_id,Integer page public static final String GET_ORDER_LIST_PERSON = EverythingConstant.HOST + "/bmpw/zc/order/person/loadbyaccount"; //个人租车取消订单列表 传入order_id public static final String CANCEL_ORDER = EverythingConstant.HOST + "/bmpw/zc/order/cancelorder"; //查询订单详情 public static final String QUERY_ORDER_DETAIL = EverythingConstant.HOST + "/bmpw/zc/order/details"; } /** * 请求码 */ public class RequestCode { //自驾租车选择取车城市 public static final int ZIJIAZUCHE_TAKE_CAR_CITY = 0; //机构租车选城市 public static final int JIGOUZUCHE_TAKE_CAR_CITY = 1; //选司机 public static final int JIGOUZUCHE_CHOOSE_DRIVER = 2; //取车门店地图 public static final int JIGOUZUCHE_TAKE_CAR_MAP = 3; //还车门店地图 public static final int JIGOUZUCHE_RETURN_CAR_MAP = 4; //自家租车选择还车城市 public static final int ZIJIAZUCHE_RETURN_CAR_CITY = 5; } /** * 返回码 */ public class ResultCode { //选城市 public static final int CHOOSE_CITY = 0; //选司机 public static final int JIGOUZUCHE_CHOOSE_DRIVER = 1; //选门店 public static final int CHOOSE_STORE = 2; } public class IntentKey { //跳转到门店地图_取车城市的KEY public static final String CITY = "city_name"; //为区分还车门店还是取车门店 public static final String GET_MAP_LOC_KEY = "get_map_loc"; //跳转到门店地图_取车的KEY public static final int GET_MAP_LOC_GET = 1; //跳转到门店地图_还车的KEY public static final int GET_MAP_LOC_RETURN = 2; //选择城市地区的KEY public static final String CHOOSE_CITY = "choose_city"; //选择城市地区的pisotion public static final String CHOOSE_CITY_POSITION = "choose_city_position"; //取车门店返回值的KEY public static final String STORES_MAP_KEY = "stores_map_marker_title"; //选择司机的返回值的KEY public static final String DRIVER_NAME = "driverName"; public static final String DRIVER_ID = "driverID"; //第一次选择时要传递的对象值的KEY public static final String CHOOSE_FRIST_INFO = "choose_frist_info"; //门店ID的KEY public static final String STORES_ID_KEY = "storesId"; //机构用车提交订单后返回到订单列表的KEY public static final String BACK_TO_ORDER_LIST_KEY = "ji_guo_zu_che_back_key"; //机构用车提交订单后返回到订单列表的Intent值 public static final int JI_GUO_ZU_CHE_BACK_INT = 11; //自驾用车提交订单后返回到订单列表的Intent值 public static final int ZI_JIA_ZU_CHE_BACK_INT = 12; } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Cdachezuche/DaCheZhuCheActivity/DaCheZuCheMainActivity.java package com.aiton.bamin.changtukepiao.Cdachezuche.DaCheZhuCheActivity; import android.content.Intent; import android.os.Bundle; import android.support.v4.app.FragmentTabHost; import android.support.v7.app.AppCompatActivity; import android.view.KeyEvent; import android.view.View; import android.widget.ImageView; import android.widget.TextView; import com.aiton.bamin.changtukepiao.Cdachezuche.DaCheZuCheFragment.MainFragment; import com.aiton.bamin.changtukepiao.Cdachezuche.DaCheZuCheFragment.MoreFragment; import com.aiton.bamin.changtukepiao.Cdachezuche.DaCheZuCheFragment.OrderFragment; import com.aiton.bamin.changtukepiao.Cdachezuche.constant_dachezuche.ConstantDaCheZuChe; import com.aiton.bamin.changtukepiao.R; public class DaCheZuCheMainActivity extends AppCompatActivity { private String[] tabsItem = new String[]{"首页", "订单", "更多"}; private Class[] fragment = new Class[]{MainFragment.class, OrderFragment.class, MoreFragment.class}; private int[] imgRes = new int[]{R.drawable.shouye_selector, R.drawable.dingdan_selector, R.drawable.gengduo_selector}; private FragmentTabHost mTabHost; private int mBackKey; public int viewpagerTabs; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_da_che_zu_che_main); mTabHost = (FragmentTabHost) findViewById(R.id.tabHost); mTabHost.setup(this, getSupportFragmentManager(), R.id.realtab); for (int i = 0; i < imgRes.length; i++) { View inflate = getLayoutInflater().inflate(R.layout.tabs_item, null); TextView tabs_text = (TextView) inflate.findViewById(R.id.tabs_text); ImageView tabs_img = (ImageView) inflate.findViewById(R.id.tabs_img); tabs_text.setText(tabsItem[i]); tabs_img.setImageResource(imgRes[i]); mTabHost.addTab(mTabHost.newTabSpec("" + i).setIndicator(inflate), fragment[i], null); } getIntentForSetCurrentTab(); } public void setViewpagerTabs(int viewpagerTabs){ this.viewpagerTabs=viewpagerTabs; } public boolean onKeyDown(int keyCode, android.view.KeyEvent event) { if (keyCode == KeyEvent.KEYCODE_BACK) { finish(); animFromBigToSmallOUT(); } return super.onKeyDown(keyCode, event); } /** * 从大到小结束动画 */ private void animFromBigToSmallOUT() { overridePendingTransition(R.anim.fade_in, R.anim.big_to_small_fade_out); } private void getIntentForSetCurrentTab() { Intent intent = getIntent(); mBackKey = intent.getIntExtra(ConstantDaCheZuChe.IntentKey.BACK_TO_ORDER_LIST_KEY, -1); if (-1 != mBackKey) { switch (mBackKey) { case ConstantDaCheZuChe.IntentKey.JI_GUO_ZU_CHE_BACK_INT: //机构用车返回的当前【订单列表默认为机构用车,无需设置二级Tab】 break; case ConstantDaCheZuChe.IntentKey.ZI_JIA_ZU_CHE_BACK_INT: viewpagerTabs = 1; //设置当前显示的Tab选项卡 break; } mTabHost.setCurrentTab(1); } } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Cdachezuche/models/ZiJiaZuCheOrderDetial.java package com.aiton.bamin.changtukepiao.Cdachezuche.models; import java.io.Serializable; /** * Created by zjb on 2016/3/28. */ public class ZiJiaZuCheOrderDetial implements Serializable{ private int id; private int carId;//车辆编号 private int planId; //租赁计划id(防止车辆绑定plan更改,或者不使用车辆绑定plan) private long zuchuDate; //租出时间 private long huancheDate; //还车时间 private long planReturnDate;//计划还车时间 private double limitMileage;//限制里程数 private int accountId;//租车人id private int guarantorId;//担保人id private double beforeMileage;//开始里程数 private double afterMileage;//结束里程数 private int jijiatime;//计价时间 private double timePrice;//计时租金 private double outMileagePrice;//超程租金 private double outTimePrice;//超时租金 private double zuPrice;//应收租金 private double shouyajin;//实收押金 private double price;//总价 private String note;//备注 private int flag;//订单状态 0:进行中 1:完成 2:取消 3:等待结算(已还车) private int driverId; private int hasDriver;//是否带司机 0:带司机 1:不带司机 private int getCar;//取车地点 private int returnCar;//还车地点 private double advancePayment;//预付款 private long date;//下单时间 private int status;//0:企业租车 1;个人租车 private String sale;//收款人账号 private String institutionsCode;//企业账号 private int hasFranchiseFees;//是否包含不计免赔费用 public int getId() { return id; } public void setId(int id) { this.id = id; } public int getCarId() { return carId; } public void setCarId(int carId) { this.carId = carId; } public int getPlanId() { return planId; } public void setPlanId(int planId) { this.planId = planId; } public long getZuchuDate() { return zuchuDate; } public void setZuchuDate(long zuchuDate) { this.zuchuDate = zuchuDate; } public long getHuancheDate() { return huancheDate; } public void setHuancheDate(long huancheDate) { this.huancheDate = huancheDate; } public long getPlanReturnDate() { return planReturnDate; } public void setPlanReturnDate(long planReturnDate) { this.planReturnDate = planReturnDate; } public double getLimitMileage() { return limitMileage; } public void setLimitMileage(double limitMileage) { this.limitMileage = limitMileage; } public int getAccountId() { return accountId; } public void setAccountId(int accountId) { this.accountId = accountId; } public int getGuarantorId() { return guarantorId; } public void setGuarantorId(int guarantorId) { this.guarantorId = guarantorId; } public double getBeforeMileage() { return beforeMileage; } public void setBeforeMileage(double beforeMileage) { this.beforeMileage = beforeMileage; } public double getAfterMileage() { return afterMileage; } public void setAfterMileage(double afterMileage) { this.afterMileage = afterMileage; } public int getJijiatime() { return jijiatime; } public void setJijiatime(int jijiatime) { this.jijiatime = jijiatime; } public double getTimePrice() { return timePrice; } public void setTimePrice(double timePrice) { this.timePrice = timePrice; } public double getOutMileagePrice() { return outMileagePrice; } public void setOutMileagePrice(double outMileagePrice) { this.outMileagePrice = outMileagePrice; } public double getOutTimePrice() { return outTimePrice; } public void setOutTimePrice(double outTimePrice) { this.outTimePrice = outTimePrice; } public double getZuPrice() { return zuPrice; } public void setZuPrice(double zuPrice) { this.zuPrice = zuPrice; } public double getShouyajin() { return shouyajin; } public void setShouyajin(double shouyajin) { this.shouyajin = shouyajin; } public double getPrice() { return price; } public void setPrice(double price) { this.price = price; } public String getNote() { return note; } public void setNote(String note) { this.note = note; } public int getFlag() { return flag; } public void setFlag(int flag) { this.flag = flag; } public int getDriverId() { return driverId; } public void setDriverId(int driverId) { this.driverId = driverId; } public int getHasDriver() { return hasDriver; } public void setHasDriver(int hasDriver) { this.hasDriver = hasDriver; } public int getGetCar() { return getCar; } public void setGetCar(int getCar) { this.getCar = getCar; } public int getReturnCar() { return returnCar; } public void setReturnCar(int returnCar) { this.returnCar = returnCar; } public double getAdvancePayment() { return advancePayment; } public void setAdvancePayment(double advancePayment) { this.advancePayment = advancePayment; } public long getDate() { return date; } public void setDate(long date) { this.date = date; } public int getStatus() { return status; } public void setStatus(int status) { this.status = status; } public String getSale() { return sale; } public void setSale(String sale) { this.sale = sale; } public String getInstitutionsCode() { return institutionsCode; } public void setInstitutionsCode(String institutionsCode) { this.institutionsCode = institutionsCode; } public int getHasFranchiseFees() { return hasFranchiseFees; } public void setHasFranchiseFees(int hasFranchiseFees) { this.hasFranchiseFees = hasFranchiseFees; } public ZiJiaZuCheOrderDetial(int id, int carId, int planId, long zuchuDate, long huancheDate, long planReturnDate, double limitMileage, int accountId, int guarantorId, double beforeMileage, double afterMileage, int jijiatime, double timePrice, double outMileagePrice, double outTimePrice, double zuPrice, double shouyajin, double price, String note, int flag, int driverId, int hasDriver, int getCar, int returnCar, double advancePayment, long date, int status, String sale, String institutionsCode, int hasFranchiseFees) { this.id = id; this.carId = carId; this.planId = planId; this.zuchuDate = zuchuDate; this.huancheDate = huancheDate; this.planReturnDate = planReturnDate; this.limitMileage = limitMileage; this.accountId = accountId; this.guarantorId = guarantorId; this.beforeMileage = beforeMileage; this.afterMileage = afterMileage; this.jijiatime = jijiatime; this.timePrice = timePrice; this.outMileagePrice = outMileagePrice; this.outTimePrice = outTimePrice; this.zuPrice = zuPrice; this.shouyajin = shouyajin; this.price = price; this.note = note; this.flag = flag; this.driverId = driverId; this.hasDriver = hasDriver; this.getCar = getCar; this.returnCar = returnCar; this.advancePayment = advancePayment; this.date = date; this.status = status; this.sale = sale; this.institutionsCode = institutionsCode; this.hasFranchiseFees = hasFranchiseFees; } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Gkuaidibao/activity/KuaiDiCompanyListActivity.java package com.aiton.bamin.changtukepiao.Gkuaidibao.activity; import android.content.Intent; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import android.view.KeyEvent; import android.view.View; import android.view.ViewGroup; import android.widget.AdapterView; import android.widget.BaseAdapter; import android.widget.ListView; import android.widget.TextView; import com.aiton.bamin.changtukepiao.Gkuaidibao.model.KuaiDiCompanyCode; import com.aiton.bamin.changtukepiao.R; import com.aiton.bamin.changtukepiao.ZcustomView.IndexListView; import java.util.ArrayList; import java.util.List; public class KuaiDiCompanyListActivity extends AppCompatActivity implements View.OnClickListener { private static String KEY = "kuaidi_list_company_name"; private static int RESULTCODE = 0; private List<KuaiDiCompanyCode> mKuaiDiCompanyCodeList = new ArrayList<KuaiDiCompanyCode>(); private ListView mLv_kuaidi_company_list; private IndexListView mIndexlistview; private TextView mTv_letter; private IndexListAdapter mAdapter; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_kuai_di_company_list); initIntent(); findViewID(); initUI(); setListener(); } private void setListener() { findViewById(R.id.imageView_back).setOnClickListener(this); } private void findViewID() { mLv_kuaidi_company_list = (ListView) findViewById(R.id.lv_kuaidi_company_list); mIndexlistview = (IndexListView) findViewById(R.id.indexlistview); mTv_letter = (TextView) findViewById(R.id.tv_letter); } private void initUI() { mAdapter = new IndexListAdapter(); mLv_kuaidi_company_list.setAdapter(mAdapter); mLv_kuaidi_company_list.setOnItemClickListener(new AdapterView.OnItemClickListener() { @Override public void onItemClick(AdapterView<?> parent, View view, int position, long id) { setResultRerun(mKuaiDiCompanyCodeList.get(position).getKuaiDiCompany()); finish(); animFromBigToSmallOUT(); } }); mIndexlistview.setOnGetLetterListener(new IndexListView.GetLetterListener() { @Override public void onLetterChanged(String letter) { mTv_letter.setVisibility(View.VISIBLE); mTv_letter.setText(letter); //更新ListView的行数显示 int searchLetter_index = searchLetter(letter); mLv_kuaidi_company_list.setSelection(searchLetter_index); } @Override public void onActionUp() { mTv_letter.setVisibility(View.GONE); } }); } private void initIntent() { Intent intent = getIntent(); mKuaiDiCompanyCodeList = intent.getParcelableArrayListExtra("data"); } @Override public void onClick(View v) { finish(); AnimFromRightToLeftOUT(); } class IndexListAdapter extends BaseAdapter { @Override public int getCount() { return mKuaiDiCompanyCodeList.size(); } @Override public Object getItem(int position) { return null; } @Override public long getItemId(int position) { return 0; } @Override public View getView(int position, View convertView, ViewGroup parent) { View layout = getLayoutInflater().inflate(R.layout.layout_index_letter_list_item, null); TextView tv_frist_letter = (TextView) layout.findViewById(R.id.tv_fristletter); TextView tv_kuaidi_company_name = (TextView) layout.findViewById(R.id.tv_kuaidi_company_name); if (mKuaiDiCompanyCodeList != null && mKuaiDiCompanyCodeList.size() > 0) { String substring_fristletter = mKuaiDiCompanyCodeList.get(position).getKuaiDiCode().substring(0, 1).toUpperCase(); tv_frist_letter.setText(substring_fristletter); tv_kuaidi_company_name.setText(mKuaiDiCompanyCodeList.get(position).getKuaiDiCompany()); if (position > 0) { //上一行首字母 String lastletter = mKuaiDiCompanyCodeList.get(position - 1).getKuaiDiCode().substring(0, 1).toUpperCase(); if (substring_fristletter.equals(lastletter)) { tv_frist_letter.setVisibility(View.GONE); } } } return layout; } } /** * 设置回传至调用该页面的相关数据 * * @param value */ private void setResultRerun(String value) { Intent data = new Intent(); data.putExtra(KEY, value); setResult(RESULTCODE, data); } /** * 搜索用户点击自定义的IndexListView控件所返回的字母 */ public int searchLetter(String letter) { for (int i = 0; i < mKuaiDiCompanyCodeList.size(); i++) { String string = mKuaiDiCompanyCodeList.get(i).getKuaiDiCode(); if (string.toUpperCase().startsWith(letter)) { return i; } } return -1; } /** * 从大到小结束动画 */ private void animFromBigToSmallOUT() { overridePendingTransition(R.anim.fade_in, R.anim.big_to_small_fade_out); } /** * 从右往左结束动画 */ private void AnimFromRightToLeftOUT() { overridePendingTransition(R.anim.fade_in, R.anim.push_left_out); } public boolean onKeyDown(int keyCode, android.view.KeyEvent event) { if (keyCode == KeyEvent.KEYCODE_BACK) { finish(); AnimFromRightToLeftOUT(); } return super.onKeyDown(keyCode, event); } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Dchihewanle/QiangGuoActivity.java package com.aiton.bamin.changtukepiao.Dchihewanle; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import android.view.View; import android.widget.Toast; import com.aiton.bamin.changtukepiao.R; public class QiangGuoActivity extends AppCompatActivity { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_qiang_guo); } public void btnback(View v) { finish(); } public void btnconfirm(View v) { Toast.makeText(QiangGuoActivity.this,"跳转至支付页面",Toast.LENGTH_SHORT).show(); } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Abusline/busline_aiton_model/CorporationInfo.java package com.aiton.bamin.changtukepiao.Abusline.busline_aiton_model; import java.io.Serializable; /** * Created by Administrator on 2015-12-16. */ public class CorporationInfo implements Serializable { private static final long serialVersionUID = 5769542726008868123L; String mTitle; String mSubTitle; public CorporationInfo() { } public CorporationInfo (String title, String subTitle) { mTitle = title; mSubTitle = subTitle; } public String getTitle () { return mTitle; } public void setTitle (String title) { mTitle = title; } public String getSubTitle () { return mSubTitle; } public void setSubTitle (String subTitle) { mSubTitle = subTitle; } @Override public String toString () { return "CorporationInfo{" + "mTitle='" + mTitle + '\'' + ", mSubTitle='" + mSubTitle + '\'' + '}'; } @Override public boolean equals (Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; CorporationInfo that = (CorporationInfo) o; if (mTitle != null ? !mTitle.equals(that.mTitle) : that.mTitle != null) return false; return !(mSubTitle != null ? !mSubTitle.equals(that.mSubTitle) : that.mSubTitle != null); } @Override public int hashCode () { int result = mTitle != null ? mTitle.hashCode() : 0; result = 31 * result + (mSubTitle != null ? mSubTitle.hashCode() : 0); return result; } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Abusline/busline_aition_fragment/RoutePlaneFragment.java package com.aiton.bamin.changtukepiao.Abusline.busline_aition_fragment; import android.content.Intent; import android.os.Bundle; import android.support.v4.app.Fragment; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.BaseAdapter; import android.widget.ImageView; import android.widget.ListView; import android.widget.RelativeLayout; import android.widget.TextView; import com.aiton.bamin.changtukepiao.Abusline.busline_aition_constants.ConstantBusLine; import com.aiton.bamin.changtukepiao.R; import com.aiton.bamin.changtukepiao.Abusline.busline_aiton.InPutLocAllActivity; import com.aiton.bamin.changtukepiao.Abusline.busline_aiton.InputLocActivity; import com.aiton.bamin.changtukepiao.Abusline.busline_aiton.TransitRouteActivity; /** * A simple {@link Fragment} subclass. */ public class RoutePlaneFragment extends Fragment implements View.OnClickListener { private View mInflate; private String[] loc01 = {"我的位置", "我的位置"}; private String[] loc02 = {"梅阳花园", "干休二所"}; private RelativeLayout mMylocation_rela; private RelativeLayout mInput_end_rela; private RelativeLayout mTransAdress; private TextView mTv_curr_loction; private TextView mTv_ending_station; private ImageView mIv_curr_loction; private ImageView mIv_ending_station; private boolean isTrans = false; private RelativeLayout mRl_search; public RoutePlaneFragment() { // Required empty public constructor } @Override public View onCreateView (LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { // Inflate the layout for this fragment if (mInflate == null) { mInflate = inflater.inflate(R.layout.fragment_routeplane, container, false); initUI(); setListener(); } return mInflate; } private void setListener () { mMylocation_rela.setOnClickListener(this); mInput_end_rela.setOnClickListener(this); mTransAdress.setOnClickListener(this); mInflate.findViewById(R.id.ll_for_search_all).setOnClickListener(this); mRl_search.setOnClickListener(this); } private void initUI () { mMylocation_rela = (RelativeLayout) mInflate.findViewById(R.id.mylocation_rela); mInput_end_rela = (RelativeLayout) mInflate.findViewById(R.id.input_end_rela); mTransAdress = (RelativeLayout) mInflate.findViewById(R.id.rl_trans_adress); mTv_curr_loction = (TextView) mInflate.findViewById(R.id.tv_curr_loction); mIv_curr_loction = (ImageView) mInflate.findViewById(R.id.iv_curr_loction); mTv_ending_station = (TextView) mInflate.findViewById(R.id.tv_ending_station); mIv_ending_station = (ImageView) mInflate.findViewById(R.id.iv_ending_station); mRl_search = (RelativeLayout) mInflate.findViewById(R.id.rl_search); ListView transfer_listView = (ListView) mInflate.findViewById(R.id.transfer_listView); View transfer_foot = getLayoutInflater(getArguments()).inflate(R.layout.transfer_foot, null); transfer_listView.addFooterView(transfer_foot); transfer_listView.setAdapter(new MyAdapter()); } @Override public void onClick (View v) { Intent intent = new Intent(); switch (v.getId()) { case R.id.mylocation_rela: intent.setClass(getActivity(), InputLocActivity.class); if(isTrans) { intent.putExtra(ConstantBusLine.IntentKey.INPUT_TYPE_KEY, "end"); }else { intent.putExtra(ConstantBusLine.IntentKey.INPUT_TYPE_KEY, "myLoc"); } startActivityForResult(intent, ConstantBusLine.Request.CHOOSE_START); break; case R.id.input_end_rela: intent.setClass(getActivity(), InputLocActivity.class); if(isTrans) { intent.putExtra(ConstantBusLine.IntentKey.INPUT_TYPE_KEY, "myLoc"); }else { intent.putExtra(ConstantBusLine.IntentKey.INPUT_TYPE_KEY, "end"); } startActivityForResult(intent, ConstantBusLine.Request.CHOOSE_ARRIVE); break; case R.id.rl_trans_adress: isTrans = !isTrans; TransLoc(isTrans); break; case R.id.ll_for_search_all: intent.setClass(getActivity(), InPutLocAllActivity.class); startActivity(intent); break; case R.id.rl_search: intent.setClass(getActivity(), TransitRouteActivity.class); intent.putExtra("choosed_start", mTv_curr_loction.getText().toString()); intent.putExtra("choosed_end",mTv_ending_station.getText().toString()); startActivity(intent); break; } } class MyAdapter extends BaseAdapter { @Override public int getCount () { return loc01.length; } @Override public Object getItem (int position) { return null; } @Override public long getItemId (int position) { return 0; } @Override public View getView (int position, View convertView, ViewGroup parent) { View inflate = getLayoutInflater(getArguments()).inflate(R.layout.transfer_listitem, null); TextView loc01_tv = (TextView) inflate.findViewById(R.id.loc01_tv); TextView loc02_tv = (TextView) inflate.findViewById(R.id.loc02_tv); loc01_tv.setText(loc01[position]); loc02_tv.setText(loc02[position]); return inflate; } } public void TransLoc(boolean is_trans) { String curr_loction = mTv_curr_loction.getText().toString(); String end_loction = mTv_ending_station.getText().toString(); if(!is_trans) { mTv_curr_loction.setText(end_loction);//当前位置 mIv_curr_loction.setImageResource(R.mipmap.icon_poi_mylocation); mTv_ending_station.setText(curr_loction);//目的地位置 mIv_ending_station.setImageResource(R.mipmap.icon_poi_input); }else { mTv_curr_loction.setText(end_loction);//目的地位置 mIv_curr_loction.setImageResource(R.mipmap.icon_poi_input); mTv_ending_station.setText(curr_loction);//当前位置 mIv_ending_station.setImageResource(R.mipmap.icon_poi_mylocation); } } @Override public void onActivityResult(int requestCode, int resultCode, Intent data) { super.onActivityResult(requestCode, resultCode, data); if(data!=null) { switch (requestCode) { case ConstantBusLine.Request.CHOOSE_START: if(resultCode == ConstantBusLine.ResultCode.CHOOSED) { String choosed = data.getStringExtra("choosed"); mTv_curr_loction.setText(choosed); } break; case ConstantBusLine.Request.CHOOSE_ARRIVE: if(resultCode == ConstantBusLine.ResultCode.CHOOSED) { String choosed = data.getStringExtra("choosed"); mTv_ending_station.setText(choosed); } break; } } } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Cdachezuche/DaCheZuCheFragment/OrderViewPagerFagment/JiGouYongCheOrderFragment.java package com.aiton.bamin.changtukepiao.Cdachezuche.DaCheZuCheFragment.OrderViewPagerFagment; import android.content.Context; import android.content.Intent; import android.content.SharedPreferences; import android.os.Bundle; import android.support.v4.app.Fragment; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.AdapterView; import android.widget.BaseAdapter; import android.widget.ImageView; import android.widget.LinearLayout; import android.widget.ListView; import android.widget.TextView; import android.widget.Toast; import com.aiton.administrator.shane_library.shane.utils.GsonUtils; import com.aiton.administrator.shane_library.shane.utils.HTTPUtils; import com.aiton.administrator.shane_library.shane.utils.UILUtils; import com.aiton.administrator.shane_library.shane.utils.VolleyListener; import com.aiton.bamin.changtukepiao.Cdachezuche.QiYeZuChe.ZuCheJiGuoOrderDetailActivity; import com.aiton.bamin.changtukepiao.Cdachezuche.constant_dachezuche.ConstantDaCheZuChe; import com.aiton.bamin.changtukepiao.Cdachezuche.models.OrderListInfo; import com.aiton.bamin.changtukepiao.R; import com.aiton.bamin.changtukepiao.ZcustomView.CustomerFooter; import com.android.volley.VolleyError; import com.andview.refreshview.XRefreshView; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; /** * A simple {@link Fragment} subclass. */ public class JiGouYongCheOrderFragment extends Fragment { private int mPage = 0; private View mInflate; private ListView mListView_jigouyongche; private TextView mTv_jg_order_list_remind; private TextView mTv_jg_order_list_unlogin; private String mAccountId; private LinearLayout mLl_loading_remind_progress_bar; private XRefreshView mCustom_view_refresh; private int mTotalNum; private List<OrderListInfo.ContainsEntity> mContains = new ArrayList<>(); private MyAdapter mAdapter; private String mGetOrderListUrl; public JiGouYongCheOrderFragment() { // Required empty public constructor } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { // Inflate the layout for this fragment if (mInflate == null) { mInflate = inflater.inflate(R.layout.fragment_ji_gou_yong_che_order, null); findID(); initUI(); if (isLogin()) { mTv_jg_order_list_unlogin.setVisibility(View.GONE);//未登陆状态文字提示—不可见 mLl_loading_remind_progress_bar.setVisibility(View.VISIBLE);//未登陆状态加载框—可见 //登陆状态 mContains.clear(); mPage = 0; initData(); } else { mTv_jg_order_list_unlogin.setVisibility(View.VISIBLE);//未登陆状态文字提示—可见 mLl_loading_remind_progress_bar.setVisibility(View.GONE);//未登陆状态加载框—不可见 } } //缓存的rootView需要判断是否已经被加过parent, 如果有parent需要从parent删除,要不然会发生这个rootview已经有parent的错误。 ViewGroup parent = (ViewGroup) mInflate.getParent(); if (parent != null) { parent.removeView(mInflate); } return mInflate; } private void initData() { Map<String, String> params = new HashMap<>(); params.put("account_id", mAccountId); params.put("page", mPage + ""); mGetOrderListUrl = ConstantDaCheZuChe.URL.GET_ORDER_LIST_INSTITUTIONS; HTTPUtils.post(getActivity(), mGetOrderListUrl, params, new VolleyListener() { @Override public void onErrorResponse(VolleyError volleyError) { } @Override public void onResponse(String s) { Log.e("onResponse ", "企业租车订单 " + s); mPage++; mLl_loading_remind_progress_bar.setVisibility(View.GONE);//数据加载完成后,加载框—不可见 OrderListInfo orderListInfo = GsonUtils.parseJSON(s, OrderListInfo.class); mTotalNum = orderListInfo.getNum(); mContains.addAll(orderListInfo.getContains()); if (mContains != null && mContains.size() > 0) { //有订单数据 } else { //无订单数据 mTv_jg_order_list_remind.setVisibility(View.VISIBLE);//未查到相关订单 } mCustom_view_refresh.stopLoadMore(); mAdapter.notifyDataSetChanged(); } }); } /** * 判断用户是否有登陆 */ private boolean isLogin() { SharedPreferences sp = getActivity().getSharedPreferences("isLogin", Context.MODE_PRIVATE); String phoneNum = sp.getString("phoneNum", ""); mAccountId = sp.getString("id", ""); if ("".equals(phoneNum)) { return false; } else { return true; } } private void findID() { mListView_jigouyongche = (ListView) mInflate.findViewById(R.id.listView_jigouyongche); mTv_jg_order_list_remind = (TextView) mInflate.findViewById(R.id.tv_jg_order_list_remind);//暂未查到您的相关订单… mTv_jg_order_list_unlogin = (TextView) mInflate.findViewById(R.id.tv_jg_order_list_unlogin);//未登陆,登陆后可查看相关订单! mLl_loading_remind_progress_bar = (LinearLayout) mInflate.findViewById(R.id.ll_loading_remind_progress_bar); mCustom_view_refresh = (XRefreshView) mInflate.findViewById(R.id.custom_view_refresh); } private void initUI() { mAdapter = new MyAdapter(); mListView_jigouyongche.setAdapter(mAdapter); mListView_jigouyongche.setOnItemClickListener(new AdapterView.OnItemClickListener() { @Override public void onItemClick(AdapterView<?> parent, View view, int position, long id) { Intent intent = new Intent(getActivity(), ZuCheJiGuoOrderDetailActivity.class); intent.putExtra("order_id", mContains.get(position).getOrder().getId()); startActivityForResult(intent, 1); } }); initXRefreshView(); } private void initXRefreshView() { mCustom_view_refresh.setPullRefreshEnable(false); mCustom_view_refresh.setPullLoadEnable(false); mCustom_view_refresh.setPinnedTime(1000); mCustom_view_refresh.setAutoLoadMore(true); mCustom_view_refresh.setMoveForHorizontal(true); mCustom_view_refresh.setCustomFooterView(new CustomerFooter(getActivity())); mCustom_view_refresh.setXRefreshViewListener(new XRefreshView.SimpleXRefreshListener() { @Override public void onRefresh() { } @Override public void onLoadMore(boolean isSlience) { if (mTotalNum > mPage) { initData(); } else { mCustom_view_refresh.stopLoadMore(); Toast.makeText(getActivity(), "没有更多订单了", Toast.LENGTH_SHORT).show(); } } }); } class MyAdapter extends BaseAdapter { @Override public int getCount() { return mContains.size(); } @Override public Object getItem(int position) { return null; } @Override public long getItemId(int position) { return 0; } @Override public View getView(int position, View convertView, ViewGroup parent) { View layout = getLayoutInflater(getArguments()).inflate(R.layout.jigouyongche_order_listitem, null); ImageView iv_car_img = (ImageView) layout.findViewById(R.id.iv_car_img);//显示汽车的图片 TextView tv_car_name = (TextView) layout.findViewById(R.id.tv_car_name);//显示汽车的名称 TextView tv_carriage_count = (TextView) layout.findViewById(R.id.tv_carriage_count);//显示汽车的厢数 TextView tv_displacement = (TextView) layout.findViewById(R.id.tv_displacement);//显示汽车是否为自动档及排量 TextView tv_car_seat_count = (TextView) layout.findViewById(R.id.tv_car_seat_count);//显示汽车可乘坐人数 TextView tv_get_car_date = (TextView) layout.findViewById(R.id.tv_get_car_date);//显示取车时间的日期 TextView tv_get_car_time = (TextView) layout.findViewById(R.id.tv_get_car_time);//显示取车时间的时间 TextView tv_how_long = (TextView) layout.findViewById(R.id.tv_how_long);//显示租期 TextView tv_return_car_date = (TextView) layout.findViewById(R.id.tv_return_car_date);//显示还车时间的日期 TextView tv_return_car_time = (TextView) layout.findViewById(R.id.tv_return_car_time);//显示还车时间的时间 TextView tv_dache_jg_store_name_get = (TextView) layout.findViewById(R.id.tv_dache_jg_store_name_get);//显示取车门店名称 TextView tv_dache_jg_store_name_return = (TextView) layout.findViewById(R.id.tv_dache_jg_store_name_return);//显示还车门店名称 TextView tv_order_num = (TextView) layout.findViewById(R.id.tv_order_list_num);//显示还车门店名称 TextView tv_order_stage = (TextView) layout.findViewById(R.id.tv_order_list_stage);//显示还车门店名称 if (mContains != null && mContains.size() > 0) { tv_car_name.setText(mContains.get(position).getCar().getModel()); tv_carriage_count.setText(mContains.get(position).getCar().getBox()); switch (mContains.get(position).getCar().getZidong()) { case 0: tv_displacement.setText(mContains.get(position).getCar().getPailiang() + "自动"); break; case 1: tv_displacement.setText(mContains.get(position).getCar().getPailiang() + "手动"); break; } tv_car_seat_count.setText("可乘坐" + mContains.get(position).getCar().getSeat() + "人"); tv_get_car_date.setText(getDateToString(mContains.get(position).getOrder().getZuchuDate())); tv_get_car_time.setText(getTimeToString(mContains.get(position).getOrder().getZuchuDate())); tv_how_long.setText(getHowLong(mContains.get(position).getOrder().getZuchuDate(), mContains.get(position).getOrder().getPlanReturnDate())); tv_return_car_date.setText(getDateToString(mContains.get(position).getOrder().getPlanReturnDate())); tv_return_car_time.setText(getTimeToString(mContains.get(position).getOrder().getPlanReturnDate())); tv_dache_jg_store_name_get.setText(mContains.get(position).getGetCarStore().getName()); tv_dache_jg_store_name_return.setText(mContains.get(position).getReturnStore().getName()); tv_order_num.setText(mContains.get(position).getOrder().getId() + ""); switch (mContains.get(position).getOrder().getFlag()) { // flag:订单状态 0:进行中 1:完成 2:取消 3:等待结算(已还车) case 0: tv_order_stage.setText("订单进行中"); break; case 1: tv_order_stage.setText("订单已完成"); break; case 2: tv_order_stage.setText("订单已取消"); break; case 3: tv_order_stage.setText("订单等待结算(已还车)"); break; } UILUtils.displayImageNoAnim(mContains.get(position).getCar().getImage(), iv_car_img); } return layout; } } /** * 将时间毫秒数转换成日期形式 */ private String getDateToString(long l) { SimpleDateFormat mSimpleDateFormat = new SimpleDateFormat("MM-dd"); String date_format = mSimpleDateFormat.format(l); return date_format; } /** * 将时间毫秒数转换成星期+时间的形式 */ private String getTimeToString(long l) { SimpleDateFormat mSimpleDateFormat = new SimpleDateFormat("EE HH:mm"); String time_format = mSimpleDateFormat.format(l); return time_format; } /** * 计算初始时间与结束时间之间相关的时间天数 */ private String getHowLong(long starttime, long endting) { long howLong = (endting + (2 * 3600 * 1000)) - starttime; long l = howLong / (24 * 3600 * 1000);//得到多少天 if (l > 30) { long month = l / 30; long day = l % 30; if (day == 0.0) { return month + "个月"; } else { return month + "个月 + " + day + "天"; } } else { return l + "天"; } } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Zeverything/everything_fragment/OrderEverythingFragment.java package com.aiton.bamin.changtukepiao.Zeverything.everything_fragment; import android.content.Context; import android.content.Intent; import android.content.SharedPreferences; import android.os.Bundle; import android.support.v4.app.Fragment; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.AdapterView; import android.widget.BaseAdapter; import android.widget.ImageView; import android.widget.LinearLayout; import android.widget.ListView; import android.widget.TextView; import android.widget.Toast; import com.aiton.administrator.shane_library.shane.utils.GsonUtils; import com.aiton.administrator.shane_library.shane.utils.HTTPUtils; import com.aiton.administrator.shane_library.shane.utils.VolleyListener; import com.aiton.bamin.changtukepiao.Bchangtukepiao.activity.OrderDeatilActivity; import com.aiton.bamin.changtukepiao.Cdachezuche.QiYeZuChe.ZuCheJiGuoOrderDetailActivity; import com.aiton.bamin.changtukepiao.R; import com.aiton.bamin.changtukepiao.ZcustomView.CustomerFooter; import com.aiton.bamin.changtukepiao.Zeverything.constant.EverythingConstant; import com.aiton.bamin.changtukepiao.Zeverything.model.EveryThingOrderList; import com.android.volley.VolleyError; import com.andview.refreshview.XRefreshView; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; public class OrderEverythingFragment extends Fragment { private ListView mLv_everything_order_list; private EverythingOrderAdapter mEverythingOrderAdapter; private View mLayout; private LayoutInflater mLayoutInflater; private boolean mIsLogin; private String mAccountID; private int mPage = 0; private TextView mTv_is_login_remind; private LinearLayout mLl_loading; private List<EveryThingOrderList.CodeEntity.ContainsEntity> mContains = new ArrayList<>(); private XRefreshView mCustom_view_refresh; private int mTotalNum; public OrderEverythingFragment() { // Required empty public constructor } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { mLayoutInflater = inflater; mLayout = inflater.inflate(R.layout.fragment_order_everything, null); getAccountID(); findViewID(); initUI(); if (mIsLogin) { mTv_is_login_remind.setVisibility(View.GONE); mLl_loading.setVisibility(View.VISIBLE); initData(); } else { mTv_is_login_remind.setVisibility(View.VISIBLE); mLl_loading.setVisibility(View.GONE); } return mLayout; } private void getAccountID() { SharedPreferences sp = getActivity().getSharedPreferences("isLogin", Context.MODE_PRIVATE); String phoneNum = sp.getString("phoneNum", ""); if ("".equals(phoneNum)) { mIsLogin = false; } else { mAccountID = sp.getString("id", ""); mIsLogin = true; } } private void initData() { Map<String, String> params = new HashMap<>(); params.put("account_id", mAccountID); params.put("page", mPage + ""); HTTPUtils.post(getActivity(), EverythingConstant.GET_ALL_ORDER_LIST, params, new VolleyListener() { @Override public void onErrorResponse(VolleyError volleyError) { } @Override public void onResponse(String s) { Log.e("onResponse ", "onResponse " + s); mPage++; mLl_loading.setVisibility(View.GONE); EveryThingOrderList everyThingOrderList = GsonUtils.parseJSON(s, EveryThingOrderList.class); mTotalNum = everyThingOrderList.getCode().getNum(); mContains.addAll(everyThingOrderList.getCode().getContains()); mCustom_view_refresh.stopLoadMore(); mEverythingOrderAdapter.notifyDataSetChanged(); } }); } private void findViewID() { mLv_everything_order_list = (ListView) mLayout.findViewById(R.id.lv_everything_order_list); mTv_is_login_remind = (TextView) mLayout.findViewById(R.id.tv_is_login_remind); mLl_loading = (LinearLayout) mLayout.findViewById(R.id.ll_loading); mCustom_view_refresh = (XRefreshView) mLayout.findViewById(R.id.custom_view_refresh); } private void initUI() { mEverythingOrderAdapter = new EverythingOrderAdapter(); mLv_everything_order_list.setAdapter(mEverythingOrderAdapter); mLv_everything_order_list.setOnItemClickListener(new AdapterView.OnItemClickListener() { @Override public void onItemClick(AdapterView<?> parent, View view, int position, long id) { if (mContains != null && mContains.size() > 0) { if (!"".equals(mContains.get(position).getOrder_id())) { Intent intent = new Intent(); if (1 == mContains.get(position).getType()) { intent.setClass(getActivity(), ZuCheJiGuoOrderDetailActivity.class); intent.putExtra("order_id", Integer.parseInt(mContains.get(position).getOrder_id())); intent.putExtra("everyting_order_list", "everyting_order_list"); Log.e("onItemClick ", "onItemClick " + mContains.get(position).getOrder_id()); } else if (0 == mContains.get(position).getType()) { intent.setClass(getActivity(), OrderDeatilActivity.class); intent.putExtra("BookLogAID", mContains.get(position).getOrder_id()); intent.putExtra("everyting_order_list", "everyting_order_list"); } else { Toast.makeText(getActivity(), "未找到相关页面\n请向我们反馈,谢谢!", Toast.LENGTH_SHORT).show(); return; } startActivity(intent); } } } }); initXRefreshView(); } private void initXRefreshView() { mCustom_view_refresh.setPullRefreshEnable(false); mCustom_view_refresh.setPullLoadEnable(false); mCustom_view_refresh.setPinnedTime(1000); mCustom_view_refresh.setAutoLoadMore(true); mCustom_view_refresh.setMoveForHorizontal(true); mCustom_view_refresh.setCustomFooterView(new CustomerFooter(getActivity())); mCustom_view_refresh.setXRefreshViewListener(new XRefreshView.SimpleXRefreshListener() { @Override public void onRefresh() { } @Override public void onLoadMore(boolean isSlience) { if (mTotalNum > mPage) { initData(); } else { mCustom_view_refresh.stopLoadMore(); Toast.makeText(getActivity(), "没有更多订单了", Toast.LENGTH_SHORT).show(); } } }); } class EverythingOrderAdapter extends BaseAdapter { @Override public int getCount() { return mContains.size(); } @Override public Object getItem(int position) { return null; } @Override public long getItemId(int position) { return 0; } @Override public View getView(int position, View convertView, ViewGroup parent) { View listViewLayout = mLayoutInflater.inflate(R.layout.layout_everything_order_list_item, null); TextView tv_order_date = (TextView) listViewLayout.findViewById(R.id.tv_order_date); TextView tv_order_week = (TextView) listViewLayout.findViewById(R.id.tv_order_week); TextView tv_order_time = (TextView) listViewLayout.findViewById(R.id.tv_order_time); ImageView iv_order_list_type = (ImageView) listViewLayout.findViewById(R.id.iv_order_list_type); TextView tv_order_type_name = (TextView) listViewLayout.findViewById(R.id.tv_order_type_name); TextView tv_order_price = (TextView) listViewLayout.findViewById(R.id.tv_order_price); TextView tv_order_list_stage = (TextView) listViewLayout.findViewById(R.id.tv_order_list_stage); TextView tv_order_list_msg = (TextView) listViewLayout.findViewById(R.id.tv_order_list_msg); //getFlag 租车: 0 已结算 1 等待结算 票务 1 未支付 0 已支付 2 较早 if (mContains != null && mContains.size() > 0) { if (1 == mContains.get(position).getType()) { tv_order_type_name.setText("租车·用车"); iv_order_list_type.setImageResource(R.mipmap.car_order_2x); if (0 == mContains.get(position).getFlag()) { tv_order_list_stage.setText("已结算"); } else if (1 == mContains.get(position).getFlag()) { tv_order_list_stage.setText("等待结算中"); } } else if (0 == mContains.get(position).getType()) { tv_order_type_name.setText("长途客票"); iv_order_list_type.setImageResource(R.mipmap.kepiaoorder_2x); if (0 == mContains.get(position).getFlag()) { tv_order_list_stage.setText("已支付"); } else if (1 == mContains.get(position).getFlag()) { tv_order_list_stage.setText("未支付"); } else if (2 == mContains.get(position).getFlag()) { tv_order_list_stage.setText("点击查看详情"); } } tv_order_list_msg.setText(mContains.get(position).getYuliu()); tv_order_date.setText(getDateToString(mContains.get(position).getDate())); tv_order_week.setText(getWeekToString(mContains.get(position).getDate())); tv_order_time.setText(getTimeToString(mContains.get(position).getDate())); tv_order_price.setText(mContains.get(position).getPrice() + ""); } return listViewLayout; } } private String getDateToString(long l) { SimpleDateFormat mSimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd"); String time_format = mSimpleDateFormat.format(l); return time_format; } private String getWeekToString(long l) { SimpleDateFormat mSimpleDateFormat = new SimpleDateFormat("EE"); String time_format = mSimpleDateFormat.format(l); return time_format; } private String getTimeToString(long l) { SimpleDateFormat mSimpleDateFormat = new SimpleDateFormat("HH:mm"); String time_format = mSimpleDateFormat.format(l); return time_format; } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Cdachezuche/models/ZiJiaZuCheChooseCityDate.java package com.aiton.bamin.changtukepiao.Cdachezuche.models; import java.io.Serializable; /** * Created by zjb on 2016/3/28. */ public class ZiJiaZuCheChooseCityDate implements Serializable { private long zuchuDate; //租出时间 private long planReturnDate;//计划还车时间 private int getCar;//取车地点 private int returnCar;//还车地点 private String takeCarStore;//取车门店 private String returnCarStore;//换车门店 private int dayCounts; private String mStartDate; private String mStartTime; private String mEndDate; private String mEndTime; public long getZuchuDate() { return zuchuDate; } public void setZuchuDate(long zuchuDate) { this.zuchuDate = zuchuDate; } public long getPlanReturnDate() { return planReturnDate; } public void setPlanReturnDate(long planReturnDate) { this.planReturnDate = planReturnDate; } public int getGetCar() { return getCar; } public void setGetCar(int getCar) { this.getCar = getCar; } public int getReturnCar() { return returnCar; } public void setReturnCar(int returnCar) { this.returnCar = returnCar; } public String getTakeCarStore() { return takeCarStore; } public void setTakeCarStore(String takeCarStore) { this.takeCarStore = takeCarStore; } public String getReturnCarStore() { return returnCarStore; } public void setReturnCarStore(String returnCarStore) { this.returnCarStore = returnCarStore; } public int getDayCounts() { return dayCounts; } public void setDayCounts(int dayCounts) { this.dayCounts = dayCounts; } public String getStartDate() { return mStartDate; } public void setStartDate(String startDate) { mStartDate = startDate; } public String getStartTime() { return mStartTime; } public void setStartTime(String startTime) { mStartTime = startTime; } public String getEndDate() { return mEndDate; } public void setEndDate(String endDate) { mEndDate = endDate; } public String getEndTime() { return mEndTime; } public void setEndTime(String endTime) { mEndTime = endTime; } public ZiJiaZuCheChooseCityDate(long zuchuDate, long planReturnDate, int getCar, int returnCar, String takeCarStore, String returnCarStore, int dayCounts, String startDate, String startTime, String endDate, String endTime) { this.zuchuDate = zuchuDate; this.planReturnDate = planReturnDate; this.getCar = getCar; this.returnCar = returnCar; this.takeCarStore = takeCarStore; this.returnCarStore = returnCarStore; this.dayCounts = dayCounts; mStartDate = startDate; mStartTime = startTime; mEndDate = endDate; mEndTime = endTime; } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Ddaibanpaotui/fragment_dabanpaotui/DaiBanPaoTuiOrderFragment.java package com.aiton.bamin.changtukepiao.Ddaibanpaotui.fragment_dabanpaotui; import android.os.Bundle; import android.support.v4.app.Fragment; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import com.aiton.bamin.changtukepiao.R; /** * A simple {@link Fragment} subclass. */ public class DaiBanPaoTuiOrderFragment extends Fragment { public DaiBanPaoTuiOrderFragment() { // Required empty public constructor } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { // Inflate the layout for this fragment return inflater.inflate(R.layout.fragment_main_dai_ban_pao_tui_order, container, false); } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Dchihewanle/AActivity.java package com.aiton.bamin.changtukepiao.Dchihewanle; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import android.view.View; import android.widget.Toast; import com.aiton.bamin.changtukepiao.R; public class AActivity extends AppCompatActivity { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_a); } public void back(View v) { finish(); } public void detail(View v) { Toast.makeText(AActivity.this,"跳转到商家详情页面",Toast.LENGTH_SHORT).show(); } } <file_sep>/八闽出行/src/main/java/com/aiton/bamin/changtukepiao/Cdachezuche/DaCheZuCheFragment/MoreFragment.java package com.aiton.bamin.changtukepiao.Cdachezuche.DaCheZuCheFragment; import android.content.Intent; import android.os.Bundle; import android.support.v4.app.Fragment; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import com.aiton.bamin.changtukepiao.Cdachezuche.DaCheZhuCheActivity.UsedAdressActivity; import com.aiton.bamin.changtukepiao.R; /** * A simple {@link Fragment} subclass. */ public class MoreFragment extends Fragment implements View.OnClickListener { private View mInflate; public MoreFragment() { // Required empty public constructor } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { // Inflate the layout for this fragment if (mInflate == null) { mInflate = inflater.inflate(R.layout.fragment_more2, null); findID(); initUI(); setListener(); } //缓存的rootView需要判断是否已经被加过parent, 如果有parent需要从parent删除,要不然会发生这个rootview已经有parent的错误。 ViewGroup parent = (ViewGroup) mInflate.getParent(); if (parent != null) { parent.removeView(mInflate); } return mInflate; } private void setListener() { mInflate.findViewById(R.id.rela_usedAddress).setOnClickListener(this); } private void initUI() { } private void findID() { } @Override public void onClick(View v) { Intent intent = new Intent(); switch (v.getId()){ case R.id.rela_usedAddress: intent.setClass(getActivity(), UsedAdressActivity.class); startActivity(intent); break; } } }
0692e1ff2f826da5cec951129af92b43c582dca1
[ "Java", "Gradle" ]
30
Java
aitonjiaotong/BaMinEverything2016-4-8
7a70966238a99cd6ade08ac939b015b38cf2b3fd
acc1529c2011fcd91467fd8889d68fa7c953bd89
refs/heads/main
<repo_name>AIsimrand/Video-Upscalling-Using-SRGAN<file_sep>/app.py # importing necessary libraries and functions import numpy as np from flask import Flask, request, jsonify, render_template, send_file, redirect, url_for from sr_class import SR from werkzeug.utils import secure_filename import os import cv2 import imquality.brisque as brisque import tensorflow as tf from os.path import isdir import shutil from tensorflow.keras.preprocessing.image import array_to_img from tensorflow.keras.preprocessing.image import save_img from flask_share import Share share = Share() app = Flask(__name__) #Initialize the flask App share.init_app(app) #model = pickle.load(open('model.pkl', 'rb')) # loading the trained model @app.route('/') # Homepage def home(): return render_template('index.html') UPLOAD_FOLDER = 'uploads/' app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER @app.route('/upScale',methods=['Get','POST']) def upScale(): file = request.files['file_name'] quality = request.form['quality'] if quality == "Base": scale = request.form['scale'] else: scale = 1 if file.filename.lower().endswith(('.png', '.jpg', '.jpeg')): size, fs_img, Final_Img = upScale_image(file,quality,scale) image = cv2.imread(app.config['UPLOAD_FOLDER']+"/Image.jpg") image = cv2.resize(np.asarray(image), size) acc0 = brisque.score(image) #fs_img = cv2.imread("./static/downloads/enhanced_photo01.jpg") acc1 = acc0 - brisque.score(fs_img) acc2 = acc0 - brisque.score(Final_Img) return render_template('index1.html',prediction_text=round(acc1,4),prediction_text1=round(acc2,4)) elif file.filename.lower().endswith(('.3gp', '.mp4', '.avi', '.mkv')): size = upScale_video(file,quality,scale) #cap = cv2.VideoCapture(app.config['UPLOAD_FOLDER']+"/Video.mp4") acc0 = frame_generator(app.config['UPLOAD_FOLDER']+"/Video.mp4",name='image000.jpg',size=size,save_img=False) #cap.release() #cap = cv2.VideoCapture("./static/downloads/fsrcnn_output.avi") score = frame_generator("./static/downloads/fsrcnn_output.avi",name='fsrcnn_image.jpg',size=size,save_img=True) acc1 = acc0 - score #cap.release() #cap = cv2.VideoCapture("./static/downloads/srgan_output.avi") score = frame_generator("./static/downloads/srgan_output.avi",name='srgan_image.jpg',size=size,save_img=True) acc2 = acc0 - score #cap.release() return render_template('index2.html',prediction_text=round(acc1,4),prediction_text1=round(acc2,4)) def frame_generator(path,name,size,save_img): cap = cv2.VideoCapture(path) ret,frame=cap.read() image = cv2.resize(frame, size) if save_img: path = 'static/downloads/' cv2.imwrite(os.path.join(path , name), image) cap.release() return brisque.score(image) def upScale_video(vid,quality,scale): vid.save(os.path.join(app.config['UPLOAD_FOLDER'], "Video.mp4")) cam = cv2.VideoCapture(app.config['UPLOAD_FOLDER']+"/Video.mp4") #fps = cam.get(cv2.CAP_PROP_FPS) if quality == "144p": size = (256,144) elif quality == "240p": size = (426,240) elif quality == "360p": size = (480,360) elif quality == "480p": size = (640,480) sr = SR() s_res = sr.res_video(cam) frame_width = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH) * int(scale)) frame_height = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT) * int(scale)) #int(cap.get(4)) #height, width, layers = img.shape s1 = (frame_width,frame_height) if quality == "Base": sr.fsr_video(size=s1) size = s1 else: sr.fsr_video(size=size) size = size fps = 20 out = cv2.VideoWriter('./static/downloads/srgan_output.avi',cv2.VideoWriter_fourcc(*'MJPG'), fps , size) for i in range(len(s_res)): out.write(cv2.resize(cv2.imread(s_res[i]), size)) out.release() return size def upScale_image(img,quality,scale): ''' For rendering results on HTML GUI ''' sr = SR() sr.init_super("FSRCNN_x4") model = sr.srgan() #generator() model.load_weights("gan_generator.h5") filename = secure_filename(img.filename) img.save(os.path.join(app.config['UPLOAD_FOLDER'], "Image.jpg")) if quality == "144p": size = (256,144) elif quality == "240p": size = (426,240) elif quality == "360p": size = (480,360) elif quality == "480p": size = (640,480) image = cv2.imread(app.config['UPLOAD_FOLDER']+"/Image.jpg") width = int(image.shape[1] * int(scale)) height = int(image.shape[0] * int(scale)) print(image.shape[1]) s1 = (width,height) sres = sr.resolve_single(model, image) if quality == "Base": fs_img = sr.super_res(image, name= 'enhanced_photo01.jpg', save_img=True, scale=True, size=s1) Final_Img = cv2.resize(np.asarray(sres),s1) size = s1 else: fs_img = sr.super_res(image, name= 'enhanced_photo01.jpg', save_img=True, scale=False, size=size) Final_Img = cv2.resize(np.asarray(sres),size) size = size path = 'static/downloads/enhanced_photo.jpg' cv2.imwrite(path, Final_Img) return size, fs_img, Final_Img @app.route('/static/downloads') def downloads(): filename = "./static/downloads/enhanced_photo.jpg" return send_file(filename, as_attachment=True) @app.route('/static') def download_video(): filename = "./static/downloads/srgan_output.avi" return send_file(filename, as_attachment=True) if __name__ == "__main__": app.run(debug=True) <file_sep>/requirements.txt opencv-python matplotlib opencv-contrib-python tensorflow image-quality flask-share<file_sep>/sr_class.py import cv2 import numpy as np import matplotlib.pyplot as plt import os from cv2 import dnn_superres import tensorflow as tf from utils import load_image, plot_sample from os.path import isdir import shutil from tensorflow.keras.preprocessing.image import array_to_img from tensorflow.keras.preprocessing.image import save_img from PIL import Image import tensorflow as tf from tensorflow.python.keras.layers import Add, BatchNormalization, Conv2D, Dense, Flatten, Input, LeakyReLU, PReLU, Lambda from tensorflow.python.keras.models import Model from utils import load_image, plot_sample from IPython import get_ipython #get_ipython().run_line_magic('cd', 'contact-form-03\\') #%cd Mini Project II/Final <EMAIL>('/upscale',methods=['POST']) #print(os.getcwd()) class SR: def init_super(self, model, base_path='Model'): global sr, model_name, model_scale # Define global variable sr = dnn_superres.DnnSuperResImpl_create() # Create an SR object model_path = os.path.join(model +".pb") # Define model path model_name = model.split('_')[0].lower() # Extract model name from model path model_scale = int(model.split("_")[1][1]) # Extract model scale from model path sr.readModel(model_path) # Read the desired model sr.setModel(model_name, model_scale) def super_res(self, image, returndata=False, save_img=True, name='test.png', size=(256,144), scale=True): #image = cv2.imread("uploads/Image.jpg") #print(type(img)) Final_Img = sr.upsample(image) # Upscale the image if returndata: return Final_Img else: if save_img: if scale: Final_Img = cv2.resize(Final_Img,size) else: Final_Img = cv2.resize(Final_Img,size) path = 'static/downloads/' cv2.imwrite(os.path.join(path , name), Final_Img) #cv2.imwrite("{{ url_for('predict'),filename=}}" + name, Final_Img) return Final_Img def fsr_video(self,size): fps=0 self.init_super("FSRCNN_x4") cap = cv2.VideoCapture("./uploads/Video.mp4") size = size #(frame_width, frame_height) #(640,480) # Define the codec and create VideoWriter object fourcc = cv2.VideoWriter_fourcc(*'MJPG') out = cv2.VideoWriter('./static/downloads/fsrcnn_output.avi', fourcc, 20, size) while(cap.isOpened()): ret,frame=cap.read() if ret == True: image = cv2.flip(frame,180) image = cv2.flip(image,1) image = self.super_res(image, returndata=True, save_img=False, size=size) #cv2.putText(image, 'FPS: {:.2f}'.format(fps), (10, 20), cv2.FONT_HERSHEY_SIMPLEX,0.8, (255, 20, 55), 1) #cv2.imshow("Super Resolution", image) out.write(cv2.resize(image, size)) if cv2.waitKey(1) & 0xFF == ord('q'): break else: break cap.release() out.release() cv2.destroyAllWindows() def res_video(self,cam): try: if not os.path.exists('./uploads/data'): os.makedirs('./uploads/data') except OSError: print ('Error: Creating directory of data') currentframe = 0 arr_img = [] while(True): ret,frame = cam.read() if ret: name = './uploads/data/frame' + str(currentframe).zfill(3) + '.jpg' #print ('Creating...' + name) cv2.imwrite(name, frame) currentframe += 1 arr_img.append(name) else: break model = self.srgan() #generator() model.load_weights('gan_generator.h5') arr_output=[] n= len(arr_img) for i in range(n): lr = load_image(arr_img[i]) sres = self.resolve_single(model, lr) arr_output.append(sres) cam.release() cv2.destroyAllWindows() if isdir("./static/downloads/output_images"): shutil.rmtree("./static/downloads/output_images") os.makedirs("./static/downloads/output_images") s_res= [] for j in range(len(arr_output)): out_name = './static/downloads/output_images/frame' + str(j).zfill(3) + '.jpg' img_pil = array_to_img(arr_output[j]) img1 = save_img(out_name, img_pil) s_res.append(out_name) return s_res def resolve_single(self,model, lr): return self.resolve(model, tf.expand_dims(lr, axis=0))[0] def resolve(self,model, lr_batch): lr_batch = tf.cast(lr_batch, tf.float32) sr_batch = model(lr_batch) sr_batch = tf.clip_by_value(sr_batch, 0, 255) sr_batch = tf.round(sr_batch) sr_batch = tf.cast(sr_batch, tf.uint8) return sr_batch def pixel_shuffle(self,scale): return lambda x: tf.nn.depth_to_space(x, scale) def normalize_01(self,x): """Normalizes RGB images to [0, 1].""" return x / 255.0 def denormalize_m11(self,x): """Inverse of normalize_m11.""" return (x + 1) * 127.5 def res_block(self,x_in, num_filters, momentum=0.8): x = Conv2D(num_filters, kernel_size=3, padding='same')(x_in) x = BatchNormalization(momentum=momentum)(x) x = PReLU(shared_axes=[1, 2])(x) x = Conv2D(num_filters, kernel_size=3, padding='same')(x) x = BatchNormalization(momentum=momentum)(x) x = Add()([x_in, x]) return x def upsample(self,x_in, num_filters): x = Conv2D(num_filters, kernel_size=3, padding='same')(x_in) x = Lambda(self.pixel_shuffle(scale=2))(x) return PReLU(shared_axes=[1, 2])(x) def srgan(self,num_filters=64, num_res_blocks=16): x_in = Input(shape=(None, None, 3)) x = Lambda(self.normalize_01)(x_in) x = Conv2D(num_filters, kernel_size=9, padding='same')(x) x = x_1 = PReLU(shared_axes=[1, 2])(x) for _ in range(num_res_blocks): x = self.res_block(x, num_filters) x = Conv2D(num_filters, kernel_size=3, padding='same')(x) x = BatchNormalization()(x) x = Add()([x_1, x]) x = self.upsample(x, num_filters * 4) x = self.upsample(x, num_filters * 4) x = Conv2D(3, kernel_size=9, padding='same', activation='tanh')(x) x = Lambda(self.denormalize_m11)(x) return Model(x_in, x) <file_sep>/README.md # Video-Upscalling-Using-SRGAN
d0d71794907bea9f69ac4ce8f2355349822db2b9
[ "Markdown", "Python", "Text" ]
4
Python
AIsimrand/Video-Upscalling-Using-SRGAN
c1ca3f1afb63d4391d181e73f95383286dc54906
40963eab1cd1cee9237047fbd4ced19eae857b20
refs/heads/master
<file_sep>package movie; import android.os.Bundle; import android.app.Fragment; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.Button; import android.widget.TextView; import java.net.MalformedURLException; import java.net.URL; import se.mah.ad1532.youplay.Controller; import se.mah.ad1532.youplay.R; public class ChildViewFrag extends Fragment { private TextView title; private TextView movieInfo; private TextView releaseYear; private TextView rating; private UrlImageView thumbnail; private Button btnTrailer; private Controller controller; private String titelOfmovie; public ChildViewFrag() { // Required empty public constructor } public void setController(Controller controller){ this.controller = controller; } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { // Inflate the layout for this fragment View view = inflater.inflate(R.layout.child_view, container, false); initializeComponents(view); return view; } private void initializeComponents(View view) { title = (TextView) view.findViewById(R.id.child_tvTitle); movieInfo = (TextView)view.findViewById(R.id.child_tvMovieinfo); releaseYear = (TextView)view.findViewById(R.id.child_tvRelease2); rating = (TextView)view.findViewById(R.id.child_tvRating2); thumbnail = new UrlImageView(getActivity()); thumbnail = (UrlImageView)view.findViewById(R.id.child_thumbnail); btnTrailer = (Button)view.findViewById(R.id.btnTrailer); btnTrailer.setOnClickListener(new btnTrailerClicked()); } public void updateView(Movie movie){ titelOfmovie = movie.getTitle(); title.setText(movie.getTitle()); movieInfo.setText(movie.getMovieInfo()); releaseYear.setText(movie.getReleaseYear()); rating.setText(movie.getRating()); URL url = null; try { url = new URL(movie.getPicThumbnail()); } catch (MalformedURLException e) { e.printStackTrace(); } thumbnail.setImageURL(url); } private class btnTrailerClicked implements View.OnClickListener { @Override public void onClick(View v) { if (titelOfmovie==null) { Log.i("HOSSI", "HOSSI nulll"); } controller.startTrailer(titelOfmovie); } } } <file_sep>package player; import android.os.Bundle; import android.util.Log; import com.google.android.youtube.player.YouTubeBaseActivity; import com.google.android.youtube.player.YouTubeInitializationResult; import com.google.android.youtube.player.YouTubePlayer; import com.google.android.youtube.player.YouTubePlayerView; import se.mah.ad1532.youplay.MainActivity; import se.mah.ad1532.youplay.R; /** * Created by Ussi on 11/9/2014 * */ public class VideoPlayer extends YouTubeBaseActivity implements YouTubePlayer.OnInitializedListener { private YouTubePlayer youTubePlayer; String video_id; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.youtube_player); Bundle extras = getIntent().getExtras(); if (extras != null) { video_id = (String)extras.get("videoid"); } YouTubePlayerView youTubePlayerView = (YouTubePlayerView)findViewById(R.id.youtubeplayerview); youTubePlayerView.initialize(MainActivity.YOUTUBE_API_KEY, this); } @Override public void onInitializationFailure(YouTubePlayer.Provider provider, YouTubeInitializationResult result) { if (result.isUserRecoverableError()) { result.getErrorDialog(this, -1).show(); } else { Log.i("HOSSI", "HOSSI YouTubePlayer.onInitializationFailure(): " + result.toString()); } } @Override public void onInitializationSuccess(YouTubePlayer.Provider provider, YouTubePlayer player, boolean wasRestored) { youTubePlayer = player; youTubePlayer.setShowFullscreenButton(true); if (!wasRestored) { player.cueVideo(Search.VIDEO_ID); } } } <file_sep>package movie; import android.content.Context; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ArrayAdapter; import android.widget.TextView; import java.net.MalformedURLException; import java.net.URL; import java.util.List; import se.mah.ad1532.youplay.R; /** * Created by Kim on 2014-11-11. */ public class CustomListAdapter extends ArrayAdapter<Movie>{ public CustomListAdapter(Context context, int textViewResourceId) { super(context, textViewResourceId); } public CustomListAdapter(Context context, int resource, List<Movie> items) { super(context, resource, items); } @Override public View getView(int position, View convertView, ViewGroup parent) { URL url = null; UrlImageView urlImg; View v = convertView; if (v == null) { LayoutInflater vi; vi = LayoutInflater.from(getContext()); v = vi.inflate(R.layout.custom_list_row, null); } Movie p = getItem(position); if (p != null) { try { url = new URL(p.getPicThumbnail()); } catch (MalformedURLException e) { e.printStackTrace(); } urlImg = new UrlImageView(getContext()); TextView movieTitle = (TextView) v.findViewById(R.id.customTvTitle); TextView movieRating = (TextView) v.findViewById(R.id.customTvRating2); urlImg = (UrlImageView)v.findViewById(R.id.child_thumbnail); urlImg.setImageURL(url); if (movieTitle != null) { movieTitle.setText(p.getTitle()); } if (movieRating != null) { movieRating.setText(p.getRating()); } } return v; } } <file_sep>package movie; import android.os.Bundle; import android.app.Fragment; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.AdapterView; import android.widget.FrameLayout; import android.widget.ListView; import java.util.ArrayList; import se.mah.ad1532.youplay.Controller; import se.mah.ad1532.youplay.R; public class MovieListFragment extends Fragment { private Controller controller; private ListView movieList; private ArrayList movieArrayList; private int lastMoviePressed = -1; private FrameLayout container2; public MovieListFragment() { // Required empty public constructor } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container,Bundle savedInstanceState) { View view = inflater.inflate(R.layout.fragment_movie_list, container, false); inititalizeComponents(view); return view; } private void inititalizeComponents(View view) { movieList = (ListView) view.findViewById(R.id.movieList); movieList.setOnItemClickListener(new listItemClicked()); } public void setMovieList(ArrayList arrayList){ if(arrayList != null) { this.movieArrayList = arrayList; CustomListAdapter adapter = new CustomListAdapter(getActivity(), R.layout.custom_list_row, arrayList); movieList.setAdapter(adapter); } } public void setController(Controller controller, FrameLayout container2) { this.container2 = container2; this.controller = controller; } private class listItemClicked implements android.widget.AdapterView.OnItemClickListener { @Override public void onItemClick(AdapterView<?> parent, View view, int position, long id) { if(lastMoviePressed == position){ controller.hideFragment(); lastMoviePressed = -1; } else{ lastMoviePressed = position; Movie movie = (Movie)movieArrayList.get(position); controller.showFragment(movie); } } } }
6452e02ddce2bd52781868e1ec8196a498e89261
[ "Java" ]
4
Java
KimGustafsson/YouPlay
9bd40e6c02b5aaddf6d3f50d1f8ef67b21c094e1
91a1ff8e769826beef7873a89676fc5d5a3fa915
refs/heads/master
<file_sep>require "bmicalc_b1528at/version" module BmicalcB1528at def self.hi puts "Check! BMI" puts "Please enter your height." h = gets.to_f puts "Please enter your weight." w = gets.to_f bmi = w / ( ( h / 100 ) ** 2) puts "Your BMI is", bmi end end
470ae76715d7a9fe21e8487ec2203e972f8c0fd7
[ "Ruby" ]
1
Ruby
atakei/bmicalc_b1528at
eb65b9d1555c80eef7f49ac9284a60f29180e3a9
05c9295f1824bf1f2bc553b320cdf609e4fa128a
refs/heads/master
<repo_name>xiamingxing/maya<file_sep>/DGbanInfo.h // DGbanInfo.h: interface for the DGbanInfo class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_DGBANINFO_H__9011C27F_CB7A_4E3F_AB8B_389F8AFAC67E__INCLUDED_) #define AFX_DGBANINFO_H__9011C27F_CB7A_4E3F_AB8B_389F8AFAC67E__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class DGbanInfo { public: DGbanInfo(); virtual ~DGbanInfo(); TCHAR m_strUserID[CHAR_NAME_LENGTH + 1]; TCHAR m_DGBAN_BODY[50]; }; #endif // !defined(AFX_DGBANINFO_H__9011C27F_CB7A_4E3F_AB8B_389F8AFAC67E__INCLUDED_) <file_sep>/ValItemTableSet.h #if !defined(AFX_VALITEMTABLESET_H__AAE0F8FF_D432_4933_BB1B_4BAD697EC40C__INCLUDED_) #define AFX_VALITEMTABLESET_H__AAE0F8FF_D432_4933_BB1B_4BAD697EC40C__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // ValItemTableSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CValItemTableSet recordset class CValItemTableSet : public CRecordset { public: CValItemTableSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CValItemTableSet) // Field/Param Data //{{AFX_FIELD(CValItemTableSet, CRecordset) int m_sSid; int m_sValItem01; BYTE m_tPersentVal01; int m_sValItem02; BYTE m_tPersentVal02; int m_sValItem03; BYTE m_tPersentVal03; int m_sValItem04; BYTE m_tPersentVal04; int m_sValItem05; BYTE m_tPersentVal05; int m_sValItem06; BYTE m_tPersentVal06; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CValItemTableSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_VALITEMTABLESET_H__AAE0F8FF_D432_4933_BB1B_4BAD697EC40C__INCLUDED_) <file_sep>/EVENT_DATA.h // EVENT_DATA.h: interface for the EVENT_DATA class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_EVENT_DATA_H__7514FC26_511B_11D3_BE41_00105A6B97E2__INCLUDED_) #define AFX_EVENT_DATA_H__7514FC26_511B_11D3_BE41_00105A6B97E2__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class LOGIC; class EXEC; class LOGIC_ELSE; typedef CTypedPtrArray <CPtrArray, LOGIC*> LogicArray; typedef CTypedPtrArray <CPtrArray, EXEC*> ExecArray; typedef CTypedPtrArray <CPtrArray, LOGIC_ELSE*> LogicElseArray; class EVENT_DATA { public: int m_EventNum; LogicArray m_arLogic; ExecArray m_arExec; LogicElseArray m_arLogicElse; EVENT_DATA(); virtual ~EVENT_DATA(); }; #endif // !defined(AFX_EVENT_DATA_H__7514FC26_511B_11D3_BE41_00105A6B97E2__INCLUDED_) <file_sep>/SETItemSet.h #if !defined(AFX_SETITEMSET_H__25296B6D_1498_4A39_B792_40DE4F9E1BD8__INCLUDED_) #define AFX_SETITEMSET_H__25296B6D_1498_4A39_B792_40DE4F9E1BD8__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // SETItemSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CSETItemSet recordset class CSETItemSet : public CRecordset { public: CSETItemSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CSETItemSet) // Field/Param Data //{{AFX_FIELD(CSETItemSet, CRecordset) int m_sSetIndex; CString m_strMainName; BYTE m_bySetCount; BYTE m_bySetAddOption1; BYTE m_bySetAddOption2; BYTE m_bySetAddOption3; BYTE m_bySetAddOption4; BYTE m_bySetAddOption5; BYTE m_bySetAddOption6; BYTE m_bySetAddOption7; BYTE m_bySetAddOption8; BYTE m_bySetAddOption9; int m_sSet2; int m_sSet3; int m_sSet4; int m_sSet5; int m_sSet6; int m_sSet7; int m_sSet8; int m_sSet9; int m_sSet10; int m_sSet11; int m_sSet12; int m_sSet13; int m_sSet14; int m_sSet15; int m_sItem01; int m_sItem02; int m_sItem03; int m_sItem04; int m_sItem05; int m_sItem06; int m_sItem07; int m_sItem08; int m_sItem09; int m_sItem10; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CSETItemSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_SETITEMSET_H__25296B6D_1498_4A39_B792_40DE4F9E1BD8__INCLUDED_) <file_sep>/EVENT.h // EVENT.h: interface for the EVENT class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_EVENT_H__7514FC23_511B_11D3_BE41_00105A6B97E2__INCLUDED_) #define AFX_EVENT_H__7514FC23_511B_11D3_BE41_00105A6B97E2__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class EVENT_DATA; typedef CTypedPtrArray <CPtrArray, EVENT_DATA*> EventDataArray; class EVENT { public: void DeleteAll(); void Parsing(char *pBuf); void Init(); BOOL LoadEvent(int zone); int m_Zone; EventDataArray m_arEvent; EVENT(); virtual ~EVENT(); }; #endif // !defined(AFX_EVENT_H__7514FC23_511B_11D3_BE41_00105A6B97E2__INCLUDED_) <file_sep>/TableItem.h // TableItem.h: interface for the TableItem class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_TABLEITEM_H__1A51934E_7C62_45E3_9FE1_AF791407B9BC__INCLUDED_) #define AFX_TABLEITEM_H__1A51934E_7C62_45E3_9FE1_AF791407B9BC__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class TableItem { public: TableItem(); virtual ~TableItem(); public: BOOL GetData(TCHAR* pBuf, int &index); int m_Num; int m_Arm; int m_UseLevel; int m_UseType; int m_PicNum; int m_Wg; int m_Dur; int m_MaxAt; int m_AtDelay; int m_DmgX; int m_DmgY; int m_DmgZ; int m_Df; int m_At; int m_Crit; int m_Range; int m_Out; int m_BullNum; int m_BullType; int m_StErr; int m_StDf; int m_StCure; int m_HP; int m_PP; int m_SP; DWORD m_Cost; // °¡°Ý char m_Name[ITEM_NAME_LENGTH]; }; #endif // !defined(AFX_TABLEITEM_H__1A51934E_7C62_45E3_9FE1_AF791407B9BC__INCLUDED_) <file_sep>/GuildStoreTable.cpp // GuildStoreTable.cpp: implementation of the CGuildStoreTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "GuildStoreTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CGuildStoreTable::CGuildStoreTable() { } CGuildStoreTable::~CGuildStoreTable() { } <file_sep>/SharedQueue.h //////////////////////////////////////////////////////////////////////////////////// // SharedQueue.h // CSharedQueue class header file. // 02/11/13 by zinee #include "SharedMemory.h" #ifndef CSHAREDQUEUE_20021113_CLASS_HEADER #define CSHAREDQUEUE_20021113_CLASS_HEADER #define QUEUE_DSN_SIZE 40 #define QUEUE_USERID_SIZE 32 #define QUEUE_PASSWORD_SIZE 32 #define QUEUE_QUERY_SIZE 10000 struct QUEUE { int nIndex; char Query[QUEUE_QUERY_SIZE]; }; //////////////////////////////////////////////////////////////////////////////////// // class CSharedQueue class CSharedQueue { private: CSharedMemory m_SharedMemory; HANDLE m_hMutex; QUEUE* m_pQueue; int m_nQueueCount; int* m_pnFront; int* m_pnRear; void IncQueuePos( int* pnQueuePos ); BOOL IncFront( void ); BOOL IncRear( void ); BOOL IsExistData(void); BOOL IsFullData(void); public: CSharedQueue(); ~CSharedQueue(); BOOL Create( LPCTSTR pSharedName, int nNumQueue ); BOOL Open( LPCTSTR pSharedName ); BOOL Release( void ); BOOL GetQueue( QUEUE* pQueue ); BOOL PutQueue( QUEUE* pQueue ); }; #endif<file_sep>/NpcTableSet.h #if !defined(AFX_NPCTABLESET_H__3A9EE3C9_2CF7_410A_B39C_6D8A20114937__INCLUDED_) #define AFX_NPCTABLESET_H__3A9EE3C9_2CF7_410A_B39C_6D8A20114937__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // NpcTableSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CNpcTableSet recordset class CNpcTableSet : public CRecordset { public: CNpcTableSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CNpcTableSet) // Field/Param Data //{{AFX_FIELD(CNpcTableSet, CRecordset) int m_sSid; int m_sPid; CString m_strName; int m_sSTR; int m_sDEX; int m_sVOL; int m_sWIS; int m_sMaxHP; int m_sMaxPP; BYTE m_byClass; BYTE m_byClassLevel; long m_sExp; int m_byAX; int m_byAY; int m_byAZ; BYTE m_byRange; int m_sAI; int m_sAttackDelay; BYTE m_byVitalC; BYTE m_byWildShot; BYTE m_byExcitedRate; BYTE m_byIronSkin; BYTE m_byReAttack; BYTE m_bySubAttack; BYTE m_byState; BYTE m_byPsi; BYTE m_byPsiLevel; BYTE m_bySearchRange; int m_sSpeed; int m_sInclination; BYTE m_byColor; int m_sStandTime; BYTE m_tNpcType; int m_sFamilyType; BYTE m_tItemPer; BYTE m_tDnPer; int m_byDefense; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CNpcTableSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_NPCTABLESET_H__3A9EE3C9_2CF7_410A_B39C_6D8A20114937__INCLUDED_) <file_sep>/EBodyIdentifyTableSet.cpp // EBodyIdentifyTableSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "EBodyIdentifyTableSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CEBodyIdentifyTableSet IMPLEMENT_DYNAMIC(CEBodyIdentifyTableSet, CRecordset) CEBodyIdentifyTableSet::CEBodyIdentifyTableSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CEBodyIdentifyTableSet) m_sSid = 0; m_nFields = 1; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CEBodyIdentifyTableSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=drgame"); } CString CEBodyIdentifyTableSet::GetDefaultSQL() { return _T("[dbo].[EBODY_IDENTIFY]"); } void CEBodyIdentifyTableSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CEBodyIdentifyTableSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sSid]"), m_sSid); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CEBodyIdentifyTableSet diagnostics #ifdef _DEBUG void CEBodyIdentifyTableSet::AssertValid() const { CRecordset::AssertValid(); } void CEBodyIdentifyTableSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/RecoverRate.cpp // RecoverRate.cpp: implementation of the CRecoverRate class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "RecoverRate.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CRecoverRate::CRecoverRate() { m_sSid = 0; m_sHpSpeedupTime = 0; m_byHpSpeedupLevel = 0; m_sSpSpeedupTime = 0; m_bySpSpeedupLevel = 0; m_sPpSpeedupTime = 0; m_byPpSpeedupLevel = 0; } CRecoverRate::~CRecoverRate() { } <file_sep>/DBItemInfo.h #if !defined(AFX_DBITEMINFO_H__2CB0FA90_27CB_4DFE_AD3E_AABF75E92D0E__INCLUDED_) #define AFX_DBITEMINFO_H__2CB0FA90_27CB_4DFE_AD3E_AABF75E92D0E__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // DBItemInfo.h : header file // ///////////////////////////////////////////////////////////////////////////// // CDBItemInfo recordset class CDBItemInfo : public CRecordset { public: CDBItemInfo(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CDBItemInfo) // Field/Param Data //{{AFX_FIELD(CDBItemInfo, CRecordset) int m_sNum; CString m_strName; long m_iCost; BYTE m_tArm; BYTE m_tUseLevel; BYTE m_tUseType; int m_sPicNum; int m_sWg; int m_sDur; int m_sMaxAt; int m_sAtDelay; BYTE m_tDmgX; BYTE m_tDmgY; BYTE m_tDmgZ; BYTE m_tDf; BYTE m_tAt; BYTE m_tCrit; BYTE m_tRange; BYTE m_tOut; BYTE m_tBullNum; BYTE m_tBullType; BYTE m_tStErr; BYTE m_tStDf; BYTE m_tStCure; BYTE m_tHP; BYTE m_tPP; int m_sSP; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CDBItemInfo) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_DBITEMINFO_H__2CB0FA90_27CB_4DFE_AD3E_AABF75E92D0E__INCLUDED_) <file_sep>/BoxEventTableSet.cpp // BoxEventTableSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "BoxEventTableSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CBoxEventTableSet IMPLEMENT_DYNAMIC(CBoxEventTableSet, CRecordset) CBoxEventTableSet::CBoxEventTableSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CBoxEventTableSet) m_sSid = 0; m_tType = 0; m_sIid = 0; m_sCount = 0; m_strText = _T(""); m_nFields = 5; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CBoxEventTableSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CBoxEventTableSet::GetDefaultSQL() { return _T("[dbo].[BOX_EVENT]"); } void CBoxEventTableSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CBoxEventTableSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sSid]"), m_sSid); RFX_Byte(pFX, _T("[tType]"), m_tType); RFX_Int(pFX, _T("[sIid]"), m_sIid); RFX_Int(pFX, _T("[sCount]"), m_sCount); RFX_Text(pFX, _T("[strText]"), m_strText); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CBoxEventTableSet diagnostics #ifdef _DEBUG void CBoxEventTableSet::AssertValid() const { CRecordset::AssertValid(); } void CBoxEventTableSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/ValItemTable.cpp // ValItemTable.cpp: implementation of the CValItemTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "ValItemTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CValItemTable::CValItemTable() { m_sSid = 0; m_sValItem01 = 0; m_tPersentVal01 = 0; m_sValItem02 = 0; m_tPersentVal02 = 0; m_sValItem03 = 0; m_tPersentVal03 = 0; m_sValItem04 = 0; m_tPersentVal04 = 0; m_sValItem05 = 0; m_tPersentVal05 = 0; m_sValItem06 = 0; m_tPersentVal06 = 0; } CValItemTable::~CValItemTable() { } <file_sep>/SummonTable.h // SummonTable.h: interface for the CSummonTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_SUMMONTABLE_H__DE5E7D37_DF4E_4389_A24D_6F7654DE742E__INCLUDED_) #define AFX_SUMMONTABLE_H__DE5E7D37_DF4E_4389_A24D_6F7654DE742E__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CSummonTable { public: CSummonTable(); virtual ~CSummonTable(); short m_sSid; CString m_strName; }; #endif // !defined(AFX_SUMMONTABLE_H__DE5E7D37_DF4E_4389_A24D_6F7654DE742E__INCLUDED_) <file_sep>/NpcPosSet.h #if !defined(AFX_NPCPOSSET_H__E81DE0AC_1CBC_4C97_858F_8D16B460DA15__INCLUDED_) #define AFX_NPCPOSSET_H__E81DE0AC_1CBC_4C97_858F_8D16B460DA15__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // NpcPosSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CNpcPosSet recordset class CNpcPosSet : public CRecordset { public: CNpcPosSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CNpcPosSet) // Field/Param Data //{{AFX_FIELD(CNpcPosSet, CRecordset) int m_sSid; CString m_strName; BYTE m_byType; int m_sZone; int m_sX; int m_sY; int m_sMinX; int m_sMinY; int m_sMaxX; int m_sMaxY; int m_sCount; long m_sRegenTime; int m_sEvent; int m_sEZone; int m_sGuild; int m_sDimension; int m_sHaveItem; int m_sSay; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CNpcPosSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_NPCPOSSET_H__E81DE0AC_1CBC_4C97_858F_8D16B460DA15__INCLUDED_) <file_sep>/SockDataList.h #ifndef _SOCKDATALIST_H #define _SOCKDATALIST_H #include "scdefine.h" #include "Mcommon.h" typedef unsigned char byte; struct _byte16 { BYTE b1; BYTE b2; }; typedef _byte16 BYTE16; union _byte2_word { short i; BYTE16 b; }; typedef _byte2_word BYTE2WORD; class CJSocket; class WAIT_SEND_DATA : public CObject { public: CJSocket *pSocket; int dcount; BYTE *pData; public: WAIT_SEND_DATA() { pSocket = NULL; dcount = 0; pData = NULL; }; ~WAIT_SEND_DATA() { if (pData != NULL) { delete[] pData; pData = NULL; }; }; }; class WAIT_RECV_DATA : public CObject { public: CJSocket *pSocket; int usn; char Id[MAX_ID+1]; int dcount; BYTE *pData; int m_Type; public: WAIT_RECV_DATA() { pSocket = NULL; usn = -1; dcount = 0; pData = NULL; m_Type = -1; }; ~WAIT_RECV_DATA() { if (pData != NULL) { pSocket = NULL; delete[] pData; pData = NULL; }; }; }; /* struct _WAIT_RECV_DATA { public: CJSocket *pSocket; int usn; char Id[MAX_ID+1]; int dcount; BYTE *pData; int m_Type; public: _WAIT_RECV_DATA() { pSocket = NULL; usn = -1; dcount = 0; pData = NULL; m_Type = -1; }; ~_WAIT_RECV_DATA() { if (pData != NULL) { pSocket = NULL; delete[] pData; pData = NULL; }; }; }; typedef _WAIT_RECV_DATA WAIT_RECV_DATA; */ class WAIT_ROOM_DATA : public CObject { public: int roomNumber; int roomDataLength; int room_disp_r; int room_disp_b; char *pRoomData; public: WAIT_ROOM_DATA() { roomNumber = -1; roomDataLength = 0; pRoomData = NULL; }; ~WAIT_ROOM_DATA() { if ( pRoomData != NULL ) { delete[] pRoomData; pRoomData = NULL; } }; }; class WAIT_ROOM_STORE_DATA : public CObject { public: int gameType; int roomNumber; int roomDataLength; int room_disp_r; int room_disp_b; char *pRoomData; public: WAIT_ROOM_STORE_DATA() { roomNumber = -1; roomDataLength = 0; pRoomData = NULL; }; ~WAIT_ROOM_STORE_DATA() { if ( pRoomData != NULL ) { delete[] pRoomData; pRoomData = NULL; } }; }; class WAIT_USER_DATA : public CObject { public: char Id[MAX_ID+1]; int userDispRange; int userDataLength; char *pUserData; public: WAIT_USER_DATA() { Id[0] = '\0'; userDataLength = 0; pUserData = NULL; }; ~WAIT_USER_DATA() { if ( pUserData != NULL ) { delete[] pUserData; pUserData = NULL; } }; }; class WAIT_USER_STORE_DATA : public CObject { public: char Id[MAX_ID+1]; int userDispRange; int userDataLength; int flag; int gameType; char *pUserData; public: WAIT_USER_STORE_DATA() { Id[0] = '\0'; userDataLength = 0; pUserData = NULL; }; ~WAIT_USER_STORE_DATA() { if ( pUserData != NULL ) { delete[] pUserData; pUserData = NULL; } }; }; class CConnMsg : public CObject { public: char *pData; int dLen; public: CConnMsg() { pData = NULL; dLen = 0; }; ~CConnMsg() { if ( pData ) { delete[] pData; pData = NULL; } dLen = 0; }; void SetMsg( char *msg, int dl ) { dLen = dl; pData = new char[dl+1]; memcpy( pData, msg, dl ); }; }; class GAME_INFO_DATA : public CObject { public: CBSocket *pSocket; char IpAddr[32]; int iPort; int gsn; int Type; public: GAME_INFO_DATA() { pSocket = NULL; IpAddr[0] = '\0'; iPort = -1; gsn = -1; Type = -1; }; }; /* class CTotalScoreCell : public CObject { public: int iOrder; char Id[MAX_ID+1]; char Krank[MAX_KRANK+1]; int iTotalScore; //// added by manseek for NetCoin int iNetCoin; //// 2000.4.30 int iTotalFight; int iTotalFightWin; int iTotalFightLost; int iTotalFightTie; public: CTotalScoreCell() {}; ~CTotalScoreCell() {}; }; */ #endif<file_sep>/SETItemCountSet.cpp // SETItemCountSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "SETItemCountSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CSETItemCountSet IMPLEMENT_DYNAMIC(CSETItemCountSet, CRecordset) CSETItemCountSet::CSETItemCountSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CSETItemCountSet) m_sSid = 0; m_sItemIndex = 0; m_iServerMax = 0; m_iServerNow = 0; m_nFields = 4; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CSETItemCountSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame"); } CString CSETItemCountSet::GetDefaultSQL() { return _T("[dbo].[SET_ITEM_COUNT]"); } void CSETItemCountSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CSETItemCountSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sSid]"), m_sSid); RFX_Int(pFX, _T("[sItemIndex]"), m_sItemIndex); RFX_Long(pFX, _T("[iServerMax]"), m_iServerMax); RFX_Long(pFX, _T("[iServerNow]"), m_iServerNow); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CSETItemCountSet diagnostics #ifdef _DEBUG void CSETItemCountSet::AssertValid() const { CRecordset::AssertValid(); } void CSETItemCountSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/GuildFortressTaxSet.cpp // GuildFortressTaxSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "GuildFortressTaxSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CGuildFortressTaxSet IMPLEMENT_DYNAMIC(CGuildFortressTaxSet, CRecordset) CGuildFortressTaxSet::CGuildFortressTaxSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CGuildFortressTaxSet) m_sFortressID = 0; m_sStoreID01 = 0; m_sStoreID02 = 0; m_sStoreID03 = 0; m_sStoreID04 = 0; m_sStoreID05 = 0; m_sStoreID06 = 0; m_sStoreID07 = 0; m_sStoreID08 = 0; m_sStoreID09 = 0; m_sStoreID10 = 0; m_nFields = 11; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CGuildFortressTaxSet::GetDefaultConnect() { return _T("ODBC;DSN=drgamenew;UID=drgame;PWD=<PASSWORD>"); } CString CGuildFortressTaxSet::GetDefaultSQL() { return _T("[dbo].[GUILD_FORTRESS_TAX]"); } void CGuildFortressTaxSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CGuildFortressTaxSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sFortressID]"), m_sFortressID); RFX_Int(pFX, _T("[sStoreID01]"), m_sStoreID01); RFX_Int(pFX, _T("[sStoreID02]"), m_sStoreID02); RFX_Int(pFX, _T("[sStoreID03]"), m_sStoreID03); RFX_Int(pFX, _T("[sStoreID04]"), m_sStoreID04); RFX_Int(pFX, _T("[sStoreID05]"), m_sStoreID05); RFX_Int(pFX, _T("[sStoreID06]"), m_sStoreID06); RFX_Int(pFX, _T("[sStoreID07]"), m_sStoreID07); RFX_Int(pFX, _T("[sStoreID08]"), m_sStoreID08); RFX_Int(pFX, _T("[sStoreID09]"), m_sStoreID09); RFX_Int(pFX, _T("[sStoreID10]"), m_sStoreID10); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CGuildFortressTaxSet diagnostics #ifdef _DEBUG void CGuildFortressTaxSet::AssertValid() const { CRecordset::AssertValid(); } void CGuildFortressTaxSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/DGbanInfo.cpp // DGbanInfo.cpp: implementation of the DGbanInfo class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "DGbanInfo.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// DGbanInfo::DGbanInfo() { ::ZeroMemory(m_strUserID, sizeof(m_strUserID)); ::ZeroMemory(m_DGBAN_BODY, sizeof(m_DGBAN_BODY)); } DGbanInfo::~DGbanInfo() { } <file_sep>/ThrowItem.h // ThrowItem.h: interface for the CThrowItem class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_THROWITEM_H__D4630979_CF80_4456_92D1_F090B85F12B4__INCLUDED_) #define AFX_THROWITEM_H__D4630979_CF80_4456_92D1_F090B85F12B4__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 #include "Map.h" class CThrowItem { public: int m_z; int m_x; int m_y; int ID; // LONG lRef; // LONG lExistRef; DWORD dTime;//物品产生时间 ItemList* m_pItem; class CThrowItem *next;//下一个结点 class CThrowItem *front;//上一个结点 CThrowItem(); virtual ~CThrowItem(); }; #endif // !defined(AFX_THROWITEM_H__D4630979_CF80_4456_92D1_F090B85F12B4__INCLUDED_) <file_sep>/StoreSellSet.cpp // StoreSellSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "StoreSellSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CStoreSellSet IMPLEMENT_DYNAMIC(CStoreSellSet, CRecordset) CStoreSellSet::CStoreSellSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CStoreSellSet) m_sStoreID = 0; m_sItemID = 0; m_nFields = 2; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CStoreSellSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CStoreSellSet::GetDefaultSQL() { return _T("[dbo].[STORE_SELL]"); } void CStoreSellSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CStoreSellSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sStoreID]"), m_sStoreID); RFX_Int(pFX, _T("[sItemID]"), m_sItemID); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CStoreSellSet diagnostics #ifdef _DEBUG void CStoreSellSet::AssertValid() const { CRecordset::AssertValid(); } void CStoreSellSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/EBodyUpgradeTable.h // EBodyUpgradeTable.h: interface for the CEBodyUpgradeTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_EBODYUPGRADETABLE_H__15DEC1FD_6AB1_4BF6_8516_1CAB98FDCE18__INCLUDED_) #define AFX_EBODYUPGRADETABLE_H__15DEC1FD_6AB1_4BF6_8516_1CAB98FDCE18__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CEBodyUpgradeTable { public: CEBodyUpgradeTable(); virtual ~CEBodyUpgradeTable(); BYTE m_tRandom1; BYTE m_tRandom2; BYTE m_tRandom3; }; #endif // !defined(AFX_EBODYUPGRADETABLE_H__15DEC1FD_6AB1_4BF6_8516_1CAB98FDCE18__INCLUDED_) <file_sep>/PathFind.h // PathFind.h: interface for the CPathFind class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_PATHFIND_H__04C22295_0166_11D3_B49A_00C02615866B__INCLUDED_) #define AFX_PATHFIND_H__04C22295_0166_11D3_B49A_00C02615866B__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class NODE { public: int f; int h; int g; int x; int y; NODE *Parent; NODE *Child[8]; NODE *NextNode; }; class STACK { public: NODE *NodePtr; STACK *NextStackPtr; }; class CPathFind { public: BOOL IsBlankMap(int x, int y); // void SetMap(int x, int y, int **map); void SetMap(int x, int y, int *map); void PropagateDown(NODE *old); void Insert(NODE *node); NODE *CheckOpen(int x, int y); NODE *CheckClosed(int x, int y); void FindChildPathSub(NODE *node, int x, int y, int dx, int dy, int arg); void FindChildPath(NODE *node, int dx, int dy); void ClearData(); NODE *ReturnBestNode(); NODE *FindPath(int start_x, int start_y, int dest_x, int dest_y); CPathFind(); virtual ~CPathFind(); void Push(NODE *node); NODE *Pop(); #ifdef _DEBUG NODE *FindPath(CDC& dc, int cellx, int celly, int start_x, int start_y, int dest_x, int dest_y); void DisplayData(int x, int y, int value); #endif protected: LONG m_lMapUse; NODE *m_pOpen, *m_pClosed; STACK *m_pStack; // int **m_pMap; int *m_pMap; CSize m_vMapSize; #ifdef _DEBUG CDC *m_pDC; CSize m_vCell; #endif }; #endif // !defined(AFX_PATHFIND_H__04C22295_0166_11D3_B49A_00C02615866B__INCLUDED_) <file_sep>/SkillTableSet.cpp // SkillTableSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "SkillTableSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CSkillTableSet IMPLEMENT_DYNAMIC(CSkillTableSet, CRecordset) CSkillTableSet::CSkillTableSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CSkillTableSet) m_sPid = 0; m_sSid = 0; m_tClass = 0; m_tInc = 0; m_tLevel = 0; m_tRepair = 0; m_tSuccess = 0; m_nFields = 7; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CSkillTableSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CSkillTableSet::GetDefaultSQL() { return _T("[dbo].[SKILL]"); } void CSkillTableSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CSkillTableSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sPid]"), m_sPid); RFX_Int(pFX, _T("[sSid]"), m_sSid); RFX_Byte(pFX, _T("[tClass]"), m_tClass); RFX_Int(pFX, _T("[tInc]"), m_tInc); RFX_Byte(pFX, _T("[tLevel]"), m_tLevel); RFX_Byte(pFX, _T("[tRepair]"), m_tRepair); RFX_Byte(pFX, _T("[tSuccess]"), m_tSuccess); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CSkillTableSet diagnostics #ifdef _DEBUG void CSkillTableSet::AssertValid() const { CRecordset::AssertValid(); } void CSkillTableSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/GuildStoreSet.h #if !defined(AFX_GUILDSTORESET_H__6A62B5B7_5302_4450_B79E_28D8C20E892F__INCLUDED_) #define AFX_GUILDSTORESET_H__6A62B5B7_5302_4450_B79E_28D8C20E892F__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // GuildStoreSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CGuildStoreSet recordset class CGuildStoreSet : public CRecordset { public: CGuildStoreSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CGuildStoreSet) // Field/Param Data //{{AFX_FIELD(CGuildStoreSet, CRecordset) int m_sStoreID; long m_iGuildSid; CString m_strGuildName; CString m_strMasterName; int m_sTaxRate; long m_iGuildDN; CTime m_GetTime; CTime m_WarTime; CTime m_WarPlan; BYTE m_tWarType; CByteArray m_strAttackList; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CGuildStoreSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_GUILDSTORESET_H__6A62B5B7_5302_4450_B79E_28D8C20E892F__INCLUDED_) <file_sep>/EventZone.h // EventZone.h: interface for the CEventZone class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_EVENTZONE_H__682FC698_6EF0_4D7D_8E16_3F4FB2D094FA__INCLUDED_) #define AFX_EVENTZONE_H__682FC698_6EF0_4D7D_8E16_3F4FB2D094FA__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 #include "GuildHouseWar.h" #define MAX_EVENT_USER 4 // 현재 최대 인원수는 버디인원수로 제한한다. #define MAX_SINGLE_EVENT 5 // struct _EVENT_ZONE { long m_lUsed; long m_lCurNum; short m_sEventZone; BYTE m_tSuccessType; BYTE m_tStartType; BYTE m_tEventTime; DWORD m_dwStartTime; DWORD m_dwInterTick; UserList m_arUserList[MAX_EVENT_USER]; CWordArray m_arNpcList; }; class CEventZone { public: void SingleInit(); int GetEmptyEventZone(); void RemoveUserInEventZone(int iEventZone); void CheckSingleEventZoneWarEnd(int iZone); CEventZone(); virtual ~CEventZone(); public: void CheckUserStateInEventZone(int iZone); CWordArray m_arEventZone; _EVENT_ZONE m_SingleEventZone[MAX_SINGLE_EVENT]; }; #endif // !defined(AFX_EVENTZONE_H__682FC698_6EF0_4D7D_8E16_3F4FB2D094FA__INCLUDED_) <file_sep>/EBodyUpgradeTable.cpp // EBodyUpgradeTable.cpp: implementation of the CEBodyUpgradeTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "EBodyUpgradeTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CEBodyUpgradeTable::CEBodyUpgradeTable() { m_tRandom1 = 0; m_tRandom2 = 0; m_tRandom3 = 0; } CEBodyUpgradeTable::~CEBodyUpgradeTable() { } <file_sep>/DressingSet.cpp // DressingSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "DressingSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CDressingSet IMPLEMENT_DYNAMIC(CDressingSet, CRecordset) CDressingSet::CDressingSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CDressingSet) m_sSid = 0; m_sItemNum = 0; m_sCountMin = 0; m_sCountMax = 0; m_sItem01 = 0; m_sItemRatio01 = 0; m_sItem02 = 0; m_sItemRatio02 = 0; m_sItem03 = 0; m_sItemRatio03 = 0; m_sItem04 = 0; m_sItemRatio04 = 0; m_sItem05 = 0; m_sItemRatio05 = 0; m_sItem06 = 0; m_sItemRatio06 = 0; m_sItem07 = 0; m_sItemRatio07 = 0; m_sItem08 = 0; m_sItemRatio08 = 0; m_sItem09 = 0; m_sItemRatio09 = 0; m_sItem10 = 0; m_sItemRatio10 = 0; m_nFields = 24; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CDressingSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CDressingSet::GetDefaultSQL() { return _T("[dbo].[DRESSING]"); } void CDressingSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CDressingSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sSid]"), m_sSid); RFX_Int(pFX, _T("[sItemNum]"), m_sItemNum); RFX_Int(pFX, _T("[sCountMin]"), m_sCountMin); RFX_Int(pFX, _T("[sCountMax]"), m_sCountMax); RFX_Int(pFX, _T("[sItem01]"), m_sItem01); RFX_Int(pFX, _T("[sItemRatio01]"), m_sItemRatio01); RFX_Int(pFX, _T("[sItem02]"), m_sItem02); RFX_Int(pFX, _T("[sItemRatio02]"), m_sItemRatio02); RFX_Int(pFX, _T("[sItem03]"), m_sItem03); RFX_Int(pFX, _T("[sItemRatio03]"), m_sItemRatio03); RFX_Int(pFX, _T("[sItem04]"), m_sItem04); RFX_Int(pFX, _T("[sItemRatio04]"), m_sItemRatio04); RFX_Int(pFX, _T("[sItem05]"), m_sItem05); RFX_Int(pFX, _T("[sItemRatio05]"), m_sItemRatio05); RFX_Int(pFX, _T("[sItem06]"), m_sItem06); RFX_Int(pFX, _T("[sItemRatio06]"), m_sItemRatio06); RFX_Int(pFX, _T("[sItem07]"), m_sItem07); RFX_Int(pFX, _T("[sItemRatio07]"), m_sItemRatio07); RFX_Int(pFX, _T("[sItem08]"), m_sItem08); RFX_Int(pFX, _T("[sItemRatio08]"), m_sItemRatio08); RFX_Int(pFX, _T("[sItem09]"), m_sItem09); RFX_Int(pFX, _T("[sItemRatio09]"), m_sItemRatio09); RFX_Int(pFX, _T("[sItem10]"), m_sItem10); RFX_Int(pFX, _T("[sItemRatio10]"), m_sItemRatio10); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CDressingSet diagnostics #ifdef _DEBUG void CDressingSet::AssertValid() const { CRecordset::AssertValid(); } void CDressingSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/EventItemTable.cpp // EventItemTable.cpp: implementation of the CEventItemTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "EventItemTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CEventItemTable::CEventItemTable() { m_tEnd = 0; m_sSid = 0; m_tType = 0; m_tGiveFlag = 0; m_tGiveItem = 0; ::ZeroMemory(m_strSerialNum, sizeof(m_strSerialNum)); m_dwTick = 0; m_lUsed = 0; } CEventItemTable::~CEventItemTable() { } <file_sep>/HuanshiTable.cpp ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "HuanshiTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CHuanshiTable::CHuanshiTable() { m_sChangeValue = 0; m_sRandom = 0; m_sSubType = 0; m_tLevel = 0; m_tNeedClass = 0; m_tSid = 0; m_tUpgrade = 0; m_tWearInfo = 0; } CHuanshiTable::~CHuanshiTable() { } int CHuanshiTable::GetMagicItemValue1() { return m_sChangeValue; } <file_sep>/ov.h #ifndef _OV_H_ #define _OV_H_ struct OV: public OVERLAPPED { OV() { Internal = InternalHigh = Offset = OffsetHigh = 0; hEvent = 0; } }; #endif<file_sep>/NpcItemSet.cpp // NpcItemSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "NpcItemSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CNpcItemSet IMPLEMENT_DYNAMIC(CNpcItemSet, CRecordset) CNpcItemSet::CNpcItemSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CNpcItemSet) m_sIndex = 0; m_sItem01 = 0; m_sItem02 = 0; m_sItem03 = 0; m_sItem04 = 0; m_sItem05 = 0; m_sItem06 = 0; m_sItem07 = 0; m_sItem08 = 0; m_sItem09 = 0; m_sItem10 = 0; m_sItem11 = 0; m_sItem12 = 0; m_sItem13 = 0; m_sItem14 = 0; m_sItem15 = 0; m_sItem16 = 0; m_sItem17 = 0; m_sItem18 = 0; m_sItem19 = 0; m_sItem20 = 0; m_sPersent01 = 0; m_sPersent02 = 0; m_sPersent03 = 0; m_sPersent04 = 0; m_sPersent05 = 0; m_sPersent06 = 0; m_sPersent07 = 0; m_sPersent08 = 0; m_sPersent09 = 0; m_sPersent10 = 0; m_sPersent11 = 0; m_sPersent12 = 0; m_sPersent13 = 0; m_sPersent14 = 0; m_sPersent15 = 0; m_sPersent16 = 0; m_sPersent17 = 0; m_sPersent18 = 0; m_sPersent19 = 0; m_sPersent20 = 0; m_nFields = 41; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CNpcItemSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CNpcItemSet::GetDefaultSQL() { return _T("[dbo].[MONSTER_ITEM]"); } void CNpcItemSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CNpcItemSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sIndex]"), m_sIndex); RFX_Int(pFX, _T("[sItem01]"), m_sItem01); RFX_Int(pFX, _T("[sPersent01]"), m_sPersent01); RFX_Int(pFX, _T("[sItem02]"), m_sItem02); RFX_Int(pFX, _T("[sPersent02]"), m_sPersent02); RFX_Int(pFX, _T("[sItem03]"), m_sItem03); RFX_Int(pFX, _T("[sPersent03]"), m_sPersent03); RFX_Int(pFX, _T("[sItem04]"), m_sItem04); RFX_Int(pFX, _T("[sPersent04]"), m_sPersent04); RFX_Int(pFX, _T("[sItem05]"), m_sItem05); RFX_Int(pFX, _T("[sPersent05]"), m_sPersent05); RFX_Int(pFX, _T("[sItem06]"), m_sItem06); RFX_Int(pFX, _T("[sPersent06]"), m_sPersent06); RFX_Int(pFX, _T("[sItem07]"), m_sItem07); RFX_Int(pFX, _T("[sPersent07]"), m_sPersent07); RFX_Int(pFX, _T("[sItem08]"), m_sItem08); RFX_Int(pFX, _T("[sPersent08]"), m_sPersent08); RFX_Int(pFX, _T("[sItem09]"), m_sItem09); RFX_Int(pFX, _T("[sPersent09]"), m_sPersent09); RFX_Int(pFX, _T("[sItem10]"), m_sItem10); RFX_Int(pFX, _T("[sPersent10]"), m_sPersent10); RFX_Int(pFX, _T("[sItem11]"), m_sItem11); RFX_Int(pFX, _T("[sPersent11]"), m_sPersent11); RFX_Int(pFX, _T("[sItem12]"), m_sItem12); RFX_Int(pFX, _T("[sPersent12]"), m_sPersent12); RFX_Int(pFX, _T("[sItem13]"), m_sItem13); RFX_Int(pFX, _T("[sPersent13]"), m_sPersent13); RFX_Int(pFX, _T("[sItem14]"), m_sItem14); RFX_Int(pFX, _T("[sPersent14]"), m_sPersent14); RFX_Int(pFX, _T("[sItem15]"), m_sItem15); RFX_Int(pFX, _T("[sPersent15]"), m_sPersent15); RFX_Int(pFX, _T("[sItem16]"), m_sItem16); RFX_Int(pFX, _T("[sPersent16]"), m_sPersent16); RFX_Int(pFX, _T("[sItem17]"), m_sItem17); RFX_Int(pFX, _T("[sPersent17]"), m_sPersent17); RFX_Int(pFX, _T("[sItem18]"), m_sItem18); RFX_Int(pFX, _T("[sPersent18]"), m_sPersent18); RFX_Int(pFX, _T("[sItem19]"), m_sItem19); RFX_Int(pFX, _T("[sPersent19]"), m_sPersent19); RFX_Int(pFX, _T("[sItem20]"), m_sItem20); RFX_Int(pFX, _T("[sPersent20]"), m_sPersent20); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CNpcItemSet diagnostics #ifdef _DEBUG void CNpcItemSet::AssertValid() const { CRecordset::AssertValid(); } void CNpcItemSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/MESSAGE.h // MESSAGE.h: interface for the MESSAGE class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_MESSAGE_H__6DE52814_F08F_4A8E_B63B_A5166B4924CB__INCLUDED_) #define AFX_MESSAGE_H__6DE52814_F08F_4A8E_B63B_A5166B4924CB__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class MESSAGE { public: MESSAGE(); virtual ~MESSAGE(); short m_ID; TCHAR m_MESSAGE_BODY[500]; }; #endif // !defined(AFX_MESSAGE_H__6DE52814_F08F_4A8E_B63B_A5166B4924CB__INCLUDED_) <file_sep>/ShopSystem.h #pragma once struct ShopItemData { int iSid; int iNum; char BuyName[255]; int iDn; int iAttribute[5]; int iUpgradeNum; int iColor; }; class ShopSystem { public: int m_QanBao_KG;//元宝券,JZB包开关 int m_iAllTime; //每间隔多少秒加元宝 int m_iAllDnNum; //每间隔多少元宝 int m_iZoneNum; int m_iZone[255]; //地图ID int m_iZoneTime; //每间隔多少秒加元宝 int m_iZoneDnNum; //每间隔多少元宝 ShopItemData m_Item[512]; int m_ItemNum; public: ShopSystem(void); ~ShopSystem(void); BOOL ShopLoadConfig(); BOOL IsZone(int iZone); int IsBuyItemName(char *name); }; <file_sep>/AddEventItemTableSet.cpp // AddEventItemTableSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "AddEventItemTableSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CAddEventItemTableSet IMPLEMENT_DYNAMIC(CAddEventItemTableSet, CRecordset) CAddEventItemTableSet::CAddEventItemTableSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CAddEventItemTableSet) m_sSid = 0; m_tType = 0; m_tGiveFlag = 0; m_tEnd = 0; m_tUsed = 0; m_strSerialNum = _T(""); m_nFields = 6; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CAddEventItemTableSet::GetDefaultConnect() { return _T("ODBC;DSN=drgamenew;UID=drgame;PWD=<PASSWORD>"); } CString CAddEventItemTableSet::GetDefaultSQL() { return _T("[dbo].[EVENT_TABLE_ADD1]"); } void CAddEventItemTableSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CAddEventItemTableSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sSid]"), m_sSid); RFX_Byte(pFX, _T("[tType]"), m_tType); RFX_Byte(pFX, _T("[tGiveFlag]"), m_tGiveFlag); RFX_Byte(pFX, _T("[tEnd]"), m_tEnd); RFX_Byte(pFX, _T("[tUsed]"), m_tUsed); RFX_Text(pFX, _T("[strSerialNum]"), m_strSerialNum); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CAddEventItemTableSet diagnostics #ifdef _DEBUG void CAddEventItemTableSet::AssertValid() const { CRecordset::AssertValid(); } void CAddEventItemTableSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/PsiStoreSet.cpp // PsiStoreSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "PsiStoreSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CPsiStoreSet IMPLEMENT_DYNAMIC(CPsiStoreSet, CRecordset) CPsiStoreSet::CPsiStoreSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CPsiStoreSet) m_sStoreID = 0; m_sPsiID = 0; m_nFields = 2; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CPsiStoreSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CPsiStoreSet::GetDefaultSQL() { return _T("[dbo].[PSI_STORE]"); } void CPsiStoreSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CPsiStoreSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sStoreID]"), m_sStoreID); RFX_Int(pFX, _T("[sPsiID]"), m_sPsiID); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CPsiStoreSet diagnostics #ifdef _DEBUG void CPsiStoreSet::AssertValid() const { CRecordset::AssertValid(); } void CPsiStoreSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/DressingSet.h #if !defined(AFX_DRESSINGSET_H__AFC036DE_1CE0_48A0_9781_DD0CF2CD176E__INCLUDED_) #define AFX_DRESSINGSET_H__AFC036DE_1CE0_48A0_9781_DD0CF2CD176E__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // DressingSet.h : header file // typedef struct _DRESSING_DATA { int sSid; int sItemSid; int sCountMin; int sCountMax; int sItem[10]; int sItemRatio[10]; } DRESSING_DATA; typedef struct _DRESSING_RESULT { int sSid; int sCount; } DRESSING_RESULT; typedef CTypedPtrArray <CPtrArray, DRESSING_RESULT*> DressingResultArray; ///////////////////////////////////////////////////////////////////////////// // CDressingSet recordset class CDressingSet : public CRecordset { public: CDressingSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CDressingSet) // Field/Param Data //{{AFX_FIELD(CDressingSet, CRecordset) int m_sSid; int m_sItemNum; int m_sCountMin; int m_sCountMax; int m_sItem01; int m_sItemRatio01; int m_sItem02; int m_sItemRatio02; int m_sItem03; int m_sItemRatio03; int m_sItem04; int m_sItemRatio04; int m_sItem05; int m_sItemRatio05; int m_sItem06; int m_sItemRatio06; int m_sItem07; int m_sItemRatio07; int m_sItem08; int m_sItemRatio08; int m_sItem09; int m_sItemRatio09; int m_sItem10; int m_sItemRatio10; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CDressingSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_DRESSINGSET_H__AFC036DE_1CE0_48A0_9781_DD0CF2CD176E__INCLUDED_) <file_sep>/GuildMapInfoTable.cpp // GuildMapInfoTable.cpp: implementation of the CGuildMapInfoTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "GuildMapInfoTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CGuildMapInfoTable::CGuildMapInfoTable() { } CGuildMapInfoTable::~CGuildMapInfoTable() { } <file_sep>/YhuoIni.cpp // YhuoIni.cpp: implementation of the YhuoIni class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "YhuoIni.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// YhuoIni::YhuoIni() { //[WEBURL] ::ZeroMemory(WEB_URL, sizeof(WEB_URL)); } YhuoIni::~YhuoIni() { } <file_sep>/MyDB.h // MyDB.h: interface for the CMyDB class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_MYDB_H__15818A22_88EA_4A91_8F64_8669DF55DEB6__INCLUDED_) #define AFX_MYDB_H__15818A22_88EA_4A91_8F64_8669DF55DEB6__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 #define MAX_DB_USE_TIME 2000 typedef struct _DB_CONNECTION { CDatabase db; BOOL bConnect; LONG lRef; int iCount; DWORD dwGetDBTime; // DWORD dwUseTime; } DB_CONNECTION; typedef CTypedPtrArray <CPtrArray, DB_CONNECTION*> DBArray; class CMyDB { public: void ReleaseDB(int index); CDatabase* GetDB(int &index); BOOL DBConnect(LPCTSTR strConnect); void Init(int num); DBArray m_arDB; int m_iTotalConnection; CString m_strConnect; CMyDB(); virtual ~CMyDB(); }; #endif // !defined(AFX_MYDB_H__15818A22_88EA_4A91_8F64_8669DF55DEB6__INCLUDED_) <file_sep>/CityNpcTableSet.cpp // CityNpcTableSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "CityNpcTableSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CCityNpcTableSet IMPLEMENT_DYNAMIC(CCityNpcTableSet, CRecordset) CCityNpcTableSet::CCityNpcTableSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CCityNpcTableSet) m_sSid = 0; m_sPid = 0; m_strName = _T(""); m_sSTR = 0; m_sDEX = 0; m_sVOL = 0; m_sWIS = 0; m_sMaxHP = 0; m_sMaxPP = 0; m_byClass = 0; m_byClassLevel = 0; m_sExp = 0; m_byAX = 0; m_byAY = 0; m_byAZ = 0; m_byRange = 0; m_sAI = 0; m_sAttackDelay = 0; m_byVitalC = 0; m_byWildShot = 0; m_byIronSkin = 0; m_byReAttack = 0; m_bySubAttack = 0; m_byState = 0; m_byPsi = 0; m_byPsiLevel = 0; m_bySearchRange = 0; m_sSpeed = 0; m_sInclination = 0; m_byColor = 0; m_sStandTime = 0; m_tNpcType = 0; m_sFamilyType = 0; m_tItemPer = 0; m_tDnPer = 0; m_byDefense = 0; m_byExciteRate = 0; m_nFields = 37; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CCityNpcTableSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CCityNpcTableSet::GetDefaultSQL() { return _T("[dbo].[NPC]"); } void CCityNpcTableSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CCityNpcTableSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sSid]"), m_sSid); RFX_Int(pFX, _T("[sPid]"), m_sPid); RFX_Text(pFX, _T("[strName]"), m_strName); RFX_Int(pFX, _T("[sSTR]"), m_sSTR); RFX_Int(pFX, _T("[sDEX]"), m_sDEX); RFX_Int(pFX, _T("[sVOL]"), m_sVOL); RFX_Int(pFX, _T("[sWIS]"), m_sWIS); RFX_Int(pFX, _T("[sMaxHP]"), m_sMaxHP); RFX_Int(pFX, _T("[sMaxPP]"), m_sMaxPP); RFX_Byte(pFX, _T("[byClass]"), m_byClass); RFX_Byte(pFX, _T("[byClassLevel]"), m_byClassLevel); RFX_Int(pFX, _T("[sExp]"), m_sExp); RFX_Byte(pFX, _T("[byAX]"), m_byAX); RFX_Byte(pFX, _T("[byAY]"), m_byAY); RFX_Byte(pFX, _T("[byAZ]"), m_byAZ); RFX_Byte(pFX, _T("[byRange]"), m_byRange); RFX_Int(pFX, _T("[sAI]"), m_sAI); RFX_Int(pFX, _T("[sAttackDelay]"), m_sAttackDelay); RFX_Byte(pFX, _T("[byVitalC]"), m_byVitalC); RFX_Byte(pFX, _T("[byWildShot]"), m_byWildShot); RFX_Byte(pFX, _T("[byIronSkin]"), m_byIronSkin); RFX_Byte(pFX, _T("[byReAttack]"), m_byReAttack); RFX_Byte(pFX, _T("[bySubAttack]"), m_bySubAttack); RFX_Byte(pFX, _T("[byState]"), m_byState); RFX_Byte(pFX, _T("[byPsi]"), m_byPsi); RFX_Byte(pFX, _T("[byPsiLevel]"), m_byPsiLevel); RFX_Byte(pFX, _T("[bySearchRange]"), m_bySearchRange); RFX_Int(pFX, _T("[sSpeed]"), m_sSpeed); RFX_Int(pFX, _T("[sInclination]"), m_sInclination); RFX_Byte(pFX, _T("[byColor]"), m_byColor); RFX_Int(pFX, _T("[sStandTime]"), m_sStandTime); RFX_Byte(pFX, _T("[tNpcType]"), m_tNpcType); RFX_Int(pFX, _T("[sFamilyType]"), m_sFamilyType); RFX_Byte(pFX, _T("[tItemPer]"), m_tItemPer); RFX_Byte(pFX, _T("[tDnPer]"), m_tDnPer); RFX_Int(pFX, _T("[byDefense]"), m_byDefense); RFX_Byte(pFX, _T("[byExciteRate]"), m_byExciteRate); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CCityNpcTableSet diagnostics #ifdef _DEBUG void CCityNpcTableSet::AssertValid() const { CRecordset::AssertValid(); } void CCityNpcTableSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/EBodyIdentifyTable.h // EBodyIdentifyTable.h: interface for the CEBodyIdentifyTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_EBODYIDENTIFYTABLE_H__9C0BBC3E_971C_4C98_8267_62C2E7DDA180__INCLUDED_) #define AFX_EBODYIDENTIFYTABLE_H__9C0BBC3E_971C_4C98_8267_62C2E7DDA180__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CEBodyIdentifyTable { public: CEBodyIdentifyTable(); virtual ~CEBodyIdentifyTable(); short m_sSid; }; #endif // !defined(AFX_EBODYIDENTIFYTABLE_H__9C0BBC3E_971C_4C98_8267_62C2E7DDA180__INCLUDED_) <file_sep>/LevelUpTable.cpp // LevelUpTable.cpp: implementation of the CLevelUpTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "LevelUpTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CLevelUpTable::CLevelUpTable() { m_sLevel = 1; m_dwExp = 0; } CLevelUpTable::~CLevelUpTable() { } <file_sep>/COM.cpp /////////////////////////////////////////////////////////////////// // COM.cpp : implementation file for the COM class // Copyright (C) 2000, <NAME> // // If this code works, it was written by <NAME>. // If not, I don't know who wrote it. // #include "stdafx.h" #include "Server.h" #include "USER.h" #include "COM.h" #include "BufferEx.h" #include "Extern.h" #include <process.h> #include "ServerDlg.h" #include "Search.h" #include "RoyalRumble.h" #include "EventZone.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif #define GUILD_BEGIN_WAR_TIME 10 ////////////////////////////////////////////////////////////////////// // Global Variable // IKING 2001.1. extern CServerDlg *g_pMainDlg; extern int MAXPOSSIBLE_USER; extern CSearch *g_pUserList; extern CRITICAL_SECTION m_CS_SqlData; extern long nSqlDataCount; extern CPtrList RecvSqlData; extern CRoyalRumble g_RR; CString winName; HANDLE g_hIOCP = NULL; SOCKET g_sdListen = INVALID_SOCKET; WSAEVENT g_hListenEvent = WSA_INVALID_EVENT; _int64 g_OnlineEnd = 0; _int64 g_OnlineMinStart = 0; _int64 g_Online_Update_Min_ticks = 0; long g_GameTime = 0; long g_GameDay = 0; long g_GameMinute = 0; int PKnum; //PK剩余人数 DWORD ShaRenNum; //杀人王最大值 CString ShaRenName; //杀人王姓名 BOOL ShaRenEnd; BOOL PKover; //标识PK结束 volatile long g_bShutDown = 0; int mIndex = 0; // Quest Event Class CEventZone g_QuestEventZone; /////////////////////////////////////////////////////////////////////////// // Global functions //DWORD WINAPI SendThreadMain(LPVOID pVoid) //unsigned __stdcall SendThreadMain( void *pVoid ) UINT SendThreadMain( void *pVoid ) { COM* pCom = (COM*)pVoid; int nRet = 0; int iRemainCount = 0; DWORD dwBytesTransferred = 0; DWORD dwKey = 0; LPOVERLAPPED pOverlapped = NULL; SEND_DATA* pSendData = NULL; SEND_DATA* pNewData = NULL; int i = 0; int modsid = 0; CRITICAL_SECTION* pSendCS = NULL; CPtrList LocalDataList; int nLocalDataCount = 0; while( TRUE ) { nRet = ::GetQueuedCompletionStatus(pCom->m_hSendIOCP, &dwBytesTransferred, &dwKey, &pOverlapped, INFINITE); // assume we are always using an INFINITE timeout if( nRet == FALSE && pOverlapped == NULL ) { TRACE("####Error In SendThreadMain()\n"); break; } if( nRet == FALSE ) continue; modsid = (int)dwKey; if( modsid < 0 || modsid > AUTOMATA_THREAD ) continue; pSendCS = &(pCom->m_critSendData[modsid]); if( !pSendCS ) continue; pCom->nSendDataCount[modsid] = pCom->SendDataList[modsid].GetCount(); if( pCom->nSendDataCount[modsid] < 1 ) continue; EnterCriticalSection( pSendCS ); while( pCom->nSendDataCount[modsid] >= 1 ) { pSendData = (SEND_DATA*)pCom->SendDataList[modsid].RemoveHead(); pCom->nSendDataCount[modsid] = pCom->SendDataList[modsid].GetCount(); if( pSendData ) { LocalDataList.AddTail( pSendData ); } } LeaveCriticalSection( pSendCS ); nLocalDataCount = LocalDataList.GetCount(); while( nLocalDataCount >= 1 ) { pSendData = (SEND_DATA*)LocalDataList.RemoveHead(); nLocalDataCount = LocalDataList.GetCount(); if( !pSendData ) continue; pCom->Send( pSendData ); delete pSendData; pSendData = NULL; } } return 0; } //DWORD WINAPI TimerThread(LPVOID lParam) //unsigned __stdcall TimerThread(void *lParam) UINT TimerThread(void *lParam) { COM* pCom = (COM*) lParam; BOOL bRain = FALSE; int i, iTime = 0; int iUserCountUpdate = 0; int iRainTime[30] = {1, 1, 14, 11, 14, 16, 11, 13, 16, 12, 11, 12, 13, 11, 14, 10, 11, 13, 15, 12, 14, 12, 9, 11, 14, 16, 16, 13, 16, 12}; g_GameTime = 3; // 抛胶飘甫 困秦... while(TRUE) { //线程退出 if(g_bShutDown) break; pCom->CheckCurrentUserTime(); pCom->CheckMessageTime(); pCom->CheckPKTime(); pCom->CheckPKShaRenTime(); pCom->CheckDevilTime(); pCom->CheckGuildWarTime(); // 辨靛傈牢瘤 眉农茄促. pCom->UpdateUnLineUserTime(); ////离线更新数据 pCom->CheckFortressWarTime(); g_RR.CheckRRStatus(); QueryPerformanceCounter((LARGE_INTEGER*)&g_OnlineEnd); if((g_OnlineEnd - g_OnlineMinStart) >= g_Online_Update_Min_ticks) { g_OnlineMinStart += g_Online_Update_Min_ticks; InterlockedExchangeAdd(&g_GameMinute, 10); // 公炼扒 10盒究 刘啊 // 60盒 捞搁 0栏肺 檬扁拳 if(g_GameMinute == 60) { ::InterlockedExchange(&g_GameMinute, 0); InterlockedExchangeAdd(&g_GameTime, 1); // 1矫埃究 刘啊茄促. if(g_GameTime == 24) { InterlockedExchangeAdd(&g_GameDay, 1); // 24矫捞搁 窍风 刘啊茄促. ::InterlockedExchange(&g_GameTime, 1); // 矫埃篮 1矫肺 悸泼 } if(g_GameDay == 31) ::InterlockedExchange(&g_GameDay, 1);// 朝楼档 窍风 pCom->SendTimeToAll(); if(iRainTime[g_GameDay] == g_GameTime) { // TRACE("厚棵矫埃牢单...Day : %d, Time : %d\n", g_GameDay, iRainTime[g_GameDay]); for(i = 0; i < g_WeatherZone.GetSize(); i++) { // if(rand()%2 == 0) //犬伏 50% // { // TRACE("厚啊柯促...Day : %d, Time : %d\n", g_GameDay, iRainTime[g_GameDay]); bRain = TRUE; iTime = g_GameTime; g_WeatherZone[i]->bRaining = TRUE; pCom->SetWeatherZone(g_WeatherZone[i]->iZone, 2, 1); // } } } if(bRain && (iTime+2 == g_GameTime)) { // TRACE("厚~场...Day : %d, Time : %d\n", g_GameDay, iRainTime[g_GameDay]); for(i = 0; i < g_WeatherZone.GetSize(); i++) { if(g_WeatherZone[i]->bRaining) { bRain = FALSE; g_WeatherZone[i]->bRaining = FALSE; pCom->SetWeatherZone(g_WeatherZone[i]->iZone, 2, 0); } } } } // TRACE("瘤陛 盒俊辑绰 矫埃 %d : %d盒\n", g_GameTime, g_GameMinute); } Sleep(1000); /* iUserCountUpdate++; if( iUserCountUpdate >= 600 ) { iUserCountUpdate = 0; pCom->UpdateUserCount(); */ /* int Datalength; SQLDATAPACKET *pSDP; pSDP = new SQLDATAPACKET; pSDP->code = DB_USER_COUNT; Datalength = 0; pSDP->dcount = Datalength; pSDP->UID = 0; pSDP->pData = NULL; EnterCriticalSection( &m_CS_SqlData ); RecvSqlData.AddTail(pSDP); nSqlDataCount = RecvSqlData.GetCount(); LeaveCriticalSection( &m_CS_SqlData ); */ // } } return 1; } //////////////////////////////////////////////////////////////////////// // Event 扁埃阑 悸泼窍扁 困茄 Thread // UINT EventTimerThread(void *lParam) { COM* pCom = (COM*) lParam; while(TRUE) { /* SYSTEMTIME st; ::GetLocalTime(&st); COleDateTime oleEventStart(2002, 12, 27, 9, 0, 0), oleEventEnd(2002, 12, 28, 9, 0, 0); COleDateTime oleToday; if(st.wYear == 2002 && st.wMonth == 12) { if(st.wDay >= 27 && st.wDay <= 28) { oleToday.SetDateTime(st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); if(oleToday >= oleEventStart && oleToday <= oleEventEnd) g_iMoonEvent = 1; else g_iMoonEvent = 1; } else { g_iMoonEvent = 1; } } else { g_iMoonEvent = 1; } */ //线程退出 if(g_bShutDown) break; pCom->ThrowItemRefresh(); Sleep(1000 * 30); } return 1; } ////////////////////////////////////////////////////////////////////// // Construction/Destruction COM::COM() { m_bInit = FALSE; m_dwConcurrency = 1; } COM::~COM() { if(m_bInit) { for(int i = 0; i < AUTOMATA_THREAD+1; i++) DeleteCriticalSection( &m_critSendData[i] ); DeleteCriticalSection( &m_critGuild ); DeleteCriticalSection( &m_critEvent ); // IKING 2001.1. DeleteCriticalSection( &m_critThrowItem ); // DeleteAll(); } } /////////////////////////////////////////////////////////////////////// // 劝寸沁带 皋葛府甫 秦力茄促. // void COM::DeleteAll() { DeleteThrowItemArray(); } ///////////////////////////////////////////////////////////////////////// // Item Array 昏力 // void COM::DeleteThrowItemArray() { int i; for(i = 0; i < MAX_THROW_ITEM; i++ ) { if( m_ThrowItemArray[i] ) { delete m_ThrowItemArray[i]; m_ThrowItemArray[i] = NULL; } } } ///////////////////////////////////////////////////////////////////////// // 甘俊 阶捞绰 酒捞袍 Array 檬扁拳 // void COM::InitThrowItemArray() { int i; CThrowItem *f=NULL; CThrowItem *n=NULL; m_ThrowAddIndex = 0; for(i = 0; i < MAX_THROW_ITEM; i++) { m_ThrowItemArray[i] = new CThrowItem; m_ThrowItemArray[i]->ID=i; if(n!=NULL) n->next=m_ThrowItemArray[i]; n=m_ThrowItemArray[i]; m_ThrowItemArray[i]->front=f; f=m_ThrowItemArray[i]; } m_ThrowItemArray[MAX_THROW_ITEM-1]->next=NULL; m_ThrowItemUse=NULL;//已被使用物品队列 m_ThrowItemUnuse=m_ThrowItemArray[0];//未被使用用的物品队列 } bool COM::ThrowItemAdd(ItemList *pItem, int x, int y, int z) { CThrowItem *ItemAdd; MAP* pAddMap = g_zone[z]; EnterCriticalSection( &m_critThrowItem ); //从空队列取一点 ItemAdd=ItemGetID(); if(ItemAdd==NULL){ LeaveCriticalSection( &m_critThrowItem ); return false; } //把新物品存到取出结构中 ItemAdd->m_pItem = pItem; ItemAdd->m_z = z; ItemAdd->m_x = x; ItemAdd->m_y = y; ItemAdd->dTime=GetTickCount(); //把取出结构插到已经用的队列中 ItemUseInset(ItemAdd); pAddMap->m_pMap[x][y].iIndex = ItemAdd->ID; LeaveCriticalSection( &m_critThrowItem ); return true; } CThrowItem * COM::ItemGetID() { CThrowItem *Item=NULL; Item=m_ThrowItemUnuse; if(m_ThrowItemUnuse==NULL) return NULL; m_ThrowItemUnuse=m_ThrowItemUnuse->next; if(m_ThrowItemUnuse!=NULL) m_ThrowItemUnuse->front=NULL; return Item; } void COM::ItemUseInset(CThrowItem *Add) { CThrowItem *tmp; tmp=m_ThrowItemUse; m_ThrowItemUse=Add; Add->next=tmp; if(tmp!=NULL) tmp->front=Add; } void COM::ThrowItemDel(CThrowItem *Del) { CThrowItem *tmp; //删除节点 if(m_ThrowItemUse==Del){ //头节点删除 m_ThrowItemUse=Del->next; if(m_ThrowItemUse!=NULL) m_ThrowItemUse->front=NULL; }else{ if(Del->next!=NULL){ //尾节点删除 Del->next->front=Del->front; } Del->front->next=Del->next; } //将未用的节点放入未用的队列中 tmp=m_ThrowItemUnuse; m_ThrowItemUnuse=Del; Del->next=tmp; if(tmp!=NULL) tmp->front=Del; } //服务器所有物品刷新 void COM::ThrowItemRefresh() { CThrowItem *p; DWORD TimeNow=GetTickCount(); p=m_ThrowItemUse; EnterCriticalSection( &m_critThrowItem ); // while(p){ for(int i=0 ;i<MAX_THROW_ITEM;i++){ p=m_ThrowItemArray[i]; if(!p->m_pItem){ /* MAP* pAddMap = g_zone[p->m_z]; int mapIndex= pAddMap->m_pMap[p->m_x][p->m_y].iIndex; */ continue; } //刷新时间到了 清掉这个物品 // if((TimeNow - p->m_pItem->dwTime) > (180 *`1000)){ //TRACE( "牛牛物品刷新 %d\n", TimeNow - p->dTime); if((TimeNow - p->dTime) > (60 * 1000 * 5)){ // SendItemFieldInfo(ITEM_INFO_PICKUP, p->m_pItem, p->m_x,p->m_y,p->m_z); MAP* pAddMap = g_zone[p->m_z]; int mapIndex= pAddMap->m_pMap[p->m_x][p->m_y].iIndex; if(mapIndex!=-1){ if ( m_ThrowItemArray[mapIndex] != NULL ) //清除 { if(m_ThrowItemArray[mapIndex]->m_pItem) { CPoint t = ConvertToClient( p->m_x, p->m_y, pAddMap->m_vMoveCell.m_vDim.cx, pAddMap->m_vMoveCell.m_vDim.cy ); if( t.x == -1 || t.y == -1 ) continue; int index = 0; char temp_send[1024]; SetByte( temp_send, FIELD_ITEM_INFO, index ); SetShort( temp_send, 1, index ); SetByte( temp_send, ITEM_INFO_DELETE, index ); SetShort( temp_send, t.x, index ); SetShort( temp_send, t.y, index ); if(m_ThrowItemArray[mapIndex]->m_pItem->tType == TYPE_ITEM) { SetShort( temp_send, m_ThrowItemArray[mapIndex]->m_pItem->sSid, index ); SetDWORD( temp_send, m_ThrowItemArray[mapIndex]->m_pItem->sCount, index ); SetByte( temp_send, m_ThrowItemArray[mapIndex]->m_pItem->tIQ, index); } else { SetShort( temp_send, TYPE_MONEY_SID, index ); SetDWORD( temp_send, m_ThrowItemArray[mapIndex]->m_pItem->dwMoney, index ); SetByte( temp_send, 0, index); } SEND_DATA* pNewData = NULL; pNewData = new SEND_DATA; if( !pNewData ) return; pNewData->flag = SEND_INSIGHT; pNewData->len = index; memcpy( pNewData->pBuf, temp_send, index ); pNewData->uid = 0; pNewData->x = p->m_x; pNewData->y = p->m_y; pNewData->z = pAddMap->m_Zone; pNewData->zone_index = m_ThrowItemArray[mapIndex]->m_z; Send(pNewData); delete pNewData; delete m_ThrowItemArray[mapIndex]->m_pItem; m_ThrowItemArray[mapIndex]->m_pItem = NULL; ThrowItemDel(m_ThrowItemArray[mapIndex]); pAddMap->m_pMap[p->m_x][p->m_y].iIndex=-1; } } } } } LeaveCriticalSection( &m_critThrowItem ); } BOOL COM::Init(int port) { GetCPUCount(); char name[255]; // PHOSTENT hostinfo; if( gethostname ( name, sizeof(name)) == 0) { // if((hostinfo = gethostbyname(name)) != NULL) // { // m_strIP = inet_ntoa (*(struct in_addr *)*hostinfo->h_addr_list); // } m_strIP.Format( "%s", name ); } for( int i = 0; i < AUTOMATA_THREAD+1; i++ ) InitializeCriticalSection( &m_critSendData[i] ); InitializeCriticalSection( &m_critGuild ); InitializeCriticalSection( &m_critEvent ); // IKING 2001.1. InitializeCriticalSection( &m_critThrowItem ); // InitThrowItemArray(); CreateTimerThread(); CreateEventTimerThread(); // New Send Thread - alisia // InitSendThread(); // CreateSendThread(); m_bInit = TRUE; m_TotalUserCount = 0; return TRUE; } void COM::SendItemFieldInfo(BYTE type, ItemList *pItem, int x, int y,int z) { if(!pItem) return; CBufferEx TempBuf; TempBuf.Add(FIELD_ITEM_INFO); TempBuf.Add((short)0x01); TempBuf.Add(type); TempBuf.Add((short)x); TempBuf.Add((short)y); if(pItem->tType == TYPE_ITEM) { TempBuf.Add(pItem->sSid); TempBuf.Add((DWORD)pItem->sCount); TempBuf.Add((BYTE)pItem->tIQ); } else { TempBuf.Add(TYPE_MONEY_SID); TempBuf.Add(pItem->dwMoney); TempBuf.Add((BYTE)0); } SendInsight(TempBuf, TempBuf.GetLength(),x,y,z); } void COM::SendInsight(TCHAR *pBuf, int nLength,int x,int y,int z) { if(nLength <= 0 || nLength >= SEND_BUF_SIZE) return; SEND_DATA* pNewData = NULL; pNewData = new SEND_DATA; if(pNewData == NULL) return; pNewData->flag = SEND_INSIGHT; pNewData->len = nLength; ::CopyMemory(pNewData->pBuf, pBuf, nLength); pNewData->uid = 0; pNewData->x = x; pNewData->y = y; pNewData->z = z; pNewData->zone_index = 0; Send(pNewData); if(pNewData) delete pNewData; } ///////////////////////////////////////////////////////////////////////////////////////// // Send 甫 淬寸且 Thread甫 父电促. (泅犁绰 CPU 肮荐 * 2) // /* void COM::CreateSendThread() { // ensure we aren't wiping out a valid completion port ASSERT( m_hSendIOCP == INVALID_HANDLE_VALUE ); m_hSendIOCP = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0); if(SErr(m_hSendIOCP == NULL, _T("CreateWorkerThread"))) return; HANDLE hThread = NULL; unsigned int dwWorkerId; for(DWORD i = 0; i < AUTOMATA_THREAD * 2; i++) { //if(SErr((hThread = ::CreateThread( NULL, 0, SendThreadMain, (LPVOID)this, 0, &dwWorkerId)) == NULL, _T("CreateSendThread"))) return; if(SErr((hThread = (HANDLE)_beginthreadex( NULL, 0, SendThreadMain, (LPVOID)this, 0, &dwWorkerId)) == NULL, _T("CreateSendThread"))) return; //if(SErr((hThread = (HANDLE)_beginthreadex( NULL, 0, &SendThreadMain, (LPVOID)this, 0, &dwWorkerId)) == NULL, _T("CreateSendThread"))) return; //m_pSendThread = AfxBeginThread(SendThreadMain, this, THREAD_PRIORITY_NORMAL, 0, CREATE_SUSPENDED); //ASSERT_POINTER( m_pSendThread, CWinThread ); } } */ /////////////////////////////////////////////////////////////////////////////////// // global Time甫 蜡瘤且 Timer Thread 甫 父电促. // BOOL COM::CreateTimerThread() { //DWORD id; //unsigned int id; //DWORD dwThreadAffinityMask = 0; //HANDLE hTimerThread; _int64 frequence = 0; if(!QueryPerformanceFrequency((LARGE_INTEGER*)&frequence)) return FALSE; // g_Online_Update_ticks = (UPDATE_TICKS * frequence) / 1000; // 500000 g_Online_Update_Min_ticks = (UPDATE_TICKS * frequence) / 5000; // 100000 // QueryPerformanceCounter((LARGE_INTEGER*)&g_OnlineStart); QueryPerformanceCounter((LARGE_INTEGER*)&g_OnlineMinStart); //hTimerThread = ::CreateThread( NULL, 0, TimerThread, (LPVOID)this, 0, &id); //hTimerThread = (HANDLE)_beginthreadex( NULL, 0, &TimerThread, (LPVOID)this, 0, &id); m_hTimerThread = AfxBeginThread( TimerThread, (LPVOID)this ); //if(SErr(NULL == hTimerThread, _T("CreateTimerThread"))) return FALSE; //DWORD result = SetThreadAffinityMask (hTimerThread, dwThreadAffinityMask); //if(result == 0) return FALSE; return TRUE; } /////////////////////////////////////////////////////////////////////////////////// // Create Event Timer Thread // BOOL COM::CreateEventTimerThread() { AfxBeginThread(EventTimerThread, (LPVOID)this ); return TRUE; } //////////////////////////////////////////////////////////////////////////////////// // 秦寸 粮甫 茫酒 朝揪, 矫埃甫 舅妨霖促. // void COM::SetWeatherZone(int iZone, BYTE tType, BYTE tOnOff) { // alisia BOOL bFound = FALSE; int iZoneIndex = 0,i; for( i = 0; i < g_cell.GetSize(); i++ ) { if( g_cell[i] ) { if( g_cell[i]->m_Zone == iZone ) { bFound = TRUE; iZoneIndex = i; break; } } } if( !bFound ) return; // alisia USER *pUser1 = NULL; USER *pUser2 = NULL; for( i = 0; i < MAX_USER; i += 2 ) { pUser1 = g_cell[iZoneIndex]->m_pCom->GetUserUid(i); pUser2 = g_cell[iZoneIndex]->m_pCom->GetUserUid(i+1); if(pUser1 != NULL && pUser1->m_state == STATE_GAMESTARTED && pUser1->m_curz == iZone) pUser1->SetWeather(tType, tOnOff); if(pUser2 != NULL && pUser2->m_state == STATE_GAMESTARTED && pUser2->m_curz == iZone) pUser2->SetWeather(tType, tOnOff); pUser1 = NULL; pUser2 = NULL; } } //////////////////////////////////////////////////////////////////////////////////// // 立加茄 葛电 蜡历俊霸 矫埃阑 舅妨霖促. // void COM::SendTimeToAll() { BYTE tType = 1; USER *pUser = NULL; for(int i = 0; i < MAX_USER; i++) { pUser = GetUserUid(i); if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) continue; pUser->SetWeather(tType, 1); } } //////////////////////////////////////////////////////////////////////// // CPU狼 肮荐甫 备茄促. // void COM::GetCPUCount() { SYSTEM_INFO SystemInfo; GetSystemInfo(&SystemInfo); m_dwConcurrency = SystemInfo.dwNumberOfProcessors; } //////////////////////////////////////////////////////////////////////// // Send Data // void COM::Send(SEND_DATA *pData) { USER* pUser = NULL; MAP* pMap = NULL; int min_x, max_x, min_y, max_y; int sx, sy; int i, j; int temp_uid; if( !pData ) { return; } if( !pData->len || pData->len <= 0 || pData->len >= RECV_BUF_SIZE ) { TRACE("Data Length OverFlow - Send(SEND_DATA *pData)\n"); return; } switch( pData->flag ) { case SEND_USER: if( pData->uid < 0 || pData->uid >= MAX_USER ) { return; } pUser = GetUserUid(pData->uid); if ( pUser == NULL ) return; if( pUser->m_state == STATE_DISCONNECTED || pUser->m_state == STATE_LOGOUT ) { return; } Send( pUser, pData->pBuf, pData->len ); break; case SEND_INSIGHT: sx = pData->x / SIGHT_SIZE_X; sy = pData->y / SIGHT_SIZE_Y; min_x = (sx-1)*SIGHT_SIZE_X; if( min_x < 0 ) min_x = 0; max_x = (sx+2)*SIGHT_SIZE_X; min_y = (sy-1)*SIGHT_SIZE_Y; if( min_y < 0 ) min_y = 0; max_y = (sy+2)*SIGHT_SIZE_Y; pMap = g_zone[pData->zone_index]; if( !pMap ) return; if( max_x >= pMap->m_sizeMap.cx ) max_x = pMap->m_sizeMap.cx - 1; if( max_y >= pMap->m_sizeMap.cy ) max_y = pMap->m_sizeMap.cy - 1; for( i = min_x; i < max_x; i++ ) { for( j = min_y; j < max_y; j++ ) { temp_uid = pMap->m_pMap[i][j].m_lUser; if(temp_uid < USER_BAND || temp_uid >= NPC_BAND) continue; else temp_uid -= USER_BAND; if( temp_uid >= 0 && temp_uid < MAX_USER ) { pUser = GetUserUid(temp_uid); if ( pUser == NULL ) continue; if( pUser->m_state == STATE_GAMESTARTED ) { if( pUser->m_curx == i && pUser->m_cury == j && pUser->m_curz == pData->z ) { Send( pUser, pData->pBuf, pData->len ); } } } } } break; case SEND_RANGE: pMap = g_zone[pData->zone_index]; if( !pMap ) return; min_x = pData->rect.left; if(min_x < 0 ) min_x = 0; max_x = pData->rect.right; if(max_x >= pMap->m_sizeMap.cx ) max_x = pMap->m_sizeMap.cx - 1; min_y = pData->rect.top; if(min_y < 0 ) min_y = 0; max_y = pData->rect.bottom; if(max_y >= pMap->m_sizeMap.cy ) max_y = pMap->m_sizeMap.cy - 1; for( i = min_x; i < max_x; i++ ) { for( j = min_y; j < max_y; j++ ) { temp_uid = pMap->m_pMap[i][j].m_lUser; if(temp_uid < USER_BAND || temp_uid >= NPC_BAND) continue; else temp_uid -= USER_BAND; if( temp_uid >= 0 && temp_uid < MAX_USER ) { pUser = GetUserUid(temp_uid); // IKING 2001.1. if ( pUser == NULL ) continue; // if( pUser->m_state == STATE_GAMESTARTED ) { if( pUser->m_curx == i && pUser->m_cury == j && pUser->m_curz == pData->z ) { Send( pUser, pData->pBuf, pData->len); } } } } } break; case SEND_ZONE: for(i = 0; i < MAX_USER; i++) { pUser = GetUserUid(i); if( !pUser ) continue; if( pUser->m_state != STATE_GAMESTARTED ) continue; if( pData->z == pUser->m_curz ) { if( pUser->m_state == STATE_GAMESTARTED ) { Send( pUser, pData->pBuf, pData->len ); } } } break; case SEND_ALL: for(i = 0; i < MAX_USER; i++) { pUser = GetUserUid(i); if( !pUser ) continue; if( pUser->m_state != STATE_GAMESTARTED ) continue; Send( pUser, pData->pBuf, pData->len ); } break; case SEND_SCREEN: min_x = pData->x - 16; if( min_x < 0 ) min_x = 0; max_x = pData->x + 16; min_y = pData->y - 16; if( min_y < 0 ) min_y = 0; max_y = pData->y + 16; pMap = g_zone[pData->zone_index]; if( !pMap ) return; if( max_x >= pMap->m_sizeMap.cx ) max_x = pMap->m_sizeMap.cx - 1; if( max_y >= pMap->m_sizeMap.cy ) max_y = pMap->m_sizeMap.cy - 1; for( i = min_x; i < max_x; i++ ) { for( j = min_y; j < max_y; j++ ) { if( ( abs( pData->x - i ) + abs( pData->y - j ) ) > 16 ) continue; temp_uid = pMap->m_pMap[i][j].m_lUser; if(temp_uid < USER_BAND || temp_uid >= NPC_BAND) continue; else temp_uid -= USER_BAND; if( temp_uid >= 0 && temp_uid < MAX_USER ) { pUser = GetUserUid(temp_uid); if ( pUser == NULL ) continue; if( pUser->m_state == STATE_GAMESTARTED ) { Send( pUser, pData->pBuf, pData->len ); } } } } break; default: break; } } //////////////////////////////////////////////////////////////////////// // Send Data // void COM::Send(USER *pUser, TCHAR *pBuf, int nLength) { if(pUser == NULL) return; pUser->Send(pBuf, nLength); } //向地面物品数组添加一个物品 BOOL COM::SetThrowItem(ItemList *pItem, int x, int y, int z) { if( !pItem ) { return FALSE; } MAP* pAddMap = g_zone[z]; if( !pAddMap ) return FALSE; if( pItem->tType != TYPE_MONEY && pItem->tType != TYPE_ITEM) return FALSE; CPoint t = ConvertToClient( x, y, pAddMap->m_vMoveCell.m_vDim.cx, pAddMap->m_vMoveCell.m_vDim.cy ); if( t.x == -1 || t.y == -1 ) return FALSE; /* IKING 2001.1. EnterCriticalSection( &m_critThrowItem ); m_ThrowItemArray[m_ThrowAddIndex]->m_pItem = pItem; m_ThrowItemArray[m_ThrowAddIndex]->m_z = z; // 开矫 粮锅龋啊 酒聪绊 Zone Index 捞促 m_ThrowItemArray[m_ThrowAddIndex]->m_x = x; m_ThrowItemArray[m_ThrowAddIndex]->m_y = y; pAddMap->m_pMap[x][y].iIndex = m_ThrowAddIndex; m_ThrowAddIndex++; if( m_ThrowAddIndex >= MAX_THROW_ITEM ) m_ThrowAddIndex = 0; LeaveCriticalSection( &m_critThrowItem ); */ if(ThrowItemAdd(pItem, x,y,z)==false) return false; int index = 0; char temp_send[1024]; SetByte( temp_send, FIELD_ITEM_INFO, index ); SetShort( temp_send, 1, index ); SetByte( temp_send, ITEM_INFO_MODIFY, index ); SetShort( temp_send, t.x, index ); SetShort( temp_send, t.y, index ); if( pItem->tType == TYPE_MONEY ) { SetShort( temp_send, TYPE_MONEY_SID, index ); SetDWORD( temp_send, pItem->dwMoney, index ); SetByte( temp_send, 0, index); } else { SetShort( temp_send, pItem->sSid, index ); SetDWORD( temp_send, pItem->sCount, index ); SetByte( temp_send, pItem->tIQ, index); SetByte( temp_send, pItem->tMagic[0], index); SetByte( temp_send, pItem->tMagic[1], index); SetByte( temp_send, pItem->tMagic[2], index); SetByte( temp_send, pItem->tMagic[3], index); SetByte( temp_send, pItem->tMagic[4], index); } SEND_DATA* pNewData = NULL; pNewData = new SEND_DATA; if( !pNewData ) return TRUE; pNewData->flag = SEND_INSIGHT; pNewData->len = index; memcpy( pNewData->pBuf, temp_send, index ); pNewData->uid = 0; pNewData->x = x; pNewData->y = y; pNewData->z = pAddMap->m_Zone; pNewData->zone_index = z; // IKING 2001.1. //EnterCriticalSection( &(m_critSendData) ); //m_arSendData.Add( pNewData ); //LeaveCriticalSection( &(m_critSendData) ); //PostQueuedCompletionStatus( m_hSendIOCP, 0, 0, NULL ); Send(pNewData); delete pNewData; // return TRUE; } //删除物品 void COM::DelThrowItem() { ItemList* pThrowItem = NULL; int z, x, y; MAP* pDelMap = NULL; // IKING 2001.1. EnterCriticalSection( &m_critThrowItem ); pThrowItem = m_ThrowItemArray[m_ThrowAddIndex]->m_pItem; // if( pThrowItem ) { z = m_ThrowItemArray[m_ThrowAddIndex]->m_z; // 粮锅龋啊 酒聪绊 Zone Index 捞促 x = m_ThrowItemArray[m_ThrowAddIndex]->m_x; y = m_ThrowItemArray[m_ThrowAddIndex]->m_y; // IKING if(z < 0 || z >= g_zone.GetSize()) { LeaveCriticalSection( &m_critThrowItem ); return; } pDelMap = g_zone[z]; if( pDelMap ) { if(x <= -1 || y <= -1) { LeaveCriticalSection( &m_critThrowItem ); return; } if(x >= pDelMap->m_sizeMap.cx || y >= pDelMap->m_sizeMap.cy) { LeaveCriticalSection( &m_critThrowItem ); return; } CPoint t = ConvertToClient( x, y, pDelMap->m_vMoveCell.m_vDim.cx, pDelMap->m_vMoveCell.m_vDim.cy ); if( t.x == -1 || t.y == -1 ) { LeaveCriticalSection( &m_critThrowItem ); return; } pDelMap->m_pMap[x][y].iIndex = -1; m_ThrowItemArray[m_ThrowAddIndex]->m_pItem = NULL; int index = 0; char temp_send[1024]; SetByte( temp_send, FIELD_ITEM_INFO, index ); SetShort( temp_send, 1, index ); SetByte( temp_send, ITEM_INFO_DELETE, index ); SetShort( temp_send, t.x, index ); SetShort( temp_send, t.y, index ); if(pThrowItem->tType == TYPE_ITEM) { SetShort( temp_send, pThrowItem->sSid, index ); SetDWORD( temp_send, pThrowItem->sCount, index ); SetByte( temp_send, pThrowItem->tIQ, index); } else { SetShort( temp_send, TYPE_MONEY_SID, index ); SetDWORD( temp_send, pThrowItem->dwMoney, index ); SetByte( temp_send, 0, index); } delete pThrowItem; LeaveCriticalSection( &m_critThrowItem ); SEND_DATA* pNewData = NULL; pNewData = new SEND_DATA; if( !pNewData ) return; pNewData->flag = SEND_INSIGHT; pNewData->len = index; memcpy( pNewData->pBuf, temp_send, index ); pNewData->uid = 0; pNewData->x = x; pNewData->y = y; pNewData->z = pDelMap->m_Zone; pNewData->zone_index = z; //EnterCriticalSection( &(m_critSendData) ); //m_arSendData.Add( pNewData ); //LeaveCriticalSection( &(m_critSendData) ); //PostQueuedCompletionStatus( m_hSendIOCP, 0, 0, NULL ); Send(pNewData); delete pNewData; } } else LeaveCriticalSection( &m_critThrowItem ); // 瘤陛 眠啊且 镑俊 酒捞袍捞 绝带啊, 酒捞袍阑 瘤快促 角菩茄 版快捞促 } ///////////////////////////////////////////////////////////////////////////////////////////////////////// // 鞘靛惑痢俊 措茄 辨靛傈 矫埃阑 眉农窍绊 林牢捞 乐绰 惑痢俊 措茄 技陛阑 辨靛芒绊肺 利赋茄促. // void COM::CheckGuildWarTime() { SYSTEMTIME guildTime; GetLocalTime(&guildTime); DWORD dwCurrTick = GetTickCount(); // 力茄 矫埃郴狼 辨靛傈阑 眉农... COleDateTime CurrTime = COleDateTime(guildTime); // SYSTEMTIME st; int i, j; int nTime = 0; static int nState = 0; CString strMsg; DWORD dwTick = 0, dwInter = 0; DWORD dwInterTime = 0; for(i = 0; i < MAX_SINGLE_EVENT; i++) { if(g_QuestEventZone.m_SingleEventZone[i].m_lUsed == 1) { dwTick = dwInter = dwInterTime = 0; dwTick = g_QuestEventZone.m_SingleEventZone[i].m_dwStartTime; dwInterTime = (DWORD)g_QuestEventZone.m_SingleEventZone[i].m_tEventTime; dwInter = g_QuestEventZone.m_SingleEventZone[i].m_dwInterTick; if(dwCurrTick - dwTick > 60000 * dwInterTime) { g_QuestEventZone.RemoveUserInEventZone(i); g_QuestEventZone.m_SingleEventZone[i].m_lUsed = 0; } else if(dwInter != 0 && dwCurrTick - dwInter > 10000) // 10檬饶 磊悼栏肺 炮 抛胶飘 { g_QuestEventZone.RemoveUserInEventZone(i); g_QuestEventZone.m_SingleEventZone[i].m_lUsed = 0; g_QuestEventZone.m_SingleEventZone[i].m_dwInterTick = 0; } } } for(j = 0; j < g_arGuildHouseWar.GetSize(); j++) { if(!g_arGuildHouseWar[j]) continue; if(g_arGuildHouseWar[j]->m_CurrentGuild.lUsed == 1) { DWORD dwTempTick = g_arGuildHouseWar[j]->m_CurrentGuild.dwTimer; DWORD dwInterTick = g_arGuildHouseWar[j]->m_CurrentGuild.dwIntervalTick; if(dwTempTick != 0 && dwCurrTick - dwTempTick > 60000 * 10) { g_arGuildHouseWar[j]->CheckGuildHouseRank(); // Agent俊 历厘茄促. g_arGuildHouseWar[j]->SetNpcListToWarEnd(); //g_arGuildHouseWar[j]->SetUserListToWarEnd(user_array); g_arGuildHouseWar[j]->SetUserListToWarEnd(); g_arGuildHouseWar[j]->InitGuild(); g_arGuildHouseWar[j]->m_CurrentGuild.dwIntervalTick = 0; InterlockedExchange(&g_arGuildHouseWar[j]->m_CurrentGuild.lUsed, 0); } else if(dwInterTick != 0 && dwCurrTick - dwInterTick > 10000) // 10檬饶 磊悼栏肺 炮 抛胶飘 { g_arGuildHouseWar[j]->m_CurrentGuild.dwIntervalTick = 0; // g_arGuildHouseWar[j]->CheckGuildHouseRank(); // Agent俊 历厘茄促. // 荤侩吝牢 钎矫甫 钱绢霖促. // g_arGuildHouseWar[j]->SetNpcListToWarEnd(); //g_arGuildHouseWar[j]->SetUserListToWarEnd(user_array); // 悸泼甫 钱绢霖促. g_arGuildHouseWar[j]->SetUserListToWarEnd(); // 悸泼甫 钱绢霖促. g_arGuildHouseWar[j]->InitGuild(); // 单捞磐甫 朝赴促. ::InterlockedExchange(&g_arGuildHouseWar[j]->m_CurrentGuild.lUsed, 0); } } if(CurrTime.GetDay() == GUILD_HOUSE_DAY) // 辨靛窍快胶 盒剧 朝楼 { if(CurrTime.GetHour() == 0) // 辨靛窍快胶 盒剧 矫埃 { if(CurrTime.GetMinute() == 20 && !nState) { nState = 1; for(i = 0; i < MAX_GUILD_HOUSE_NUM - 1; i++) // 瘤陛 辨.窍啊 6俺挥捞扼 捞吧 DB俊 历厘窍妨搁 茄波锅俊秦具窍唱? { if(!g_arGuildHouse[i]) continue; if(InterlockedCompareExchange((long*)&g_arGuildHouse[i]->lUsed, (long)1, (long)0) == (long)0) { g_arGuildHouse[i]->iSid = i + 1; g_arGuildHouse[i]->iGuild = g_arGuildHouseWar[j]->m_TopList[i].lGuild; g_arGuildHouse[i]->UpdateGuildHouse(); ::InterlockedExchange(&g_arGuildHouse[i]->lUsed, 0); } } g_arGuildHouseWar[j]->ResetGuildHouseRank(); RemoveUserInGuildHouse(); } } } } } void COM::SendGuildWarBegin(CStore *pStore) { UpdateStoreMem2DB(pStore); int i, j, k, type; USER *pUser = NULL; BOOL bSend = FALSE; CBufferEx TempBuf; CDWordArray arGuildList; arGuildList.RemoveAll(); if(pStore->m_iGuildSid <= 0) return; // 辨靛傈阑困秦 曼咯吝牢 傈眉 府胶飘甫 父电促. arGuildList.Add(pStore->m_iGuildSid); // 郴辨靛 刚历... for(j =0; j < GUILD_ATTACK_MAX_NUM; j++) // 惑措祈 辨靛... { if(pStore->m_arAttackGuild[j] > 0) { arGuildList.Add(pStore->m_arAttackGuild[j]); } } CString strMsg; CGuild *pGuild = NULL; for(i = 0; i < arGuildList.GetSize(); i++) // 惑痢 傈里傈 鞘靛傈捞 乐栏搁 檬扁拳 秦霖促. { j = arGuildList[i]; if(j <= 0 || j >= g_arGuildData.GetSize()) continue; pGuild = g_arGuildData[j]; if(!pGuild) continue; pUser = GetUser(pGuild->m_strMasterName); if(!pUser) continue; if(pUser->m_tGuildWar == GUILD_WARRING && pUser->m_dwFieldWar > 0) { strMsg = _ID(IDS_USER_DRAW); pUser->SendGuildWarFieldEnd((LPTSTR)(LPCTSTR)strMsg);// 亲汗 } } short nCount = arGuildList.GetSize(); if(nCount <= 1) { strMsg = _T(""); strMsg.Format( IDS_USER_END_NO_APPLY_GUILD, pStore->m_strGuildName); Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_NORMAL); pStore->m_dwStartTick = 0; return; } TempBuf.Add(GUILD_WAR); TempBuf.Add((BYTE)0x01); // 辨靛傈 矫累... TempBuf.Add((BYTE)GUILD_STORE_WARRING); // 惑痢傈... TempBuf.Add((short)pStore->m_sStoreID); // 秦寸 瘤开 牢郸胶 /* TempBuf.Add(nCount); // 醚 辨靛荐 for(i = 0; i < nCount; i++) { TempBuf.Add(arGuildList[i]); } */ for(i = 0; i < MAX_USER; i++) { type = 0; pUser = GetUserUid(i); if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) continue; if(pUser->m_dwGuild <= 0) continue; type = ((g_zone[pUser->m_ZoneIndex]->m_pMap[pUser->m_curx][pUser->m_cury].m_dwType & 0xFF00 ) >> 8); // if(g_arMapTable[type]->m_sStoreID != pStore->m_sStoreID) continue; if(type >= 0 && type < 8) { //辨靛傈捞 老绢唱绰 惑痢瘤开捞搁 if(g_arMapTable[type]->m_sStoreID == pStore->m_sStoreID)// && g_arMapTable[type]->m_sStoreZone) { if(pStore->m_iGuildSid != pUser->m_dwGuild && pUser->m_tIsOP != 1 ) pUser->TownPotal(); // 规绢辨靛盔捞 酒聪搁 葛滴 付阑肺 } } if(pStore->m_iGuildSid <= 0) { TempBuf.Add((BYTE)0x00); // 老馆蜡历 pUser->Send(TempBuf, TempBuf.GetLength()); continue; } if(pStore->m_iGuildSid == pUser->m_dwGuild) { pUser->BeginGuildWar(); // 辨靛 傈阑 矫累茄促. TempBuf.Add((BYTE)0x01); // 秦寸 辨靛 蜡历 TempBuf.Add(nCount); // 醚 辨靛荐 for(j = 0; j < nCount; j++) { TempBuf.Add(arGuildList[j]); } pUser->Send(TempBuf, TempBuf.GetLength()); } else { for(j =0; j < GUILD_ATTACK_MAX_NUM; j++) { if(pStore->m_arAttackGuild[j] <= 0) continue; if(pStore->m_arAttackGuild[j] == pUser->m_dwGuild) { pUser->BeginGuildWar(); // 辨靛 傈阑 矫累茄促. TempBuf.Add((BYTE)0x01); // 秦寸 辨靛 蜡历 TempBuf.Add(nCount); // 醚 辨靛荐 for(k = 0; k < nCount; k++) { TempBuf.Add(arGuildList[k]); } bSend = TRUE; pUser->Send(TempBuf, TempBuf.GetLength()); break; } } if(!bSend) { TempBuf.Add((BYTE)0x00); // 老馆蜡历 pUser->Send(TempBuf, TempBuf.GetLength()); } } } for(j = 0; j < pStore->m_arNpcList.GetSize(); j++) // 秦寸 NPC俊霸档 舅府绊 { type = pStore->m_arNpcList[j]; if( type < 0 || type >= g_arNpc.GetSize() ) continue; g_arNpc[type]->m_tGuildWar = GUILD_WARRING; if(g_arNpc[type]->m_tNpcType == NPCTYPE_GUILD_GUARD) g_arNpc[type]->m_tNpcAttType = 1; } } void COM::SendGuildWarEnd(CStore *pStore) { int i, j, type; int index = 0; USER *pUser = NULL; if(pStore->UpdateGuildStore() == FALSE) return; pStore->InitStoreInfo(pStore->m_iGuildSid); pStore->SetGuildStoreTex(); CBufferEx TempBuf; TempBuf.Add(GUILD_WAR); TempBuf.Add((BYTE)0x02); // 辨靛傈 辆丰... TempBuf.Add(GUILD_STORE_WARRING); TempBuf.Add((short)pStore->m_sStoreID); for(i = 0; i < MAX_USER; i++) { pUser = GetUserUid(i); if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) continue; if(pUser->m_dwGuild <= 0) continue; pUser->m_tGuildWar = GUILD_WAR_AFFTER; pUser->m_FieldWarGMUid = 0; pUser->m_dwFieldWar = 0; pUser->Send(TempBuf, TempBuf.GetLength()); type = pUser->CheckInvalidMapType(); if(type >= 0 && type < 17) { index = g_arMapTable[type]->m_sStoreID; if(index < FORTRESS_BAND) { CStore *pStore = NULL; index = g_arMapTable[type]->m_sStoreIndex; pStore = pUser->GetStore(index); if(pStore) { pUser->SendSystemMsg( IDS_USER_END_FIELD_STORE_WAR, SYSTEM_NORMAL, TO_ME); } } } } for(j = 0; j < pStore->m_arNpcList.GetSize(); j++) // 秦寸 NPC俊霸档 舅府绊 { type = pStore->m_arNpcList[j]; if( type < 0 || type >= g_arNpc.GetSize() ) continue; g_arNpc[type]->m_tGuildWar = GUILD_WAR_AFFTER; g_arNpc[type]->m_tNpcAttType = 0; } } ///////////////////////////////////////////////////////////////////////////////////////// // 辨靛傈捞 矫累登扁傈俊 辨靛芒绊狼 郴侩阑 DB肺 诀单捞飘 茄促. // void COM::UpdateStoreMem2DB(CStore *pStore) { SQLHSTMT hstmt = NULL; SQLRETURN retcode = 0; BOOL bQuerySuccess = TRUE; TCHAR szSQL[8000]; TCHAR strAttackList[_GUILDLIST_DB]; int i; CSharedMemory* pShared = NULL; CMemStore* pMDStore = NULL; if(pStore == NULL) return; if(pStore->m_sStoreID >= g_arStoreSharedMemory.GetSize()) return; pShared = g_arStoreSharedMemory[pStore->m_sStoreID]; if(pShared == NULL) return; if(pShared->m_hMapping == NULL) return; pMDStore = (CMemStore*) pShared->m_lpData; if(pMDStore == NULL) return; if(pMDStore->m_sStoreID == -1) return; if(pMDStore->m_iGuildSid <= 0) return; ::ZeroMemory(szSQL, sizeof(szSQL)); ::ZeroMemory(strAttackList, sizeof(strAttackList)); pStore->GuildListToStr(strAttackList); SDWORD sLen = sizeof(strAttackList); _sntprintf(szSQL, sizeof(szSQL), TEXT("{call UPDATE_GUILD_STORE(%d, %d, %d, %d, %d, ?)}"), pMDStore->m_sStoreID, pMDStore->m_iGuildSid, pMDStore->m_sTaxRate, pMDStore->m_dwStoreDN, pMDStore->m_tWarType); int db_index = 0; CDatabase* pDB = g_DBNew[AUTOMATA_THREAD].GetDB( db_index ); if( !pDB ) return; retcode = SQLAllocHandle( (SQLSMALLINT)SQL_HANDLE_STMT, pDB->m_hdbc, &hstmt ); if (retcode == SQL_SUCCESS) { i = 1; SQLBindParameter(hstmt, i++, SQL_PARAM_INPUT, SQL_C_BINARY, SQL_VARBINARY, sizeof(strAttackList), 0, (TCHAR*)strAttackList, 0, &sLen); retcode = SQLExecDirect(hstmt, (unsigned char *)szSQL, SQL_NTS); if (retcode ==SQL_SUCCESS || retcode == SQL_SUCCESS_WITH_INFO) { } else if (retcode==SQL_ERROR) { DisplayErrorMsg( hstmt ); bQuerySuccess = FALSE; } } else { // if (hstmt!=NULL) SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); // g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); return; } if (hstmt!=NULL) SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); pMDStore->m_dwStoreDN = 0; } void COM::CheckGuildWarSchedule(CStore *pStore) { if(pStore == NULL) return; int nSize = g_arStoreSharedMemory.GetSize(); CSharedMemory* pShared = NULL; CMemStore* pData = NULL; if(pStore->m_sStoreID >= nSize) return; pShared = g_arStoreSharedMemory[pStore->m_sStoreID]; if(pShared == NULL) return; if(pShared->m_hMapping == NULL) return; pData = (CMemStore*) pShared->m_lpData; if(pData == NULL) return; pStore->m_tWarType = GUILD_WAR_DECISION; pData->m_tWarType = GUILD_WAR_DECISION; } void COM::Announce(TCHAR *pBuf, BYTE sysType) { CBufferEx TempBuf; TempBuf.Add(SYSTEM_MSG); TempBuf.Add(sysType); TempBuf.Add(pBuf, _tcslen(pBuf)); USER* pUser = NULL; for(int i = 0; i < MAX_USER; i++) { pUser = GetUserUid(i); if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) continue; pUser->Send(TempBuf, TempBuf.GetLength()); } } void COM::AnnounceZone(TCHAR *pBuf, BYTE sysType, int iZone) { CBufferEx TempBuf; TempBuf.Add(SYSTEM_MSG); TempBuf.Add(sysType); TempBuf.Add(pBuf, _tcslen(pBuf)); USER* pUser = NULL; for(int i = 0; i < MAX_USER; i++) { pUser = GetUserUid(i); if( pUser == NULL || pUser->m_state != STATE_GAMESTARTED ) continue; if( pUser->m_curz != iZone ) continue; pUser->Send(TempBuf, TempBuf.GetLength()); } } USER* COM::GetUser(TCHAR* id) { if(!strlen(id)) return NULL; CString szSource = id; USER *pUser = NULL; for( int i = 0; i < MAX_USER; i++) { pUser = GetUserUid(i); if(pUser == NULL) continue; if( pUser->m_state == STATE_DISCONNECTED || pUser->m_state == STATE_LOGOUT ) continue; if(!szSource.CompareNoCase(pUser->m_strUserID ) ) return pUser; } return NULL; } USER* COM::GetUserUid(int uid) { USER *pUser = NULL; if (uid >= 0 && uid < MAXPOSSIBLE_USER ) { pUser = g_pUserList->GetUserUid(uid); /* if ( pUser ) { if ( pUser->m_SockFlag != 1 ) { g_pMainDlg->UserFree( uid ); return NULL; } } */ return pUser; } return NULL; } void COM::UpdateUserCount() { int nCount = 0; USER *pUser = NULL; nCount = 0; for (int i = 0; i < MAX_USER; i++ ) { pUser = GetUserUid(i); if( pUser && pUser->m_state == STATE_GAMESTARTED ) nCount++; } if(nCount >= 500) nCount = (int)((double)nCount * 1.1 + 0.5); SQLHSTMT hstmt; SQLRETURN retcode; TCHAR szSQL[1024]; memset(szSQL, 0x00, 1024); _sntprintf(szSQL, sizeof(szSQL), TEXT("{call UpdateGameUserCount ( \'%s\', %d )}"), m_strIP, nCount ); hstmt = NULL; int db_index = -1; CDatabase* pDB = g_DBSession[0].GetDB( db_index ); if( !pDB ) return; retcode = SQLAllocHandle( (SQLSMALLINT)SQL_HANDLE_STMT, pDB->m_hdbc, &hstmt ); if (retcode!=SQL_SUCCESS) { return; } retcode = SQLExecDirect (hstmt, (unsigned char *)szSQL, SQL_NTS); if (retcode ==SQL_SUCCESS || retcode == SQL_SUCCESS_WITH_INFO) { } else if (retcode==SQL_ERROR) { // DisplayErrorMsg(hstmt); } else if (retcode==SQL_NO_DATA) { } if (hstmt!=NULL) SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBSession[0].ReleaseDB(db_index); return; } ////////////////////////////离线更新数据 void COM::UpdateUnLineUserTime() // { for(int i = 0; i < MAX_USER; i++) { USER *pUser = GetUserUid(i); if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) continue; if(pUser->m_bSessionOnline) { pUser->UserTimer(); } } } ////尾巴 void COM::ShowCurrentUser2() { /* int nCount = 0; USER *pUser = NULL; nCount = 0; for (int i = 0; i < MAX_USER; i++ ) { pUser = GetUserUid(i); if(pUser && pUser->m_state == STATE_GAMESTARTED ) nCount++; } if(nCount >= 500) nCount = (int)((double)nCount * 1.1 + 0.5); CString strMsg; strMsg = _T(""); strMsg.Format( "当前在线人数 %d" ,nCount*3); Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_NORMAL);*/ } void COM::CheckCurrentUserTime() { SYSTEMTIME messageTime; GetLocalTime(&messageTime); DWORD dwCurrTick = GetTickCount(); COleDateTime CurrTime = COleDateTime(messageTime); int nTime = 0; static int nState = 0; if( messageTime.wMinute%5==0 && messageTime.wSecond ==0) { ShowCurrentUser2(); } } void COM::CheckMessageTime() //公告 { SYSTEMTIME messageTime; GetLocalTime(&messageTime); DWORD dwCurrTick = GetTickCount(); // 力茄 矫埃郴狼 辨靛傈阑 眉农... COleDateTime CurrTime = COleDateTime(messageTime); //当时间 int nTime = 0; static int nState = 0; if ( messageTime.wHour == 20) //活动期间不提示 return; if(messageTime.wSecond%10==0 )//电光板 { int dgnumer = g_arDGArray.GetSize(); if(dgnumer > 0) { CBufferEx TempBuf; TempBuf.Add((byte)0x1f); TempBuf.Add((byte)1); TempBuf.Add((byte)0x25); TempBuf.Add(0); TempBuf.AddString(g_arDGArray[0]->m_strUserID); TempBuf.AddString(g_arDGArray[0]->m_DGBAN_BODY); SendAll(TempBuf, TempBuf.GetLength()); g_arDGArray.RemoveAt(0); } } /* if( messageTime.wMinute%5==0 && messageTime.wSecond ==0) { CString strMsg; //const char p[] = { 0xCE, 0xC2, 0xF3, 0xC0, 0xCC, 0xE1, 0xCA, 0xBE, 0x3A, 0x00}; //strMsg.Format(strcat((char*)p,"%s"), g_arMessAgeArray[mIndex]->m_MESSAGE_BODY); strMsg.Format("%s", g_arMessAgeArray[mIndex]->m_MESSAGE_BODY); Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_NORMAL); mIndex++; if(mIndex >= g_arMessAgeArray.GetSize() -1) mIndex = 0; } SetHourExp(1);*/ if(messageTime.wHour == 23 && messageTime.wMinute == 59 && messageTime.wSecond == 59) // 清理 { SubQLPKDASAITime(); PersonalShopOpen1(); } } void COM::SendAll(TCHAR *pBuf, int nLength) { if( nLength <= 0 || nLength >= SEND_BUF_SIZE ) return; SEND_DATA* pNewData = NULL; pNewData = new SEND_DATA; if( !pNewData ) return; pNewData->flag = SEND_ALL; pNewData->len = nLength; ::CopyMemory( pNewData->pBuf, pBuf, nLength ); Send(pNewData); if(pNewData) delete pNewData; } void COM::PersonalShopOpen1() { USER *pUser = NULL; int i; for(i = 0; i < MAX_USER; i++) { pUser = g_pUserList->GetUserUid(i); if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) continue; pUser->m_dwLingQu = 1; ////////////////////// pUser->m_dwShopPingDN = 0; pUser->m_dwBHTime = 0; //////////////////////////// CString strMsg; strMsg = _T(""); strMsg.Format( "友情提示:现在已是凌晨0点,请注意休息!"); Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } } ////////////////////////////////////// void COM::SubQLPKDASAITime() { SQLHSTMT hstmt = NULL; SQLRETURN retcode = 0; TCHAR szSQL[1024]; ::ZeroMemory(szSQL, sizeof(szSQL)); int index = 0; _sntprintf(szSQL, sizeof(szSQL), TEXT("{call UPDATE_PK_QL}")); int db_index = 0; CDatabase* pDB = g_DBNew[AUTOMATA_THREAD].GetDB( db_index ); if( !pDB ) return; retcode = SQLAllocHandle( (SQLSMALLINT)SQL_HANDLE_STMT, pDB->m_hdbc, &hstmt ); if (retcode != SQL_SUCCESS) return; if (retcode == SQL_SUCCESS) { retcode = SQLExecDirect(hstmt, (unsigned char *)szSQL, SQL_NTS); if (retcode ==SQL_SUCCESS || retcode == SQL_SUCCESS_WITH_INFO) { } else if (retcode==SQL_ERROR) { DisplayErrorMsg( hstmt ); } } if (hstmt!=NULL) SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); } void COM::CheckPKTime() //PK竞赛 { SYSTEMTIME guildTime; GetLocalTime(&guildTime); DWORD dwCurrTick = GetTickCount(); // 力茄 矫埃郴狼 辨靛傈阑 眉农... COleDateTime CurrTime = COleDateTime(guildTime); int nTime = 0; CString strMsg; PKnum = 0;//开始前把人数初始化 PKover = FALSE;//设置PK没结束 if(guildTime.wHour == 19 && guildTime.wMinute>= 55) // 19点45进行公告! { nTime = 60 - guildTime.wMinute; if(guildTime.wSecond == 0) //一分钟公告一次 { strMsg.Format( "单人格斗大赛还有 %d 分钟将会开放进入,20:05时将会关闭,请大家准备哦!", nTime);//"恶魔广场还有 %d 分钟将会开放,请大家准备就绪!" Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } } if(guildTime.wHour == 20 && guildTime.wMinute>= 0 && guildTime.wMinute <= 4) // PK开启前准备 { winName =""; if(guildTime.wSecond == 0) { strMsg.Format( "各位勇士紧急做好PK准备.20:05分比赛正式开启,当前时间是:%d:0%d", guildTime.wHour,guildTime.wMinute); Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } } if(guildTime.wHour == 20 && guildTime.wMinute == 5) // 厮杀开始 { if(guildTime.wSecond >=0 && guildTime.wSecond <=10) { // strMsg.Format("请勇者们打开PK按钮进行厮杀吧!!"); //Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } } if(guildTime.wHour >= 20 && guildTime.wMinute > 5 && PKover == FALSE) { if(guildTime.wSecond % 20 == 0) //10秒一次更新人数 { USER* pUser = NULL; for(int i = 0; i < MAX_USER; i++) { pUser = GetUserUid(i); if(pUser && pUser->m_state == STATE_GAMESTARTED && pUser->m_curz == 67 && pUser->m_bLive == USER_LIVE) PKnum ++; } /*if( PKnum < 4 ) { strMsg.Format("当前人数只有:[ %d ]个,无法进行大赛.",PKnum); Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); pUser->ZoneMoveReq(1 ,1301,267); }*/ if( PKnum > 1 && guildTime.wMinute < 15) { strMsg.Format("现在进行激烈的战斗...PK赛场剩余勇士:[ %d ]个.",PKnum); Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } if(PKnum == 1) { for(int i = 0; i < MAX_USER; i++) { pUser = GetUserUid(i); if(pUser && pUser->m_state == STATE_GAMESTARTED && pUser->m_curz == 67 && pUser->m_bLive == USER_LIVE) { winName = pUser->m_strUserID; pUser->m_PKWin = TRUE; } } PKover = TRUE; } } } if( PKover == TRUE) { if (winName.GetLength() > 0) { strMsg.Format("玩家[ %s ]获得了最终胜利.成为本届PK王!!!",winName); Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); PKover = FALSE; } } } //////////////////////////////////////////////////////////////////// //PK杀人狂 // void COM::CheckPKShaRenTime() //PK竞赛 { SYSTEMTIME guildTime; GetLocalTime(&guildTime); DWORD dwCurrTick = GetTickCount(); // 力茄 矫埃郴狼 辨靛傈阑 眉农... COleDateTime CurrTime = COleDateTime(guildTime); int nTime = 0; CString strMsg; if(guildTime.wHour == 20 && guildTime.wMinute>= 15 && guildTime.wMinute< 20) // 20点15分开始公告 { ShaRenEnd = FALSE; ShaRenNum = 0; //初始化最大值 ShaRenName=""; //初始化杀人王名字 nTime = 20 - guildTime.wMinute; if(guildTime.wSecond == 0) { /*strMsg.Format( "PK杀人狂大赛还有 %d 分钟将会开放进入,杀人最多就是胜利!", nTime); Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE);*/ } else if(guildTime.wSecond == 30) { //strMsg.Format( "为了大家踊跃参加大赛,每杀一个玩家奖励10个标志,封顶值为200个,也就是杀20个人以下都奖励."); //Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } else if(guildTime.wSecond == 50) { //strMsg.Format( "杀人狂大赛的进入地点是乍旦NPC处选择进入"); //Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } } if(guildTime.wHour == 20 && guildTime.wMinute >= 20 && guildTime.wMinute < 50) //活动统计中.. 8:20至 8:50分进行大赛 { nTime = 50 - guildTime.wMinute; if(guildTime.wSecond % 30==0) //20秒一次更新人数 { USER* pUser = NULL; for(int i = 0; i < MAX_USER; i++) { pUser = GetUserUid(i); if( pUser && pUser->m_state == STATE_GAMESTARTED) { if(pUser->m_dwAutoMoney > ShaRenNum) { ShaRenNum = pUser->m_dwAutoMoney; ShaRenName = pUser->m_strUserID; } } } if( ShaRenName.GetLength() > 0) { /*strMsg.Format("杀人狂大赛消息:现在杀人数最高的玩家是[ %s ],一共杀了[ %d ]名玩家.离活动结束还有[%d]分钟!",ShaRenName,ShaRenNum,nTime); Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE);*/ } } } if(guildTime.wHour == 20 && guildTime.wMinute == 50 && guildTime.wSecond == 5) { if( ShaRenName.GetLength() > 0 ) { strMsg.Format("本届杀人狂第一名玩家是[ %s ],一共杀了[ %d ]名玩家!",ShaRenName,ShaRenNum); Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); ShaRenEnd = TRUE; } else { //strMsg.Format("本届杀人狂大赛结束!谢谢广大玩家对本游戏的支持!",ShaRenName,ShaRenNum); //Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } } } void COM::CheckFortressWarTime() { SYSTEMTIME guildTime; GetLocalTime(&guildTime); DWORD dwCurrTick = GetTickCount(); // 力茄 矫埃郴狼 辨靛傈阑 眉农... COleDateTime CurrTime = COleDateTime(guildTime); SYSTEMTIME st; int i, j; int nTime = 0; static int nState = 0; CString strMsg; CheckViolenceInFortress(); for(i = 0; i < g_arGuildFortress.GetSize(); i++) { if(!g_arGuildFortress[i]) continue; int time = 0; CTimeSpan DiffTime; if(g_arGuildFortress[i]->m_lUsed == 0) { st = g_arGuildFortress[i]->m_wLastWarTime; COleDateTime LastTime(st.wYear, st.wMonth, st.wDay, st.wHour, 0, 0); if(CurrTime.m_status == COleDateTime::valid && LastTime.m_status == COleDateTime::valid) { CTime curr(CurrTime.GetYear(), CurrTime.GetMonth(), CurrTime.GetDay(), CurrTime.GetHour(), 0, 0); CTime last(LastTime.GetYear(), LastTime.GetMonth(), LastTime.GetDay(), LastTime.GetHour(), 0, 0); DiffTime = curr - last; } if(g_arGuildFortress[i]->m_iGuildSid < SYSTEM_GUILD_BAND && g_arGuildFortress[i]->m_iGuildSid > 0) // 惑痢俊 林牢捞 乐绊 { time = (int)DiffTime.GetTotalHours(); if(time >= UPDATE_GUILD_INVEN_TIME) //12盒付促 // time = DiffTime.GetTotalMinutes(); // if(time >= 5) //12盒付促 { SetFortressWarTime(g_arGuildFortress[i]); UpdateFortress2DB(g_arGuildFortress[i]); g_arGuildFortress[i]->m_iGuildDN = 0; GetLocalTime(&g_arGuildFortress[i]->m_wLastWarTime); } } } if(g_arGuildFortress[i]->m_lUsed == 0 && g_arGuildFortress[i]->m_tWarType == GUILD_WAR_DECISION) { if(guildTime.wYear == g_arGuildFortress[i]->m_wPlanWarTime.wYear) { if(guildTime.wMonth == g_arGuildFortress[i]->m_wPlanWarTime.wMonth) { if(guildTime.wDay == g_arGuildFortress[i]->m_wPlanWarTime.wDay) { if(guildTime.wHour + 1 == g_arGuildFortress[i]->m_wPlanWarTime.wHour) { int tt = g_arGuildFortress[i]->m_iStandingTime; if(guildTime.wMinute == g_arGuildFortress[i]->m_iStandingTime) { nTime = 60 - guildTime.wMinute; if(nTime > 0) { if(g_arGuildFortress[i]->m_sFortressID == 1000) { strMsg.Format( IDS_USER_START_LUINET_SOON, nTime); } else if(g_arGuildFortress[i]->m_sFortressID == 1001) { strMsg.Format( IDS_USER_START_SANAD_SOON, nTime); } else if(g_arGuildFortress[i]->m_sFortressID == 1002) { strMsg.Format( IDS_USER_START_ZAMELLYA_SOON, nTime); } Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } g_arGuildFortress[i]->m_iStandingTime += 3; } } if(guildTime.wHour == g_arGuildFortress[i]->m_wPlanWarTime.wHour) { if(guildTime.wMinute == 0 && g_arGuildFortress[i]->m_lUsed == 0)// 辨靛傈 矫累... { if(dwCurrTick - g_arGuildFortress[i]->m_dwStartTick < 70000) continue; g_arGuildFortress[i]->m_iStandingTime = 50; InterlockedExchange(&g_arGuildFortress[i]->m_lUsed, 1); g_arGuildFortress[i]->m_dwStartTick = dwCurrTick; GetLocalTime(&g_arGuildFortress[i]->m_wLastWarTime); SendFortressWarBegin(g_arGuildFortress[i]); } } } } } } else if(g_arGuildFortress[i]->m_lUsed == 1) { SYSTEMTIME ut; ut = g_arGuildFortress[i]->m_wLastWarTime; COleDateTime LastTime(ut.wYear, ut.wMonth, ut.wDay, ut.wHour, 0, 0); if(dwCurrTick - g_arGuildFortress[i]->m_dwStartTick > 60000 * g_dwFortressTime ) // 60盒 抛胶飘 * 2矫埃 { // 辨靛傈捞 场车促. InterlockedExchange(&g_arGuildFortress[i]->m_lUsed, 0); g_arGuildFortress[i]->m_dwStartTick = dwCurrTick; SendFortressWarEnd(g_arGuildFortress[i]); GetLocalTime(&g_arGuildFortress[i]->m_wLastWarTime); } else if(CurrTime.m_status == COleDateTime::valid && LastTime.m_status == COleDateTime::valid) { CTime curr(CurrTime.GetYear(), CurrTime.GetMonth(), CurrTime.GetDay(), CurrTime.GetHour(), CurrTime.GetMinute(), CurrTime.GetSecond()); CTime last(LastTime.GetYear(), LastTime.GetMonth(), LastTime.GetDay(), LastTime.GetHour(), LastTime.GetMinute(), LastTime.GetSecond()); DiffTime = curr - last; nTime =(int)( g_dwFortressTime - DiffTime.GetTotalMinutes()); for(j = 0; j < GUILD_WAR_MAX_MSG_TIMER; j++) { if( g_arGuildFortress[i]->m_MsgTimer[j].iMsgTime == nTime ) { if(g_arGuildFortress[i]->m_MsgTimer[j].iUsed == 1) break; if(g_arGuildFortress[i]->m_sFortressID == 1000) { strMsg.Format( IDS_USER_END_LUINET_SOON, g_arGuildFortress[i]->m_MsgTimer[j].iMsgTime); } else if(g_arGuildFortress[i]->m_sFortressID == 1001) { strMsg.Format( IDS_USER_END_SANAD_SOON, g_arGuildFortress[i]->m_MsgTimer[j].iMsgTime); } else if(g_arGuildFortress[i]->m_sFortressID == 1002) { strMsg.Format( IDS_USER_END_ZAMELLYA_SOON, g_arGuildFortress[i]->m_MsgTimer[j].iMsgTime); } Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); g_arGuildFortress[i]->m_MsgTimer[j].iUsed = 1; break; } } } } } } void COM::CheckDevilTime() //恶魔广场公告 { SYSTEMTIME guildTime; GetLocalTime(&guildTime); DWORD dwCurrTick = GetTickCount(); // 力茄 矫埃郴狼 辨靛傈阑 眉农... COleDateTime CurrTime = COleDateTime(guildTime); int nTime = 0; CString strMsg; if(guildTime.wDayOfWeek == 5)//每星期六开放 { if(guildTime.wHour == 18 && guildTime.wMinute>= 45) // 18点45进行公告! { nTime = 60 - guildTime.wMinute; //计算15分钟后开放 if(guildTime.wSecond == 0) //一分钟公告一次 { strMsg.Format( "恶魔广场还有 %d 分钟开放,请大家做好备战准备!", nTime);//"恶魔广场还有 %d 分钟将会开放,请大家准备就绪!" Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } } if(guildTime.wHour == 19 && guildTime.wMinute <= 30) { nTime = 30 - guildTime.wMinute; if(nTime <=5 && guildTime.wSecond == 0) { strMsg.Format( "恶魔广场在 %d 分钟后关闭! 下周六晚上19:00点恶魔广场继续开放!", nTime);//"恶魔广场还有 %d 分钟将会关闭!" Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } } } } void COM::ReUpdateFortress2DB(CGuildFortress *pFort) { SQLHSTMT hstmt = NULL; SQLRETURN retcode = 0; TCHAR szSQL[8000]; TCHAR strWarTime[30]; CString strTime = _T(""); if(pFort == NULL) return; ::ZeroMemory(szSQL, sizeof(szSQL)); ::ZeroMemory(strWarTime, sizeof(strWarTime)); SYSTEMTIME guildTime; GetLocalTime(&guildTime); CTime curr(guildTime); curr += CTimeSpan( o_yehuoini[0]->ysjg, 0, 0, 0 ); //本次要塞与下次要塞间隔时间(目前是7天一次) strTime.Format("%d-%d-%d 21:00:00", curr.GetYear(), curr.GetMonth(), curr.GetDay()); ::CopyMemory(strWarTime, strTime.GetBuffer(strTime.GetLength()), strTime.GetLength()); strTime.ReleaseBuffer(); ::ZeroMemory(szSQL, sizeof(szSQL)); _sntprintf(szSQL, sizeof(szSQL), TEXT("{call REUPDATE_GUILD_FORTRESS(%d,%d,\'%s\',\'%s\')}"), pFort->m_sFortressID, pFort->m_iGuildSid, pFort->m_strGuildName, strWarTime); int db_index = 0; CDatabase* pDB = g_DBNew[AUTOMATA_THREAD].GetDB( db_index ); if( !pDB ) return ; retcode = SQLAllocHandle( (SQLSMALLINT)SQL_HANDLE_STMT, pDB->m_hdbc, &hstmt ); if( retcode != SQL_SUCCESS ) { TRACE("Fail To Update Guild_Store Data Only!!\n"); //g_DBNew[m_iModSid].ReleaseDB(db_index); return ; } if (retcode == SQL_SUCCESS) { retcode = SQLExecDirect(hstmt, (unsigned char *)szSQL, SQL_NTS); if (retcode ==SQL_SUCCESS || retcode == SQL_SUCCESS_WITH_INFO) { } else if (retcode==SQL_ERROR) { DisplayErrorMsg( hstmt ); SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DB[AUTOMATA_THREAD].ReleaseDB(db_index); return ; } } else { DisplayErrorMsg( hstmt ); SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); return ; } if (hstmt!=NULL) SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); } void COM::SetHourExp(int type) { /*SYSTEMTIME Time; GetLocalTime(&Time); //晚上18.59分自动开3倍 if(Time.wHour == 18 && Time.wMinute == 59 && Time.wSecond == 30 ) { g_sanJingYan = TRUE; g_sanBaoLv = TRUE; USER *pUser = NULL; for (int i = 0; i < MAX_USER; i++ ) { pUser = GetUserUid(i); if(pUser && pUser->m_state == STATE_GAMESTARTED ) { pUser->SetXingfen(); pUser->SetXingYun(); } } Announce("系统:启动全服3倍兴奋和3倍幸运黄金时间,祝大家游戏愉快!", SYSTEM_ANNOUNCE); } //22点59分30秒自动关3倍 if(Time.wHour == 22 && Time.wMinute == 59 && Time.wSecond == 30) { /*g_sanJingYan = FALSE; g_sanBaoLv = FALSE; USER *pUser = NULL; for (int i = 0; i < MAX_USER; i++ ) { pUser = GetUserUid(i); if(pUser && pUser->m_state == STATE_GAMESTARTED ) { pUser->DelXingfen(); pUser->DelXingYun(); } } Announce("系统:全服3倍兴奋和3倍幸运时间结束,明天晚上19~23点继续开放!", SYSTEM_ANNOUNCE); }*/ } void COM::SendFortressWarBegin(CGuildFortress *pFort) { UpdateFortress2DB(pFort); int mapindex = -1; BOOL bSend = FALSE; int i, j, k, type; int nLen = 0; USER *pUser = NULL; CBufferEx TempBuf; CDWordArray arGuildList; arGuildList.RemoveAll(); if(pFort->m_iGuildSid <= 0) return; // 辨靛傈阑困秦 曼咯吝牢 傈眉 府胶飘甫 父电促. arGuildList.Add(pFort->m_iGuildSid); // 郴辨靛 刚历... for(j =0; j < GUILDFORTRESS_ATTACK_MAX_NUM; j++) // 惑措祈 辨靛... { if(pFort->m_arAttackGuild[j].lUsed == 1) { arGuildList.Add(pFort->m_arAttackGuild[j].lGuild); } } CString strMsg; short nCount = arGuildList.GetSize(); if(nCount <= 1) { strMsg = _T(""); if(pFort->m_sFortressID == 1000) strMsg.Format( IDS_USER_END_LUINET_NO_APPLY ); else if(pFort->m_sFortressID == 1001) strMsg.Format( IDS_USER_END_SANAD_NO_APPLY ); else if(pFort->m_sFortressID == 1002) strMsg.Format( IDS_USER_END_ZAMELLYA_NO_APPLY ); Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_NORMAL); pFort->m_dwStartTick = 0; return; } TempBuf.Add(GUILD_WAR); TempBuf.Add((BYTE)0x01); // 辨靛傈 矫累... TempBuf.Add((BYTE)GUILD_FOTRESS_WARRING); // 惑痢傈... TempBuf.Add((short)pFort->m_sFortressID); // 秦寸 瘤开 牢郸胶 strMsg = _T(""); if(pFort->m_sFortressID == 1000) strMsg.Format( IDS_USER_START_LUINET ); else if(pFort->m_sFortressID == 1001) strMsg.Format( IDS_USER_START_SANAD ); else if(pFort->m_sFortressID == 1002) strMsg.Format( IDS_USER_START_ZAMELLYA ); for(i = 0; i < MAX_USER; i++) { type = 0; nLen = 0; mapindex = -1; pUser = GetUserUid(i); if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) continue; pUser->SendSystemMsg((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE, TO_ME); type = ((g_zone[pUser->m_ZoneIndex]->m_pMap[pUser->m_curx][pUser->m_cury].m_dwType & 0xFF00 ) >> 8); //if(type >= 0 && type < 17) mapindex = pUser->GetGuildMapIndex(type); if(mapindex >= 0) { //辨靛傈捞 老绢唱绰 惑痢瘤开捞搁 if(g_arMapTable[mapindex]->m_sStoreID == pFort->m_sFortressID)// && g_arMapTable[type]->m_sStoreZone) { // 家蜡 辨靛啊 绝栏搁 葛滴 付阑肺 焊辰促. if(pFort->m_iGuildSid != pUser->m_dwGuild && pUser->m_tIsOP != 1 ) pUser->TownPotal(); } } CBufferEx DataBuf; DataBuf.AddData(TempBuf, TempBuf.GetLength()); nLen = strlen(pUser->m_strGuildName); if(pUser->m_dwGuild <= 0 || nLen <= 0) { DataBuf.Add((BYTE)0x00); // 老馆蜡历 pUser->Send(DataBuf, DataBuf.GetLength()); continue; } if(pFort->m_iGuildSid == pUser->m_dwGuild) { pUser->BeginFortressWar(); // 辨靛 傈阑 矫累茄促. DataBuf.Add((BYTE)0x01); // 秦寸 辨靛 蜡历 DataBuf.Add(nCount); // 醚 辨靛荐 for(j = 0; j < nCount; j++) { DataBuf.Add(arGuildList[j]); } pUser->Send(DataBuf, DataBuf.GetLength()); } else { for(j =0; j < GUILDFORTRESS_ATTACK_MAX_NUM; j++) { if(pFort->m_arAttackGuild[j].lUsed == 0) continue; // if(strcmp(pFort->m_arAttackGuild[j].strGuildName, pUser->m_strGuildName) == 0) if(pFort->m_arAttackGuild[j].lGuild == pUser->m_dwGuild) { pUser->BeginFortressWar(); // 辨靛 傈阑 矫累茄促. DataBuf.Add((BYTE)0x01); // 秦寸 辨靛 蜡历 DataBuf.Add(nCount); // 醚 辨靛荐 for(k = 0; k < nCount; k++) { DataBuf.Add(arGuildList[k]); } bSend = TRUE; pUser->Send(DataBuf, DataBuf.GetLength()); break; } } if(!bSend) { DataBuf.Add((BYTE)0x00); // 老馆蜡历 pUser->Send(DataBuf, DataBuf.GetLength()); } } } pFort->SetNpcToFortressWarBegin(); } ///////////////////////////////////////////////////////////////////////////////////////// // void COM::SendFortressWarEnd(CGuildFortress *pFort) { int mapindex; int i, type; USER *pUser = NULL; pFort->SetNpcToFortressWarEnd(this); CString strMsg = _T(""); strMsg.Format( IDS_USER_END_FORTRESS, pFort->m_strGuildName); CBufferEx TempBuf; TempBuf.Add(GUILD_WAR); TempBuf.Add((BYTE)0x02); // 辨靛傈 辆丰... TempBuf.Add((BYTE)GUILD_FOTRESS_WARRING); TempBuf.Add((short)pFort->m_sFortressID); // Send(TempBuf, TempBuf.GetLength()); for(i = 0; i < MAX_USER; i++) { mapindex = -1; pUser = GetUserUid(i); if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) continue; type = pUser->CheckInvalidMapType(); mapindex = pUser->GetGuildMapIndex(type); if(mapindex >= 0) { //辨靛傈捞 老绢唱绰 惑痢瘤开捞搁 if(g_arMapTable[mapindex]->m_sStoreID == pFort->m_sFortressID) //&& g_arMapTable[type]->m_sStoreZone) { // 规绢辨靛盔捞 酒聪搁 葛滴 付阑肺 if(pFort->m_iGuildSid != pUser->m_dwGuild && pUser->m_tIsOP != 1) pUser->TownPotal(); } } pUser->m_tFortressWar = GUILD_WAR_AFFTER; pUser->Send(TempBuf, TempBuf.GetLength()); if(pFort->m_iZone == pUser->m_curz) { if(pFort->m_iGuildSid != pUser->m_dwGuild && pUser->m_tIsOP != 1 ) pUser->TownPotal(); } pUser->SendSystemMsg((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE, TO_ME); } pFort->UpdateInitFortress(); ReUpdateFortress2DB(pFort); } void COM::RemoveUserInGuildHouse() { int i, j; USER *pUser = NULL; for(i = 0; i < MAX_USER; i++) { pUser = GetUserUid(i); if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) continue; if(pUser->m_dwGuild <= 0) continue; for(j = 0; j < g_arGuildHouse.GetSize(); j++) { if(g_arGuildHouse[j]->iZone == pUser->m_curz) { if(g_arGuildHouse[j]->iGuild != pUser->m_dwGuild && pUser->m_tIsOP != 1 ) { pUser->TownPotal(); break; } } } } } void COM::UpdateFortress2DB(CGuildFortress *pFort) { SQLHSTMT hstmt = NULL; SQLRETURN retcode = 0; BOOL bQuerySuccess = TRUE; TCHAR szSQL[8000]; int i; if(pFort == NULL) return; TCHAR strAttackList[_GUILDLIST_DB], strRepairList[_GUILDREPAIRLIST_DB]; ::ZeroMemory(szSQL, sizeof(szSQL)); ::ZeroMemory(strRepairList, sizeof(strRepairList)); ::ZeroMemory(strAttackList, sizeof(strAttackList)); pFort->GuildAttListToStr(strAttackList); pFort->FortressRepairListToStr(strRepairList); SDWORD sAttLen = sizeof(strAttackList); SDWORD sRepairLen = sizeof(strRepairList); _sntprintf(szSQL, sizeof(szSQL), TEXT("{call UPDATE_GUILD_FORTRESS(%d,%d,\'%s\',%d,%d,%d,?,?)}"), pFort->m_sFortressID, pFort->m_iGuildSid, pFort->m_strGuildName, pFort->m_tTaxRate, pFort->m_iGuildDN, pFort->m_tWarType); int db_index = 0; CDatabase* pDB = g_DBNew[AUTOMATA_THREAD].GetDB( db_index ); if( !pDB ) return ; retcode = SQLAllocHandle( (SQLSMALLINT)SQL_HANDLE_STMT, pDB->m_hdbc, &hstmt ); if( retcode != SQL_SUCCESS ) { TRACE("Fail To Update Guild_Store Data Only!!\n"); //g_DBNew[m_iModSid].ReleaseDB(db_index); return ; } if (retcode == SQL_SUCCESS) { i = 1; SQLBindParameter( hstmt, i++, SQL_PARAM_INPUT, SQL_C_BINARY, SQL_VARBINARY, sizeof(strAttackList),0, (TCHAR*)strAttackList, 0, &sAttLen ); SQLBindParameter( hstmt, i++, SQL_PARAM_INPUT, SQL_C_BINARY, SQL_VARBINARY, sizeof(strRepairList),0, (TCHAR*)strRepairList, 0, &sRepairLen ); retcode = SQLExecDirect(hstmt, (unsigned char *)szSQL, SQL_NTS); if (retcode ==SQL_SUCCESS || retcode == SQL_SUCCESS_WITH_INFO) { } else if (retcode==SQL_ERROR) { DisplayErrorMsg( hstmt ); SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); return ; } } else { DisplayErrorMsg( hstmt ); SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); return ; } if (hstmt!=NULL) SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); if( !bQuerySuccess ) return ; } void COM::UpdateFortressMem2DB(CGuildFortress *pFort) { /* int i; TCHAR strAttackList[_GUILDLIST_DB], strRepairList[_GUILDREPAIRLIST_DB]; if(pFort == NULL) return; ::ZeroMemory(strRepairList, sizeof(strRepairList)); ::ZeroMemory(strAttackList, sizeof(strAttackList)); pFort->GuildAttListToStr(strAttackList); pFort->FortressRepairListToStr(strRepairList); SDWORD sAttLen = sizeof(strAttackList); SDWORD sRepairLen = sizeof(strRepairList); CSharedMemory* pShared = NULL; CMemFortress* pMDFort = NULL; for(i = 0; i < g_arFortressSharedMemory.GetSize(); i++) { pShared = g_arFortressSharedMemory[i]; if(pShared == NULL) continue; if(pShared->m_hMapping == NULL) continue; pMDFort = (CMemFortress*) pShared->m_lpData; if(pMDFort == NULL) continue; if(pMDFort->m_sFortressID <= 0) continue; if(pMDFort->m_sFortressID == pFort->m_sFortressID) { pMDFort->m_iGuildSid = pFort->m_iGuildSid; pMDFort->m_sTaxRate = 0; pMDFort->m_dwStoreDN = 0; pMDFort->m_tWarType = GUILD_WAR_PREPARE; pFort->FortressRepairListToStr(pMDFort->m_RepairList); pFort->GuildAttListToStr(pMDFort->m_AttackList); return; } } */ } void COM::LoadFortressCityDegree(CGuildFortress *pFort) { /* if(!pFort) return; if(!pFort->m_bHaveGuild) return; if(pFort->m_iGuildSid <= 0) return; int i; SQLHSTMT hstmt = NULL; SQLRETURN retcode; TCHAR szSQL[1024]; SQLINTEGER iRet = -1; SQLINTEGER iRetInd = SQL_NTS; SQLINTEGER iCityDegree; ::ZeroMemory(szSQL, sizeof(szSQL)); iCityDegree = 0; _sntprintf(szSQL, sizeof(szSQL), TEXT("{call LOAD_GUILD_FORTRESS_CITYDEGREE(%d, ?)}"), pFort->m_iGuildSid); // SQLINTEGER sInd; int db_index = 0; CDatabase* pDB = g_DBNew[AUTOMATA_THREAD].GetDB( db_index ); if( !pDB ) return; retcode = SQLAllocHandle( (SQLSMALLINT)SQL_HANDLE_STMT, pDB->m_hdbc, &hstmt ); if( retcode != SQL_SUCCESS ) { TRACE("Fail To Load Guild_Store Attack List Data !!\n"); // g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); return ; } i = 1; SQLBindParameter( hstmt, i++ ,SQL_PARAM_OUTPUT,SQL_C_SLONG, SQL_INTEGER,0,0, &iRet,0, &iRetInd); retcode = SQLExecDirect( hstmt, (unsigned char*)szSQL, SQL_NTS); if( retcode == SQL_SUCCESS || retcode == SQL_SUCCESS_WITH_INFO ) { retcode = SQLFetch( hstmt ); if( retcode == SQL_SUCCESS || retcode == SQL_SUCCESS_WITH_INFO ) { // SQLGetData( hstmt, i++, SQL_C_SLONG, &iCityDegree,sizeof(iCityDegree),&sInd ); // SQLGetData( hstmt, i++, SQL_C_SSHORT, &sCityDegree, sizeof(sCityDegree), &sInd ); // SQLGetData( hstmt, i++, SQL_C_ULONG, &sCityDegree, sizeof(sCityDegree), &sInd ); } } else { DisplayErrorMsg(hstmt); retcode = SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); // BREAKPOINT(); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); return ; } retcode = SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); // pFort->m_iCityDegree = iRet; */ } void COM::SetFortressWarTime(CGuildFortress *pFort) { CString strTime; SQLHSTMT hstmt = NULL; SQLRETURN retcode = 0; BOOL bQuerySuccess = TRUE; TCHAR szSQL[8000]; TCHAR strWarTime[30]; SYSTEMTIME guildTime; CTimeSpan DiffTime; if(pFort->m_iGuildSid <= 0) return; int iYear = 0; int iMon = 0; int iDay = 0; long temp = -1; strTime = _T(""); ::ZeroMemory(szSQL, sizeof(szSQL)); ::ZeroMemory(strWarTime, sizeof(strWarTime)); GetLocalTime(&guildTime); CTime curr(guildTime); CTime last(pFort->m_wPlanWarTime); DiffTime = last - curr; temp = (int)DiffTime.GetDays(); if(temp < 0) { curr += CTimeSpan( o_yehuoini[0]->ysjg, 0, 0, 0 ); //本次要塞与下次要塞间隔时间(目前是7天一次) iYear = curr.GetYear(); iMon = curr.GetMonth(); iDay = curr.GetDay(); } else { if(pFort->m_tWarType == GUILD_WAR_DECISION) return; iYear = last.GetYear(); iMon = last.GetMonth(); iDay = last.GetDay(); } pFort->m_wPlanWarTime.wYear = iYear; pFort->m_wPlanWarTime.wMonth = iMon; pFort->m_wPlanWarTime.wDay = iDay; strTime.Format("%d-%d-%d 21:00:00", iYear, iMon, iDay); ::CopyMemory(strWarTime, strTime.GetBuffer(strTime.GetLength()), strTime.GetLength()); strTime.ReleaseBuffer(); SDWORD sLen = sizeof(strWarTime); ::ZeroMemory(szSQL, sizeof(szSQL)); _sntprintf(szSQL, sizeof(szSQL), TEXT("{call UPDATE_GUILD_FORTRESS_WARTIME(%d,%d,\'%s\')}"), pFort->m_sFortressID, pFort->m_iGuildSid, strWarTime); int db_index = 0; CDatabase* pDB = g_DBNew[AUTOMATA_THREAD].GetDB( db_index ); if( !pDB ) return ; retcode = SQLAllocHandle( (SQLSMALLINT)SQL_HANDLE_STMT, pDB->m_hdbc, &hstmt ); if( retcode != SQL_SUCCESS ) { TRACE("Fail To Update Fortress War Data Only!!\n"); return ; } if (retcode == SQL_SUCCESS) { retcode = SQLExecDirect(hstmt, (unsigned char *)szSQL, SQL_NTS); if (retcode ==SQL_SUCCESS || retcode == SQL_SUCCESS_WITH_INFO) { } else if (retcode==SQL_ERROR) { DisplayErrorMsg( hstmt ); SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); return ; } } else { DisplayErrorMsg( hstmt ); SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); return ; } if (hstmt!=NULL) SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); pFort->m_wPlanWarTime.wYear = iYear; pFort->m_wPlanWarTime.wMonth = iMon; pFort->m_wPlanWarTime.wDay = iDay; pFort->m_wPlanWarTime.wHour = 21; pFort->m_tWarType = GUILD_WAR_DECISION;// 12矫埃 傈俊 搬沥捞 救登绢 乐促搁 官肺 被洒扁 累诀俊 甸绢埃促. UpdateMemFortressWarType(pFort); } void COM::UpdateMemFortressWarType(CGuildFortress *pFort) { if(pFort == NULL) return; int nSize = g_arFortressSharedMemory.GetSize(); CSharedMemory* pShared = NULL; CMemFortress* pData = NULL; for(int i = 0; i < nSize; i++) { pShared = g_arFortressSharedMemory[i]; if(pShared == NULL) return; if(pShared->m_hMapping == NULL) return; pData = (CMemFortress*) pShared->m_lpData; if(pData == NULL) return; if(pData->m_iGuildSid == pFort->m_iGuildSid) { pData->m_tWarType = pFort->m_tWarType; break; } } } ///////////////////////////////////////////////////////////////////////////////////////// // 辨靛啊 家蜡窍绊 乐栏搁 刮缴荐摹甫 犬牢茄促. // void COM::CheckViolenceInFortress() { SYSTEMTIME guildTime; GetLocalTime(&guildTime); DWORD dwCurrTick = GetTickCount(); // 力茄 矫埃郴狼 辨靛傈阑 眉农... COleDateTime CurrTime = COleDateTime(guildTime); SYSTEMTIME st; int i; int nTime = 0; static int nState = 0; CString strMsg; for(i = 0; i < g_arGuildFortress.GetSize(); i++) { int time = 0; CTimeSpan DiffTime; if(!g_arGuildFortress[i]) continue; if(g_arGuildFortress[i]->m_lViolenceUsed == 0) { st = g_arGuildFortress[i]->m_wMopPartyTime; COleDateTime MopTime(st.wYear, st.wMonth, st.wDay, st.wHour, 0, 0); if(MopTime.GetYear() >= 2030) { if(g_arGuildFortress[i]->GetTotalCityValue() > 60) // 弥厩狼 刮缴俊 酒流 DB俊 馆康捞 救登搁 静绊 墨款飘 促款 矫累... { GetLocalTime(&g_arGuildFortress[i]->m_wMopPartyTime); SetFortressViolenceTime(g_arGuildFortress[i], g_arGuildFortress[i]->m_iGuildSid, FORTRESS_VIOLENCE_WARRING); } continue; } if(CurrTime.m_status == COleDateTime::valid && MopTime.m_status == COleDateTime::valid) { CTime curr(CurrTime.GetYear(), CurrTime.GetMonth(), CurrTime.GetDay(), CurrTime.GetHour(), 0, 0); CTime last(MopTime.GetYear(), MopTime.GetMonth(), MopTime.GetDay(), MopTime.GetHour(), 0, 0); DiffTime = curr - last; } if(g_arGuildFortress[i]->m_iGuildSid < SYSTEM_GUILD_BAND && g_arGuildFortress[i]->m_iGuildSid > 0) // 惑痢俊 林牢捞 乐绊 { time = (int)DiffTime.GetTotalHours(); if(time >= UPDATE_GUILD_INVEN_TIME) //12矫埃付促 { g_arGuildFortress[i]->m_wMopPartyTime.wYear = 2030; SetFortressViolenceTime(g_arGuildFortress[i], g_arGuildFortress[i]->m_iGuildSid, FORTRESS_VIOLENCE_AFFTER); if(g_arGuildFortress[i]->m_sFortressID == 1000) { strMsg.Format( IDS_USER_RIOT_LUINET ); } else if(g_arGuildFortress[i]->m_sFortressID == 1001) { strMsg.Format( IDS_USER_RIOT_SANAD ); } else if(g_arGuildFortress[i]->m_sFortressID == 1002) { strMsg.Format( IDS_USER_RIOT_ZAMELLYA ); } Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); g_arGuildFortress[i]->m_dwViolenceTick = dwCurrTick; g_arGuildFortress[i]->SetNpcToFortressViolenceBegin(); InterlockedExchange(&g_arGuildFortress[i]->m_lViolenceUsed, 1); } } } else if(g_arGuildFortress[i]->m_lViolenceUsed == 1) { if(dwCurrTick - g_arGuildFortress[i]->m_dwViolenceTick > 60000 * 10) // 10盒 抛胶飘 { // 场车促. InterlockedExchange(&g_arGuildFortress[i]->m_lViolenceUsed, 0); g_arGuildFortress[i]->m_dwStartTick = 0; SetNpcToViolenceFortressWarEnd(g_arGuildFortress[i]); // SendFortressWarEnd(g_arGuildFortress[i]); } } } } void COM::SetFortressViolenceTime(CGuildFortress *pFort, int iGuild, int iType) { CString strTime; SQLHSTMT hstmt = NULL; SQLRETURN retcode = 0; BOOL bQuerySuccess = TRUE; TCHAR szSQL[8000]; CTimeSpan DiffTime; if(pFort->m_iGuildSid <= 0) return; ::ZeroMemory(szSQL, sizeof(szSQL)); _sntprintf(szSQL, sizeof(szSQL), TEXT("{call UPDATE_GUILD_FORTRESS_VIOLENCETIME(%d,%d,%d)}"), pFort->m_sFortressID, iGuild, iType); int db_index = 0; CDatabase* pDB = g_DBNew[AUTOMATA_THREAD].GetDB( db_index ); if( !pDB ) return ; retcode = SQLAllocHandle( (SQLSMALLINT)SQL_HANDLE_STMT, pDB->m_hdbc, &hstmt ); if( retcode != SQL_SUCCESS ) { TRACE("Fail To Update Fortress War Data Only!!\n"); return ; } if (retcode == SQL_SUCCESS) { retcode = SQLExecDirect(hstmt, (unsigned char *)szSQL, SQL_NTS); if (retcode ==SQL_SUCCESS || retcode == SQL_SUCCESS_WITH_INFO) { } else if (retcode==SQL_ERROR) { DisplayErrorMsg( hstmt ); SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); return ; } } else { DisplayErrorMsg( hstmt ); SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); return ; } if (hstmt!=NULL) SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); } void COM::SetNpcToViolenceFortressWarEnd(CGuildFortress *pFort) { BOOL bChange = FALSE; CString strMsg = _T(""); bChange = pFort->SetNpcToFortressViolenceEnd(this); if(bChange) { SetFortressViolenceTime(pFort, 900000, FORTRESS_VIOLENCE_AFFTER); pFort->SetInitFortressViolence(); pFort->InitMemFortress(GUILD_WAR_DECISION); if(pFort->m_sFortressID == 1000) { strMsg.Format( IDS_USER_RIOT_LUINET_RETURN ); } else if(pFort->m_sFortressID == 1001) { strMsg.Format( IDS_USER_RIOT_SANAD_RETURN ); } else if(pFort->m_sFortressID == 1002) { strMsg.Format( IDS_USER_RIOT_ZAMELLYA_RETURN ); } Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); pFort->GetOutof(this); } } <file_sep>/GuildFortressSet.cpp // GuildFortressSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "GuildFortressSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CGuildFortressSet IMPLEMENT_DYNAMIC(CGuildFortressSet, CRecordset) CGuildFortressSet::CGuildFortressSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CGuildFortressSet) m_sFortressID = 0; m_iGuildSid = 0; m_strGuildName = _T(""); m_sTaxRate = 0; m_iGuildDN = 0; m_tWarType = 0; m_sPotalX = 0; m_sPotalY = 0; m_sZone = 0; m_nFields = 17; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CGuildFortressSet::GetDefaultConnect() { return _T("ODBC;DSN=drgamenew;UID=drgame;PWD=<PASSWORD>"); } CString CGuildFortressSet::GetDefaultSQL() { return _T("[dbo].[GUILD_FORTRESS]"); } void CGuildFortressSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CGuildFortressSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sFortressID]"), m_sFortressID); RFX_Long(pFX, _T("[iGuildSid]"), m_iGuildSid); RFX_Text(pFX, _T("[strGuildName]"), m_strGuildName); RFX_Int(pFX, _T("[sTaxRate]"), m_sTaxRate); RFX_Long(pFX, _T("[iGuildDN]"), m_iGuildDN); RFX_Date(pFX, _T("[GetTime]"), m_GetTime); RFX_Date(pFX, _T("[WarTime]"), m_WarTime); RFX_Date(pFX, _T("[WarPlan]"), m_WarPlan); RFX_Byte(pFX, _T("[tWarType]"), m_tWarType); RFX_Binary(pFX, _T("[HireGuardList]"), m_HireGuardList); RFX_Binary(pFX, _T("[strAttackList]"), m_strAttackList); RFX_Binary(pFX, _T("[strRepairList]"), m_strRepairList); RFX_Int(pFX, _T("[sPotalX]"), m_sPotalX); RFX_Int(pFX, _T("[sPotalY]"), m_sPotalY); RFX_Int(pFX, _T("[sZone]"), m_sZone); RFX_Date(pFX, _T("[MopPartyTime]"), m_MopPartyTime); RFX_Date(pFX, _T("[ReleaseTime]"), m_ReleaseTime); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CGuildFortressSet diagnostics #ifdef _DEBUG void CGuildFortressSet::AssertValid() const { CRecordset::AssertValid(); } void CGuildFortressSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/VirtualRoom.cpp // VirtualRoom.cpp: implementation of the CVirtualRoom class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "VirtualRoom.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CVirtualRoom::CVirtualRoom() { } CVirtualRoom::~CVirtualRoom() { } <file_sep>/VirtualRoom.h // VirtualRoom.h: interface for the CVirtualRoom class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_VIRTUALROOM_H__716F2B61_0DCD_49D9_9279_A58C294C3AE6__INCLUDED_) #define AFX_VIRTUALROOM_H__716F2B61_0DCD_49D9_9279_A58C294C3AE6__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CVirtualRoom { public: int m_iCity; short m_sZone; POINT m_Move[MAX_GUILD_HOUSE_USER]; CVirtualRoom(); virtual ~CVirtualRoom(); }; #endif // !defined(AFX_VIRTUALROOM_H__716F2B61_0DCD_49D9_9279_A58C294C3AE6__INCLUDED_) <file_sep>/resource.h //{{NO_DEPENDENCIES}} // Microsoft Visual C++ generated include file. // Used by Server.rc // #define IDS_TEST 1 #define IDC_NEW_ANNOUNCE 3 #define IDQUANOUT 4 #define IDM_ABOUTBOX 0x0010 #define IDD_ABOUTBOX 100 #define IDS_ABOUTBOX 101 #define IDD_SERVER_DIALOG 102 #define IDS_USER_SHENGSHI 102 #define IDS_EVENT_ATT7_ITEM 103 #define IDS_EVENT_DEF7_ITEM 104 #define IDS_EVENT_ATT6_ITEM 105 #define IDS_EVENT_DEF6_ITEM 106 #define IDS_EVENT_ATT5_ITEM 107 #define IDS_EVENT_DEF5_ITEM 108 #define IDS_EVENT_ATT4_ITEM 109 #define IDS_EVENT_DEF4_ITEM 110 #define IDS_EVENT_ATT3_ITEM 111 #define IDS_EVENT_DEF3_ITEM 112 #define IDS_EVENT_RESET_STAT 113 #define IDS_EVENT_PERSONAL 114 #define IDS_SERVERDLG_SERVERINFOERROR 115 #define IDS_SERVERDLG_GIANTBIGFOOT 116 #define IDS_SERVERDLG_STOREERROR 117 #define IDS_SERVERDLG_CURRENTUSER 118 #define IDS_SERVERDLG_OPERATOR 119 #define IDS_USER_UNDER_CONSTRUCTION 120 #define IDS_USER_DOUBLE_ACCOUNT 121 #define IDS_USER_DOUBLE_CHAR 122 #define IDS_USER_OPERATOR_MAIL1 123 #define IDS_USER_OPERATOR_MAIL2 124 #define IDS_USER_GUILD_DEFEAT 125 #define IDS_USER_NOT_ENOUGH_STAMINA 126 #define IDS_USER_PK_COUNT 127 #define IDR_MAINFRAME 128 #define IDS_USER_INVALID_MONSTER_NAME 129 #define IDS_USER_INVALID_POINT_CALL_MONSTER 130 #define IDS_USER_TOO_LOW_CITY_VALUE 131 #define IDS_USER_TOO_LOW_OTHER_CITY_VALUE 132 #define IDS_USER_TOO_LOW_LEVEL_FOR_SHOUT 133 #define IDS_USER_NOT_ENOUGH_STAMINA_FOR_SHOUT 134 #define IDS_USER_MAKE_BUDDY_FIRST 135 #define IDS_USER_ITEM_RECEIVE_DENIED 136 #define IDS_USER_ITEM_RECEIVE_PERMITTED 137 #define IDS_USER_NOT_CONNECTED_NOW 138 #define IDS_USER_HAVE_NOT_PERMISSION 139 #define IDS_USER_INVALID_BUDDY_CONDITION 140 #define IDS_USER_REQUEST_DENIED 141 #define IDS_USER_BUDDY_LEVEL 142 #define IDS_USER_BUDDY_ALREADY 143 #define IDS_USER_BUDDY_MAX_USER 144 #define IDS_USER_BBS_DELETED_ALREADY 145 #define IDS_USER_NEED_OPERATOR 146 #define IDS_USER_BBS_NO_PERMISSION 147 #define IDS_USER_BBS_CANT_DELETE 148 #define IDS_USER_LEVEL_DOWN 149 #define IDS_USER_NOT_ENOUGH_PP 150 #define IDS_USER_ITEM_DAMAGED 151 #define IDS_USER_SWORD_PACK_EMPTY 152 #define IDS_USER_BULLET_EMPTY 153 #define IDS_USER_ATTACK_FROM 154 #define IDS_USER_HIGHEST_LEVEL 155 #define IDS_USER_ITEM_TOO_LOW_ABILITY 156 #define IDS_USER_ITEM_OTHER_CLASS 157 #define IDS_USER_BRAWL 158 #define IDS_USER_STAFF 159 #define IDS_USER_EDGED 160 #define IDS_USER_FIREARMS 161 #define IDS_USER_CANT_USER_THIS 162 #define IDS_USER_CANT_USE_THIS 162 #define IDS_USER_DINA_RECEIVE_FROM 163 #define IDS_USER_CANT_GIVE_EVENT_ITEM 164 #define IDS_USER_OVER_WEIGHT 165 #define IDS_USER_ITEM_RECEIVE_FROM 166 #define IDS_USER_ITEM_GIVE_TO 167 #define IDS_USER_CANT_THROW_IN_QUEST 168 #define IDS_USER_CANT_THROW_EVENT_ITEM 169 #define IDS_USER_ITEM_OVERFLOW 170 #define IDS_USER_CANT_BUYSELL_EVENT_ITEM 171 #define IDS_USER_OVER_WEIGHT_OTHER 172 #define IDS_USER_OVER_WEIGHT_SAVE 173 #define IDS_USER_OVER_WEIGHT_SAVE_OTHER 174 #define IDS_USER_OVER_COUNT_SAVE_OTHER 175 #define IDS_USER_EXCHANGE_COMPLETED 176 #define IDS_USER_NOT_THIS_LEVEL 177 #define IDS_USER_ONLY_GUILD_MASTER_USE 178 #define IDS_USER_OVER_WEIGHT_RECEIVE_ITEM 179 #define IDS_USER_INVEN_FULL_RECEIVE_ITEM 180 #define IDS_USER_NOT_ENOUGH_DINA 181 #define IDS_USER_NOT_THIS_GUILD_MEMBER 182 #define IDS_USER_NOT_YET_FIX_SCHEDULE 183 #define IDS_USER_ONLY_GUILD_MASTER_WAR 184 #define IDS_USER_CANT_APPLY_IN_WAR 185 #define IDS_USER_CANT_APPLY_TO_MY_GUILD 186 #define IDS_USER_CANT_APPLY_HAVE_FORTRESS 187 #define IDS_USER_ALREADY_APPLY_OTHER_GUILD 188 #define IDS_USER_OVERFLOW_GUILD_WAR_ENEMY 189 #define IDS_USER_APPLIED_GUILD_WAR 190 #define IDS_USER_NOT_FIX_SCHEDULE 191 #define IDS_USER_APPLY_CONDITION_LEVEL 192 #define IDS_USER_APPLY_CONDITION_GMEMBER 193 #define IDS_USER_CANT_APPLY_HAVE_HOUSE 194 #define IDS_USER_ALREADY_LUENET_FORTRESS 195 #define IDS_USER_ALREADY_LUINET_FORTRESS 195 #define IDS_USER_ALREADY_SANAD_FORTRESS 196 #define IDS_USER_APPLY_FAIL 197 #define IDS_USER_APPLY_COMPLETED 198 #define IDS_USER_NEED_NOT_CURE 199 #define IDS_USER_NOT_ENOUGH_DINA_CURE 200 #define IDS_USER_CURE_COMPLETED 201 #define IDS_USER_CANT_USE_YOUR_CITY_RANK 202 #define IDS_USER_CANT_USE_IN_GUILD_WAR 203 #define IDS_USER_NOT_ENOUGH_DINA1 204 #define IDS_USER_OVER_WEIGHT1 205 #define IDS_USER_BUY_FAIL 206 #define IDS_USER_FULL_INVEN 207 #define IDS_USER_NOT_THIS_STORE_BUYSELL 208 #define IDS_USER_CANT_SELL_EVENT_ITEM 209 #define IDS_USER_SELL_FAIL 210 #define IDS_USER_GAME_TIME_END 211 #define IDS_USER_GAME_TIME_REMAIN 212 #define IDS_USER_FORCE_END_LATER_FEW_MINUTE 213 #define IDS_USER_FREE_SERVICE_LEVEL 214 #define IDS_USER_FIRE_DAMAGE_RESET 215 #define IDS_USER_COLD_DAMAGE_RESET 216 #define IDS_USER_COLD_DAMAGED 217 #define IDS_USER_FIRE_DAMAGED 218 #define IDS_USER_ONCE_MOVE_MAX 219 #define IDS_USER_SAVE_MAX_COUNT 220 #define IDS_USER_SYSTEM_MSG01 221 #define IDS_USER_GUILD_WAR_WINNER 222 #define IDS_USER_CALL_MONSTER_COUNT_MAX 223 #define IDS_USER_CALL_FAIL 224 #define IDS_USER_CALL_WHAT 225 #define IDS_USER_ALREADY_GUILD_IN 226 #define IDS_USER_ALREADY_OTHER_GUILD_IN 227 #define IDS_USER_GUILD_MAKE_LEVEL 228 #define IDS_USER_GUILD_MAKE_DINA 229 #define IDS_USER_GUILD_MAKE_CITY_RANK 230 #define IDS_USER_SEE_EACH_OTHER 231 #define IDS_USER_ENTER_SUCCRESS 232 #define IDS_USER_NEW_MEMBER 233 #define IDS_USER_NO_OUT_IN_WAR 234 #define IDS_USER_OUT_GUILD 235 #define IDS_USER_OUT_MEMBER 236 #define IDS_USER_CANT_DISMISS_IN_WAR 237 #define IDS_USER_CANT_DISMISS_FORTRESS 238 #define IDS_USER_DISMISS_COMPLETED 239 #define IDS_USER_CANT_DISMISS_IN_BANK 240 #define IDS_USER_KICKOUT_GUILD 241 #define IDS_USER_KICKOUT_MEMBER 242 #define IDS_USER_OPERATOR 243 #define IDS_USER_CANT_USER_GUILD_BANK_NOW 244 #define IDS_USER_CANT_USE_GUILD_BANK_NOW 244 #define IDS_USER_GUILD_BANK_IN_USE 245 #define IDS_USER_CANT_SHARE_EVENT_ITEM 246 #define IDS_USER_NO_PERMISSION_USE 247 #define IDS_USER_ALREADY_OTHER_GUILD_USE 248 #define IDS_USER_CANT_UPGRADE_ITEM 249 #define IDS_USER_NOT_ENOUGH_PAY 250 #define IDS_USER_ONLY_USE_GUILD_MASTER 251 #define IDS_USER_TEX_CONTROL 252 #define IDS_USER_CANT_USE_IN_DEFENCE 253 #define IDS_USER_TEX_MAX 254 #define IDS_USER_INVALID_YEAR 255 #define IDS_USER_INVALID_MONTH 256 #define IDS_USER_INVALID_DAY 257 #define IDS_USER_INVALID_HOUR 258 #define IDS_USER_ALREADY_EXIST_SCHEDULE 259 #define IDS_USER_GUILD_WAR_DELAY 260 #define IDS_USER_PASS_MIN_DAY 261 #define IDS_USER_INVALID_TIME 262 #define IDS_USER_SCHEDULE_FIX 263 #define IDS_USER_EDIT_SCHEDULE_FAIL 264 #define IDS_USER_SCHEDULE_FIXED 265 #define IDS_USER_GUILD_WAR_DELAY1 266 #define IDS_USER_GUILD_WAR_DELAY2 267 #define IDS_USER_FORTRESS_SCHEDULE_FIXED 268 #define IDS_USER_START_GUILD_WAR 269 #define IDS_USER_END_GUILD_WAR 270 #define IDS_USER_THIS_AREA_GUILD_HAVE 271 #define IDS_USER_IN_ONE_SCREEN 272 #define IDS_USER_INVALID_MEMBER_COUNT 273 #define IDS_USER_QUEST_TIME 274 #define IDS_USER_CANT_REVIVE_IN_WAR 275 #define IDS_USER_SYSTEM_MSG02 276 #define IDS_USER_SPEED_HACK 277 #define IDS_USER_NO_HANDYGATE 278 #define IDS_USER_NO_HANDYGATE_AREA 279 #define IDS_USER_ALREADY_HANDYGATE_SAVE 280 #define IDS_USER_HANDYGATE_SAVE 281 #define IDS_USER_HANDYGATE_DELETE 282 #define IDS_USER_FIRST_REPAIR 283 #define IDS_USER_NO_USER_ID 284 #define IDS_USER_INVALID_USER_ID 285 #define IDS_USER_NO_COPYCHIP 286 #define IDS_USER_INVALID_AXIS 287 #define IDS_USER_REVIEW_THIS_AXIS 288 #define IDS_USER_CANT_USE_AREA 289 #define IDS_USER_MOVE_FAIL 290 #define IDS_USER_WHO_MOVE_FAIL 291 #define IDS_USER_CURRENT_USER_COUNT 292 #define IDS_USER_NO_CHANGE_THIS 293 #define IDS_USER_CHANGE_COMPLETED 294 #define IDS_USER_GET_FORTRESS_SUCCESS 295 #define IDS_USER_REG_ADDR_FAIL 296 #define IDS_USER_MUNHWA_PRESENT 297 #define IDS_USER_DEPT_PRESENT 298 #define IDS_USER_REG_ADDR 299 #define IDS_USER_SYSTEM_MSG03 300 #define IDS_USER_SYSTEM_MSG04 301 #define IDS_USER_NOT_GUILD_MASTER 302 #define IDS_USER_UNCLEAR_FORTRESS 303 #define IDS_USER_NOT_APPLY_GUILD 304 #define IDS_USER_UNKNOWN_ERROR 305 #define IDS_USER_NO_MORE_REPAIR 306 #define IDS_USER_FAIL 307 #define IDS_USER_NOT_ENOUGH_RAPAIR_DINA 308 #define IDS_USER_NOT_ENOUGH_REPAIR_DINA 308 #define IDS_USER_NOT_ENOUGH_REPAIR_HAVEDINA 309 #define IDS_USER_REPAIR_COMPLETED 310 #define IDS_USER_NO_PERMISSION_IN_WAR 311 #define IDS_USER_UNTIL_TODAY_YOUR_GAMETIME 312 #define IDS_USER_REMAIN_ACCOUNT_USE 313 #define IDS_USER_REMAIN_GAMETIME_MIN 314 #define IDS_USER_REMAIN_GAMEROOMTIME 315 #define IDS_USER_CANT_DRESSING_ITEM 316 #define IDS_USER_INVALID_DRESSING_COUNT 317 #define IDS_USER_DRESSING_COUNT_FORMAT 318 #define IDS_USER_CANT_DRESSING 319 #define IDS_USER_DRESSING_NO_RESULT 320 #define IDS_USER_DRESSING_RESULT 321 #define IDS_USER_DRESSING_FAIL 322 #define IDS_USER_SAME_ITEM_COUNT_MAX 323 #define IDS_USER_NOT_ENOUGH_PAY_FOR_OUT 324 #define IDS_USER_DAMAGED_ITEM 325 #define IDS_USER_NEED_EMPTY_SLOT_CHANGE 326 #define IDS_USER_UNSTABLE_CONNECTION 327 #define IDS_USER_EVENT_COMMON_CURE 328 #define IDS_USER_EVENT_ATT7_CHANGE 329 #define IDS_USER_EVENT_DEF7_CHANGE 330 #define IDS_USER_EVENT_ATT6_CHANGE 331 #define IDS_USER_EVENT_DEF6_CHANGE 332 #define IDS_USER_EVENT_ATT5_CHANGE 333 #define IDS_USER_EVENT_DEF5_CHANGE 334 #define IDS_USER_EVENT_RESET_STAT 335 #define IDS_USER_CHECK_INPUT_NUMBER 336 #define IDS_USER_NOT_SUBSCRIPTION 337 #define IDS_USER_ONE_MORE_APPLY 338 #define IDS_USER_ALREADY_LOTTO 339 #define IDS_USER_CANCEL_LOTTO 340 #define IDS_USER_OVER_WEIGHT2 341 #define IDS_USER_CHECK_LOTTO_NUMBER 342 #define IDS_USER_CHECK_SOCIAL_NUMBER 343 #define IDS_USER_GUILD_STORE_SOON 344 #define IDS_USER_DRAW 345 #define IDS_USER_END_NO_APPLY_GUILD 346 #define IDS_USER_END_FIELD_STORE_WAR 347 #define IDS_USER_START_LUINET_SOON 348 #define IDS_USER_START_SANAD_SOON 349 #define IDS_USER_END_LUINET_SOON 350 #define IDS_USER_END_SANAD_SOON 351 #define IDS_USER_END_LUINET_NO_APPLY 352 #define IDS_USER_END_SANAD_NO_APPLY 353 #define IDS_USER_START_LUINET 354 #define IDS_USER_START_SANAD 355 #define IDS_USER_END_FORTRESS 356 #define IDS_USER_RIOT_LUINET 357 #define IDS_USER_RIOT_SANAD 358 #define IDS_USER_RIOT_LUINET_RETURN 359 #define IDS_USER_RIOT_SANAD_RETURN 360 #define IDS_USER_FAIL_EVENT_DATA 361 #define IDS_USER_SYSTEM_MSG05 362 #define IDS_USER_QUEST_END 363 #define IDS_USER_SYSTEM_MSG06 364 #define IDS_USER_SYSTEM_MSG07 365 #define IDS_USER_SYSTEM_MSG08 366 #define IDS_USER_FAIL_CHECK_INVEN 367 #define IDS_USER_ALREADY_REG_NUMBER 368 #define IDS_USER_YOUR_ACCOUNT_REG 369 #define IDS_USER_FLOWER_PRESENT 370 #define IDS_EVENT_FLOWER 371 #define IDS_USER_ACCESSORI_INVALID 372 #define IDS_USER_ACCESSORI_RIPEL 373 #define IDS_USER_ACCESSORI_CREST 374 #define IDS_USER_ACCESSORI_OLD 375 #define IDS_EVENT_LOTTO 376 #define IDS_USER_CHANGE_UPGRADE_ACC_SUCCESS 377 #define IDS_USER_CHANGE_UPGRADE_ACC_FAIL 378 #define IDS_USER_ACCESSORI_SKILLUP 379 #define IDS_USER_GUILD_SUBMASTER_ON 380 #define IDS_USER_GUILD_SUBMASTER_OFF 381 #define IDS_USER_GUILD_SUBMASTER 382 #define IDS_USER_GUILD_SUBMASTER_NOT 383 #define IDS_CANT_REMODELING 384 #define IDS_USER_START_ZAMELLYA_SOON 385 #define IDS_USER_END_ZAMELLYA_SOON 386 #define IDS_USER_END_ZAMELLYA_NO_APPLY 387 #define IDS_USER_START_ZAMELLYA 388 #define IDS_USER_RIOT_ZAMELLYA 389 #define IDS_USER_RIOT_ZAMELLYA_RETURN 390 #define IDS_USER_ALREADY_ZAMELLYA_FORTRESS 391 #define IDS_EVENT_END 392 #define IDS_MOON_SONGPEON_01 393 #define IDS_MOON_SONGPEON_11 394 #define IDS_MOON_SONGPEON_31 395 #define IDS_MOON_SONGPEON_51 396 #define IDS_MOON_SONGPEON_71 397 #define IDS_ANOTHER_PSI_RUN 398 #define IDS_INVENTORY_FULL 399 #define IDS_CANNOT_USE_BOX 400 #define IDS_RING_OF_LIFE 401 #define IDS_NEED_QUEST 402 #define IDS_PSI_ERROR_LEVEL 403 #define IDS_PSI_ERROR_CLASS 404 #define IDS_PSI_ERROR_XP 405 #define IDS_PSI_ERROR_EXIST 406 #define IDS_MIND_SHOCK_SUCCESS 407 #define IDS_CANNOT_REVERSE 408 #define IDS_XP_ERROR 409 #define IDS_HELPER_NOT_NEED_CURE 410 #define IDS_HELPER_CURE_COMPLETED 411 #define IDS_HELPER_CURE_COMPLETED2 412 #define IDS_HELPER_CURE_COMPLETED2_CLASS 413 #define IDS_USER_INVALID_SHOP_LOCATION 414 #define IDS_USER_INVALID_SHOP_NAME 415 #define IDS_USER_INVALID_SHOP_ITEMCOUNT 416 #define IDS_USER_INVALID_SHOP_ITEMAMOUNT 417 #define IDS_USER_INVALID_SHOP_BEINGVIEWED 418 #define IDS_USER_SHOP_NOTENOUGH_DINA 419 #define IDS_USER_SHOP_NOTENOUGH_WEIGHT 420 #define IDS_USER_SHOP_NO_IVEN_SLOT 421 #define IDS_USER_SHOP_BUY_SUCCESS 422 #define IDS_USER_SHOP_BUY_FAIL 423 #define IDS_NOT_HAVING_ITEM 424 #define IDS_SHOPDN_ERROR 425 #define IDS_USER_NOT_ENOUGH_CHAO 426 #define IDS_USER_NOT_ENOUGH_XIAO 427 #define IDS_USER_NOT_ENOUGH_WANG 427 #define IDS_USER_NOT_ENOUGH_SHUI 428 #define IDC_EDIT 1000 #define IDC_INDEX 1002 #define IDC_PORT 1003 #define IDC_ZONE 1004 #define IDC_CLIENT_VER 1005 #define IDC_REFRESH 1006 #define IDC_EDIT_ANNOUNCE 1007 #define IDC_BUTTON_SHUTDOWN 1008 // Next default values for new objects // #ifdef APSTUDIO_INVOKED #ifndef APSTUDIO_READONLY_SYMBOLS #define _APS_NEXT_RESOURCE_VALUE 132 #define _APS_NEXT_COMMAND_VALUE 32771 #define _APS_NEXT_CONTROL_VALUE 1009 #define _APS_NEXT_SYMED_VALUE 101 #endif #endif <file_sep>/InitItemTableSet.h #if !defined(AFX_INITITEMTABLESET_H__E4693E2A_9E7F_43AA_8AC7_A284FD80D530__INCLUDED_) #define AFX_INITITEMTABLESET_H__E4693E2A_9E7F_43AA_8AC7_A284FD80D530__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // InitItemTableSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CInitItemTableSet recordset typedef struct _INITITEMTABLE { BYTE tClass; int iSid; int iSubSid; } InitItemTable; class CInitItemTableSet : public CRecordset { public: CInitItemTableSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CInitItemTableSet) // Field/Param Data //{{AFX_FIELD(CInitItemTableSet, CRecordset) BYTE m_tTypeClass; int m_sSid; int m_sSubSid; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CInitItemTableSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_INITITEMTABLESET_H__E4693E2A_9E7F_43AA_8AC7_A284FD80D530__INCLUDED_) <file_sep>/SETItem.cpp // SETItem.cpp: implementation of the CSETItem class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "SETItem.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CSETItem::CSETItem() { } CSETItem::~CSETItem() { } <file_sep>/DNTable.cpp // DNTable.cpp: implementation of the CDNTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "DNTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CDNTable::CDNTable() { m_sIndex = 0; m_sMinDn = 0; m_sMaxDn = 0; } CDNTable::~CDNTable() { } <file_sep>/CityNpcTableSet.h #if !defined(AFX_CITYNPCTABLESET_H__F5B87727_F157_407A_9C06_389868F32721__INCLUDED_) #define AFX_CITYNPCTABLESET_H__F5B87727_F157_407A_9C06_389868F32721__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // CityNpcTableSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CCityNpcTableSet recordset class CCityNpcTableSet : public CRecordset { public: CCityNpcTableSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CCityNpcTableSet) // Field/Param Data //{{AFX_FIELD(CCityNpcTableSet, CRecordset) int m_sSid; int m_sPid; CString m_strName; int m_sSTR; int m_sDEX; int m_sVOL; int m_sWIS; int m_sMaxHP; int m_sMaxPP; BYTE m_byClass; BYTE m_byClassLevel; int m_sExp; BYTE m_byAX; BYTE m_byAY; BYTE m_byAZ; BYTE m_byRange; int m_sAI; int m_sAttackDelay; BYTE m_byVitalC; BYTE m_byWildShot; BYTE m_byIronSkin; BYTE m_byReAttack; BYTE m_bySubAttack; BYTE m_byState; BYTE m_byPsi; BYTE m_byPsiLevel; BYTE m_bySearchRange; int m_sSpeed; int m_sInclination; BYTE m_byColor; int m_sStandTime; BYTE m_tNpcType; int m_sFamilyType; BYTE m_tItemPer; BYTE m_tDnPer; int m_byDefense; BYTE m_byExciteRate; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CCityNpcTableSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_CITYNPCTABLESET_H__F5B87727_F157_407A_9C06_389868F32721__INCLUDED_) <file_sep>/GuildUser.h // GuildUser.h: interface for the CGuildUser class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_GUILDUSER_H__1623FC3A_0913_46E0_B891_57E63EAD8845__INCLUDED_) #define AFX_GUILDUSER_H__1623FC3A_0913_46E0_B891_57E63EAD8845__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CGuildUser { public: char m_strUserId[CHAR_NAME_LENGTH + 1]; long m_lSid; // 길드인덱스 long m_lUsed; // 여기에 길드원이 셋팅되어있다. CGuildUser(); virtual ~CGuildUser(); }; #endif // !defined(AFX_GUILDUSER_H__1623FC3A_0913_46E0_B891_57E63EAD8845__INCLUDED_) <file_sep>/SharedMemory.h // SharedMemory.h: interface for the CSharedMemory class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_SHAREDMEMORY_H__E4CFD1A7_91B9_4A93_9E85_7C5583208794__INCLUDED_) #define AFX_SHAREDMEMORY_H__E4CFD1A7_91B9_4A93_9E85_7C5583208794__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CSharedMemory { public: CSharedMemory(UINT nSize, LPCTSTR pszName); CSharedMemory(); virtual ~CSharedMemory(); BOOL Create (UINT nSize, LPCTSTR pszName = NULL); void Delete(); BOOL Open(LPCTSTR szName, UINT nSize, BOOL bReadOnly = FALSE); LPVOID m_lpData; HANDLE m_hMapping; UINT m_nSize; }; #endif // !defined(AFX_SHAREDMEMORY_H__E4CFD1A7_91B9_4A93_9E85_7C5583208794__INCLUDED_) <file_sep>/SETItem.h // SETItem.h: interface for the CSETItem class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_SETITEM_H__70964E5E_2CC5_4B1E_8E28_D208F79E155F__INCLUDED_) #define AFX_SETITEM_H__70964E5E_2CC5_4B1E_8E28_D208F79E155F__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CSETItem { public: CSETItem(); virtual ~CSETItem(); }; #endif // !defined(AFX_SETITEM_H__70964E5E_2CC5_4B1E_8E28_D208F79E155F__INCLUDED_) <file_sep>/RemodelingTable.cpp // RemodelingTable.cpp: implementation of the CRemodelingTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "RemodelingTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CRemodelingTable::CRemodelingTable() { } CRemodelingTable::~CRemodelingTable() { } <file_sep>/PAMAExp.cpp // PAMAExp.cpp: implementation of the CPAMAExp class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "PAMAExp.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CPAMAExp::CPAMAExp() { } CPAMAExp::~CPAMAExp() { } <file_sep>/SETItemSet.cpp // SETItemSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "SETItemSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CSETItemSet IMPLEMENT_DYNAMIC(CSETItemSet, CRecordset) CSETItemSet::CSETItemSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CSETItemSet) m_sSetIndex = 0; m_strMainName = _T(""); m_bySetCount = 0; m_bySetAddOption1 = 0; m_bySetAddOption2 = 0; m_bySetAddOption3 = 0; m_bySetAddOption4 = 0; m_bySetAddOption5 = 0; m_bySetAddOption6 = 0; m_bySetAddOption7 = 0; m_bySetAddOption8 = 0; m_bySetAddOption9 = 0; m_sSet2 = 0; m_sSet3 = 0; m_sSet4 = 0; m_sSet5 = 0; m_sSet6 = 0; m_sSet7 = 0; m_sSet8 = 0; m_sSet9 = 0; m_sSet10 = 0; m_sSet11 = 0; m_sSet12 = 0; m_sSet13 = 0; m_sSet14 = 0; m_sSet15 = 0; m_sItem01 = 0; m_sItem02 = 0; m_sItem03 = 0; m_sItem04 = 0; m_sItem05 = 0; m_sItem06 = 0; m_sItem07 = 0; m_sItem08 = 0; m_sItem09 = 0; m_sItem10 = 0; m_nFields = 36; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CSETItemSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame"); } CString CSETItemSet::GetDefaultSQL() { return _T("[dbo].[SET_ITEM]"); } void CSETItemSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CSETItemSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sSetIndex]"), m_sSetIndex); RFX_Text(pFX, _T("[strMainName]"), m_strMainName); RFX_Byte(pFX, _T("[bySetCount]"), m_bySetCount); RFX_Byte(pFX, _T("[bySetAddOption1]"), m_bySetAddOption1); RFX_Byte(pFX, _T("[bySetAddOption2]"), m_bySetAddOption2); RFX_Byte(pFX, _T("[bySetAddOption3]"), m_bySetAddOption3); RFX_Byte(pFX, _T("[bySetAddOption4]"), m_bySetAddOption4); RFX_Byte(pFX, _T("[bySetAddOption5]"), m_bySetAddOption5); RFX_Byte(pFX, _T("[bySetAddOption6]"), m_bySetAddOption6); RFX_Byte(pFX, _T("[bySetAddOption7]"), m_bySetAddOption7); RFX_Byte(pFX, _T("[bySetAddOption8]"), m_bySetAddOption8); RFX_Byte(pFX, _T("[bySetAddOption9]"), m_bySetAddOption9); RFX_Int(pFX, _T("[sSet2]"), m_sSet2); RFX_Int(pFX, _T("[sSet3]"), m_sSet3); RFX_Int(pFX, _T("[sSet4]"), m_sSet4); RFX_Int(pFX, _T("[sSet5]"), m_sSet5); RFX_Int(pFX, _T("[sSet6]"), m_sSet6); RFX_Int(pFX, _T("[sSet7]"), m_sSet7); RFX_Int(pFX, _T("[sSet8]"), m_sSet8); RFX_Int(pFX, _T("[sSet9]"), m_sSet9); RFX_Int(pFX, _T("[sSet10]"), m_sSet10); RFX_Int(pFX, _T("[sSet11]"), m_sSet11); RFX_Int(pFX, _T("[sSet12]"), m_sSet12); RFX_Int(pFX, _T("[sSet13]"), m_sSet13); RFX_Int(pFX, _T("[sSet14]"), m_sSet14); RFX_Int(pFX, _T("[sSet15]"), m_sSet15); RFX_Int(pFX, _T("[sItem01]"), m_sItem01); RFX_Int(pFX, _T("[sItem02]"), m_sItem02); RFX_Int(pFX, _T("[sItem03]"), m_sItem03); RFX_Int(pFX, _T("[sItem04]"), m_sItem04); RFX_Int(pFX, _T("[sItem05]"), m_sItem05); RFX_Int(pFX, _T("[sItem06]"), m_sItem06); RFX_Int(pFX, _T("[sItem07]"), m_sItem07); RFX_Int(pFX, _T("[sItem08]"), m_sItem08); RFX_Int(pFX, _T("[sItem09]"), m_sItem09); RFX_Int(pFX, _T("[sItem10]"), m_sItem10); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CSETItemSet diagnostics #ifdef _DEBUG void CSETItemSet::AssertValid() const { CRecordset::AssertValid(); } void CSETItemSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/MagicItemTable.h // MagicItemTable.h: interface for the CMagicItemTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_MAGICITEMTABLE_H__B8250F29_A0D3_45EA_B6DB_942F89F7074A__INCLUDED_) #define AFX_MAGICITEMTABLE_H__B8250F29_A0D3_45EA_B6DB_942F89F7074A__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CMagicItemTable { public: int GetMagicItemValue(); CMagicItemTable(); virtual ~CMagicItemTable(); short m_sSid; short m_sSubType; short m_sChangeValue; BYTE m_tUpgrade; BYTE m_tNeedClass; BYTE m_tWearInfo; BYTE m_tLevel; BYTE m_tUse; BYTE m_sAid; }; #endif // !defined(AFX_MAGICITEMTABLE_H__B8250F29_A0D3_45EA_B6DB_942F89F7074A__INCLUDED_) <file_sep>/StoreRepairSet.cpp // StoreRepairSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "StoreRepairSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CStoreRepairSet IMPLEMENT_DYNAMIC(CStoreRepairSet, CRecordset) CStoreRepairSet::CStoreRepairSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CStoreRepairSet) m_sStoreID = 0; m_sItemID = 0; m_nFields = 2; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CStoreRepairSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CStoreRepairSet::GetDefaultSQL() { return _T("[dbo].[STORE_REPAIR]"); } void CStoreRepairSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CStoreRepairSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sStoreID]"), m_sStoreID); RFX_Int(pFX, _T("[sItemID]"), m_sItemID); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CStoreRepairSet diagnostics #ifdef _DEBUG void CStoreRepairSet::AssertValid() const { CRecordset::AssertValid(); } void CStoreRepairSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/OnlineShopSet.h // OnlineShopSet.h: interface for the OnlineShopSet class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_ONLINESHOPSET_H__07B5B393_ADF1_4021_968E_CD706D64C44A__INCLUDED_) #define AFX_ONLINESHOPSET_H__07B5B393_ADF1_4021_968E_CD706D64C44A__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class OnlineShopSet { public: OnlineShopSet(); virtual ~OnlineShopSet(); }; #endif // !defined(AFX_ONLINESHOPSET_H__07B5B393_ADF1_4021_968E_CD706D64C44A__INCLUDED_) <file_sep>/GuildFortressSet.h #if !defined(AFX_GUILDFORTRESSSET_H__36ADCF11_6B24_447C_AD37_9BA6500D4305__INCLUDED_) #define AFX_GUILDFORTRESSSET_H__36ADCF11_6B24_447C_AD37_9BA6500D4305__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // GuildFortressSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CGuildFortressSet recordset class CGuildFortressSet : public CRecordset { public: CGuildFortressSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CGuildFortressSet) // Field/Param Data //{{AFX_FIELD(CGuildFortressSet, CRecordset) int m_sFortressID; long m_iGuildSid; CString m_strGuildName; int m_sTaxRate; long m_iGuildDN; CTime m_GetTime; CTime m_WarTime; CTime m_WarPlan; BYTE m_tWarType; CByteArray m_HireGuardList; CByteArray m_strAttackList; CByteArray m_strRepairList; int m_sPotalX; int m_sPotalY; int m_sZone; CTime m_MopPartyTime; CTime m_ReleaseTime; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CGuildFortressSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_GUILDFORTRESSSET_H__36ADCF11_6B24_447C_AD37_9BA6500D4305__INCLUDED_) <file_sep>/GuildStoreTable.h // GuildStoreTable.h: interface for the CGuildStoreTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_GUILDSTORETABLE_H__F712EC70_85B1_450C_B2E7_BD6C710FD3D2__INCLUDED_) #define AFX_GUILDSTORETABLE_H__F712EC70_85B1_450C_B2E7_BD6C710FD3D2__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CGuildStoreTable { public: CGuildStoreTable(); virtual ~CGuildStoreTable(); public: int m_sStoreID; long m_iGuildSid; TCHAR m_strGuildName[CHAR_NAME_LENGTH + 1]; TCHAR m_strMasterName[CHAR_NAME_LENGTH + 1]; BYTE m_tTaxRate; DWORD m_iGuildDN; BYTE m_tWarType; int m_wDay; int m_wHour; int m_wMinute; int m_wWarMon; int m_wWarDay; int m_wWarHour; int m_wWarMinute; int m_wWarPlanMon; int m_wWarPlanDay; int m_wWarPlanHour; int m_wWarPlanMinute; }; #endif // !defined(AFX_GUILDSTORETABLE_H__F712EC70_85B1_450C_B2E7_BD6C710FD3D2__INCLUDED_) <file_sep>/UserLevelUpTable.cpp // UserLevelUpTable.cpp: implementation of the CUserLevelUpTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "UserLevelUpTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CUserLevelUpTable::CUserLevelUpTable() { m_sLevel = 0; m_sHP = 0; m_sPP = 0; m_sDamage = 0; m_sDefense = 0; m_sWeight = 0; } CUserLevelUpTable::~CUserLevelUpTable() { } <file_sep>/HuanshiTable.h // EBodyTable.h: interface for the CEBodyTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_HUANSHITABLE_H__59A1C67E_8F9D_43A1_BC80_F9766EC5B86B__INCLUDED_) #define AFX_HUANSHITABLE_H__59A1C67E_8F9D_43A1_BC80_F9766EC5B86B__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CHuanshiTable { public: int GetMagicItemValue1(); CHuanshiTable(); virtual ~CHuanshiTable(); short m_sChangeValue; short m_sRandom; short m_sSubType; BYTE m_tLevel; BYTE m_tNeedClass; BYTE m_tSid; BYTE m_tUpgrade; BYTE m_tWearInfo; }; #endif // !defined(AFX_HUANSHITABLE_H__59A1C67E_8F9D_43A1_BC80_F9766EC5B86B__INCLUDED_) <file_sep>/RMBExchangeShop.h // OnlineShop.h: interface for the OnlineShop class. // ////////////////////////////////////////////////////////////////////// #if _MSC_VER > 1000 #pragma once class CRMBExchangeShop { public: CRMBExchangeShop(); virtual ~CRMBExchangeShop(); public: int m_rmbCode; short m_iSid; TCHAR m_iSname[20]; BYTE m_tIQ; int m_iNum; short m_sx1; short m_sx2; short m_sx3; short m_sx4; short m_sx5; short m_upgrade; short m_sx6; short m_sx7; short m_sx8; short m_sx9; short m_sx10; TCHAR m_iText[200]; }; #endif<file_sep>/UserLevelUpSet.cpp // UserLevelUpSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "UserLevelUpSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CUserLevelUpSet IMPLEMENT_DYNAMIC(CUserLevelUpSet, CRecordset) CUserLevelUpSet::CUserLevelUpSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CUserLevelUpSet) m_sLevel = 0; m_sHP = 0; m_sPP = 0; m_sDamage = 0; m_sDefense = 0; m_sWeight = 0; m_nFields = 6; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CUserLevelUpSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CUserLevelUpSet::GetDefaultSQL() { return _T("[dbo].[USER_LEVEL_UP]"); } void CUserLevelUpSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CUserLevelUpSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sLevel]"), m_sLevel); RFX_Int(pFX, _T("[sHP]"), m_sHP); RFX_Int(pFX, _T("[sPP]"), m_sPP); RFX_Int(pFX, _T("[sDamage]"), m_sDamage); RFX_Int(pFX, _T("[sDefense]"), m_sDefense); RFX_Int(pFX, _T("[sWeight]"), m_sWeight); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CUserLevelUpSet diagnostics #ifdef _DEBUG void CUserLevelUpSet::AssertValid() const { CRecordset::AssertValid(); } void CUserLevelUpSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/NpcTable.cpp // NpcTable.cpp: implementation of the CNpcTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "NpcTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CNpcTable::CNpcTable() { m_sSid = 0; // MONSTER(NPC) Serial ID m_sPid = 0; // MONSTER(NPC) Picture ID ::ZeroMemory(m_strName, sizeof(m_strName)); // MONSTER(NPC) Name m_sSTR = 0; // 힘 m_sDEX = 0; // 민첩 m_sVOL = 0; // 의지 m_sWIS = 0; // 지혜 m_sMaxHP = 0; // 최대 HP m_sMaxPP = 0; // 최대 PP m_byClass = 0; // 무기계열 m_byClassLevel = 0; // 무기계열 레벨 m_sExp = 0; // 경험치 m_byAX = 0; // 공격값 X m_byAY = 0; // 공격값 Y m_byAZ = 0; // 공격값 Z m_iDefense = 0; // 방어값 m_byRange = 0; m_sAI = 0; // 인공지능 인덱스 m_sAttackDelay = 0; // 공격딜레이 m_byVitalC = 0; // 신체데미지 크리티컬 m_byWildShot = 0; // 난사 레벨 m_byExcitedRate = 0; // 흥분 레벨 m_byIronSkin = 0; m_byReAttack = 0; m_bySubAttack = 0; // 상태이상 발생(부가공격) m_byState = 0; // 몬스터 (NPC) 상태이상 m_byPsi = 0; // 사이오닉 적용 m_byPsiLevel = 0; // 사이오닉레벨 m_bySearchRange = 0; // 적 탐지 범위 m_sSpeed = 0; // 이동속도 m_sInclination = 0; m_byColor = 0; m_sStandTime = 0; m_tNpcType = 0; // NPC Type m_sFamilyType = 0; // 몹들사이에서 가족관계를 결정한다. m_tItemPer = 0; // 아이템이 떨어질 확률 m_tDnPer = 0; // 돈이 떨어질확률 } CNpcTable::~CNpcTable() { } <file_sep>/SkillTable.h // SkillTable.h: interface for the CSkillTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_SKILLTABLE_H__C0DA93A2_84E6_4200_92B9_09D1E13ECEBA__INCLUDED_) #define AFX_SKILLTABLE_H__C0DA93A2_84E6_4200_92B9_09D1E13ECEBA__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CSkillTable { public: short m_sPid; short m_sSid; BYTE m_tClass; CWordArray m_arInc; CByteArray m_arRepair; CByteArray m_arSuccess; CSkillTable(); ~CSkillTable(); }; #endif // !defined(AFX_SKILLTABLE_H__C0DA93A2_84E6_4200_92B9_09D1E13ECEBA__INCLUDED_) <file_sep>/UserLevelUpTable.h // UserLevelUpTable.h: interface for the CUserLevelUpTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_USERLEVELUPTABLE_H__9B4BBFC1_B5E0_473C_AB0A_1DCDA91B1FF3__INCLUDED_) #define AFX_USERLEVELUPTABLE_H__9B4BBFC1_B5E0_473C_AB0A_1DCDA91B1FF3__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CUserLevelUpTable { public: CUserLevelUpTable(); virtual ~CUserLevelUpTable(); int m_sLevel; int m_sHP; int m_sPP; int m_sDamage; int m_sDefense; int m_sWeight; }; #endif // !defined(AFX_USERLEVELUPTABLE_H__9B4BBFC1_B5E0_473C_AB0A_1DCDA91B1FF3__INCLUDED_) <file_sep>/Npc.cpp // Npc.cpp: implementation of the CNpc class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "USER.h" #include "Npc.h" #include "Extern.h" #include "MAP.h" #include "BufferEx.h" #include "Mcommon.h" #include "scdefine.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif extern CRITICAL_SECTION m_CS_EventItemLogFileWrite; ////////////////////////////////////////////////////////////////////// // Fortress Paket Variable extern CRITICAL_SECTION m_CS_FortressData; extern CPtrList RecvFortressData; extern long nFortressDataCount; extern struct drop_info g_DropItem[256][4]; //int surround_x[8] = {-2, -1, 0, 1, 2, 1, 0, -1}; //int surround_y[8] = {0, -1, -2, -1, 0, 1, 2, 1}; int surround_x[8] = {-1, -1, 0, 1, 1, 1, 0, -1}; int surround_y[8] = {0, -1, -1, -1, 0, 1, 1, 1}; int g_iMoonEvent = 1; //#define STEP_DELAY //440 /*TCHAR *g_MagicArray[]={ //属性列表 "", "力值1", "体质1", "敏捷1", "智慧1", "智力1", "损伤5", "命中2%", "命中3%", "回避2%", "回避3%", "防御1", "防御2", "抗魔5", "魔伤5", "力值2", "体质2", "敏捷2", "智慧2", "智力2", "损伤10", "防止中毒状态异常", "防止麻痹状态异常", "防止混乱状态异常", "防止视觉丧失状态异常", "防止火焰状态异常", "防止冰冻状态异常", "防止电击状态异常", "射程1", "攻击速度1阶段提高", "命中4%", "命中5%", "回避4%", "回避5%", "故障率1%", "防御3", "防御4", "耐久100", "生命值10", "精神值10", "体力值10", "魔法抗斥力10", "魔法损伤10", "力值3", "体质3", "敏捷3", "智慧3", "智力3", "反击技术等级1", "兴奋技术等级1", "钢铁皮肤技术等级1", "自我医疗技术等级1", "魔法精修技术等级1", "法术大师技术等级1", "精神强化技术等级1", "冥想技术等级1", "穿刺技术等级1", "闪避技术等级1", "精神集中技术等级1", "回复技术等级1", "狙击技术等级1", "连射技术等级1", "灵动技术等级1", "枪械大师技术等级1", "损伤15", "诱发异常中毒状态", "诱发异常麻痹状态", "诱发异常混乱状态", "诱发异常视角丧失状态", "诱发异常火焰状态", "诱发异常冷气状态", "诱发异常电气状态", "攻击速度2阶段提高", "命中率6%", "命中率7%", "回避率6%", "回避率7%", "故障率2%", "防御力5", "防御力6", "耐久性200", "生命值20", "精神值20", "体力值20", "魔法抗斥15", "魔法损伤15", "力值4", "体质4", "敏捷4", "智慧4", "智力4", "损伤20", "射程2", "攻击速度3阶段提高", "命中率8%", "命中率9%", "回避率8%", "回避率9%", "故障率3%", "防御力7", "防御力8", "耐久性300", "生命值30", "精神值30", "体力值30", "魔法抗斥20", "魔法损伤20", "力值5", "体质5", "敏捷5", "智慧5", "智力5", "反击技术等级2", "兴奋技术等级2", "钢铁皮肤技术等级2", "自我医疗技术等级2", "魔法精修技术等级2", "法术大师技术等级2", "精神强化技术等级2", "冥想技术等级2", "穿刺技术等级2", "闪避技术等级2", "集中技术等级2", "回复技术等级2", "狙击技术等级2", "连射技术等级2", "灵动技术等级2", "枪械大师技术等级2", "损伤25", "防御力9", "防御力10", "耐久性500", "生命值40", "精神值40", "体力值40", "魔法抗斥25", "魔法损伤25", "所有的技术等级1", "命中率10%", "回避率10%", "耐久性700", "生命值50", "精神值50", "体力值50", "防止所有状态异常", "所有的技术等级2", "损伤30", "命中率15%", "所有的能力值6", "所有的技术3", "恢复速度1阶段", "恢复速度2阶段", "恢复速度3阶段", "恢复速度4阶段", "回避率15%+", "体质10", "体质15", "敏捷10", "敏捷15", "生命100", "精神100", "力值10", "力值15", "智慧10", "智慧15", "智力10", "智力15", "魔法抗斥1", "魔法抗斥2", "魔法抗斥3", "魔法抗斥4", "魔法损伤1", "魔法损伤2", "魔法损伤3", "魔法损伤4", "损伤1", "损伤2", "损伤3", "损伤4", "命中率1%", "回避率1%", "魔法损伤5", "生命值1", "生命值2", "生命值3", "生命值4", "生命值5", "保存经验值", "自动防护", "无视穿刺概率", "魔法抵抗力50", "所有能力值1", "每10秒消耗魔法值50", "生命值150", "防御力50", "获得经验值30%", "物品暴率30%", "所有能力值100", "强化护法技能等级1", "生命不息技能等级1", "爆发技能等级1", "精准技能等级1", "强化护法技能等级2", "生命不息技能等级2", "爆发技能等级2", "精准技能等级2", 0 };*/ #define UPDATE_EVENT_INVEN_TIME 12 ////////////////////////////////////////////////////////////////////// // Inline Function // inline int CNpc::GetUid(int x, int y ) { MAP* pMap = g_zone[m_ZoneIndex]; return pMap->m_pMap[x][y].m_lUser; } inline BOOL CNpc::SetUid(int x, int y, int id) { MAP* pMap = g_zone[m_ZoneIndex]; if(pMap->m_pMap[x][y].m_bMove != 0) return FALSE; if(pMap->m_pMap[x][y].m_lUser != 0 && pMap->m_pMap[x][y].m_lUser != id ) return FALSE; pMap->m_pMap[x][y].m_lUser = id; return TRUE; } BOOL CNpc::SetUidNPC(int x, int y, int id) { MAP* pMap = g_zone[m_ZoneIndex]; if(pMap->m_pMap[x][y].m_bMove != 0) return FALSE; if(pMap->m_pMap[x][y].m_lUser != 0 && pMap->m_pMap[x][y].m_lUser != id ) return FALSE; pMap->m_pMap[x][y].m_lUser = id; return TRUE; } ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CNpc::CNpc() { m_NpcVirtualState = NPC_STANDING; m_NpcState = NPC_LIVE; InitTarget(); m_ItemUserLevel = 0; // m_Delay = 0; // 促澜 惑怕肺 傈捞登扁 鳖瘤狼 矫埃( mili/sec ) // m_dwLastThreadTime = 0; m_Delay = 0; m_dwLastThreadTime = GetTickCount(); m_sClientSpeed = 0; // 努扼捞攫飘 局聪甫 困茄 捞悼 厚啦 m_dwStepDelay = 0; // 促澜 浦哦鳖瘤狼 矫埃 掉饭捞 m_tNpcAttType = 0; // 傍拜 己氢 m_tNpcLongType = 0; // 盔芭府(1), 辟芭府(0) m_tNpcGroupType = 0; // 档框阑 林绰衬(1), 救林绰衬?(0) // m_tNpcTraceType = 0; // 场鳖瘤 蝶扼埃促(1), 矫具俊辑 绝绢瘤搁 弊父(0) m_pPath = NULL; m_pOrgMap = NULL; // m_pMap = NULL; m_lMapUsed = 0; // 甘 皋葛府焊龋 m_bFirstLive = TRUE; m_tWeaponClass = BRAWL; m_dwDelayCriticalDamage = 0; m_dwLastAbnormalTime = GetTickCount(); ::ZeroMemory(m_pMap, sizeof(m_pMap));// 老瞒盔 甘栏肺 檬扁拳茄促. m_tAbnormalKind = 0; m_dwAbnormalTime = 0; m_presx = -1; m_presy = -1; m_lEventNpc = 0; m_pGuardStore = NULL; // 辨靛傈阑困秦 秦寸 惑痢阑 啊瘤绊 柯促. m_pGuardFortress = NULL; m_tRepairDamaged = 0; m_tNCircle = NPC_NCIRCLE_DEF_STATE; m_lFortressState = 0; m_lDamage = 0; m_bSummon = FALSE; m_sSummonOrgZ = m_sOrgZ; m_sSummonOrgX = m_sOrgX; m_sSummonOrgY = m_sOrgY; m_SummonZoneIndex = m_ZoneIndex; m_bSummonDead = FALSE; m_lNowSummoning = 0; m_lKillUid = -1; m_sQuestSay = 0; InitSkill(); InitUserList(); } CNpc::~CNpc() { ClearPathFindData(); InitUserList(); } ////////////////////////////////////////////////////////////////////// // NPC 胶懦沥焊甫 檬扁拳 茄促. // void CNpc::InitSkill() { for(int i = 0; i < SKILL_NUM; i++) { m_NpcSkill[i].sSid = 0; m_NpcSkill[i].tLevel = 0; m_NpcSkill[i].tOnOff = 0; } } /////////////////////////////////////////////////////////////////////// // 辨茫扁 单捞磐甫 瘤款促. // void CNpc::ClearPathFindData() { ::ZeroMemory(m_pMap, sizeof(m_pMap)); // 老瞒盔 甘阑 困秦 /* int i; if(m_pMap) { int **tmp = m_pMap; m_pMap = NULL; for(i = 0; i < m_vMapSize.cx; i++) { delete[] tmp[i]; } delete[] tmp; } */ } /////////////////////////////////////////////////////////////////////// // NPC 啊 贸澜 积扁芭唱 磷菌促啊 混酒朝 锭狼 贸府 // BOOL CNpc::SetLive(COM* pCom) { NpcTrace(_T("SetLive()")); if(m_tRepairDamaged > 0) return FALSE; // 傍己傈吝 颊惑 罐疽促搁 荐府瞪锭鳖瘤 弊措肺 蜡瘤登绢具窃... if(m_pGuardFortress && m_tGuildWar == GUILD_WARRING) { if(!m_bFirstLive) return FALSE; } else m_tGuildWar = GUILD_WAR_AFFTER; // 葛电 各阑 辨靛傈俊 措秦辑 檬扁拳茄促... if(m_bSummonDead) // 家券夌菌带 各俊 措秦辑 { m_ZoneIndex = m_TableZoneIndex; m_sCurZ = m_sOrgZ = m_sTableOrgZ; m_sOrgX = m_sTableOrgX; m_sOrgY = m_sTableOrgY; m_pOrgMap = g_zone[m_ZoneIndex]->m_pMap; // MapInfo 沥焊 悸泼 m_bSummonDead = FALSE; } // NPC狼 HP, PP 檬扁拳 ----------------------// int i = 0, j = 0; m_sHP = m_sMaxHP; m_sPP = m_sMaxPP; NpcDrop=4; int iTryLiveCount = 0; InitTarget(); InitUserList(); // 鸥百阑困茄 府胶飘甫 檬扁拳. // NPC 檬扁困摹 搬沥 ------------------------// MAP* pMap = g_zone[m_ZoneIndex]; m_nInitMinX = m_sOrgX - m_sMinX; if(m_nInitMinX < 1) m_nInitMinX = 1; m_nInitMinY = m_sOrgY - m_sMinY; if(m_nInitMinY < 1) m_nInitMinY = 1; m_nInitMaxX = m_sOrgX + m_sMaxX; if(m_nInitMaxX >= pMap->m_sizeMap.cx) m_nInitMaxX = pMap->m_sizeMap.cx - 1; m_nInitMaxY = m_sOrgY + m_sMaxY; if(m_nInitMaxY >= pMap->m_sizeMap.cy) m_nInitMaxY = pMap->m_sizeMap.cy - 1; CPoint pt; CPoint ptTemp; int modify_index = 0; char modify_send[2048]; // if(m_lEventNpc == 0 && m_sEvent == 1000) return TRUE;//@@@@@@@@@@@@@@@@@@@@@@ Test Code(烙矫肺 各阑 家券窍扁困秦) if(m_lEventNpc == 1 && !m_bFirstLive) { // 蜡历俊霸 NPC 沥焊傈价... // 捞蜡 : 磷篮 葛嚼阑 焊咯拎具窍扁锭巩俊 促澜 府哩瞪 瞒肥啊 棵锭 INFO_DELETE甫 焊辰促. ::ZeroMemory(modify_send, sizeof(modify_send)); for(int i = 0; i < NPC_NUM; i++) { if(g_arEventNpcThread[0]->m_ThreadInfo.pNpc[i] != NULL) { if(g_arEventNpcThread[0]->m_ThreadInfo.pNpc[i]->m_sNid == m_sNid) { FillNpcInfo(modify_send, modify_index, INFO_DELETE); SendInsight(pCom, modify_send, modify_index); g_arEventNpcThread[0]->m_ThreadInfo.pNpc[i] = NULL; InterlockedExchange(&m_lEventNpc, (LONG)0); InterlockedExchange(&g_arEventNpcThread[0]->m_ThreadInfo.m_lNpcUsed[i], (LONG)0); return TRUE; } } } return TRUE; } if(m_tNpcType != NPCTYPE_MONSTER && m_bFirstLive)//NPCTYPE_DOOR || m_tNpcType == NPCTYPE_GUARD) { m_nInitX = m_sCurX = m_sOrgX; m_nInitY = m_sCurY = m_sOrgY; pMap->m_pMap[m_sCurX][m_sCurY].m_lUser = m_sNid + NPC_BAND; // TRACE("NPC DOOR %s(nid = %d) - %d %d\n", m_strName, m_sNid, m_sCurX, m_sCurY); CPoint temp = ConvertToClient(m_sCurX, m_sCurY); TRACE("NPC DOOR %s(nid = %d) - %d %d\n", m_strName, m_sNid, temp.x, temp.y); } else { while(1) { i++; if(m_lEventNpc == 1) // 家券各老版快 登档废捞搁 贸澜 瘤沥茄 谅钎肺 { if(pMap->m_pMap[m_sOrgX][m_sOrgY].m_bMove == 0) { pt.x = m_sOrgX; pt.y = m_sOrgY; m_nInitX = m_sCurX = pt.x; m_nInitY = m_sCurY = pt.y; //ptTemp = ConvertToClient(m_sCurX, m_sCurY); break; } else { pt = FindNearRandomPoint(m_sOrgX, m_sOrgY); if(pt.x <= 0 || pt.y <= 0) { pt.x = myrand(m_nInitMinX, m_nInitMaxX); pt.y = myrand(m_nInitMinY, m_nInitMaxY); } } } else { pt.x = myrand(m_nInitMinX, m_nInitMaxX); pt.y = myrand(m_nInitMinY, m_nInitMaxY); // Test Code By <NAME> 02-08-13 (泅犁绰 林函 25伎肺父 力茄) if( m_sCurZ != 1 && m_sCurZ != 1005 ) // 老窜 带怜父 茫绰促. { if(m_tNpcType == NPCTYPE_MONSTER) { if( !CheckUserForNpc_Live(pt.x, pt.y) ) { iTryLiveCount += 1; if(iTryLiveCount >= 20) return FALSE; else continue; } } } //TRACE("MONSTER %s(nid = %d) - %d %d\n", m_strName, m_sNid, m_sCurX, m_sCurY); } if(pt.x < 0 || pt.x >= pMap->m_sizeMap.cx) continue; if(pt.y < 0 || pt.y >= pMap->m_sizeMap.cy) continue; if(pMap->m_pMap[pt.x][pt.y].m_bMove != 0 || pMap->m_pMap[pt.x][pt.y].m_lUser != 0) { if(i >= 100) { m_nInitX = m_sCurX = m_sOrgX; m_nInitY = m_sCurY = m_sOrgY; // TRACE("sid = %d, loop = %d My standing point is invalid x = %d, y = %d\n", m_sSid, i, pt.x, pt.y); InterlockedIncrement(&g_CurrentNPCError); return FALSE; // break; /* DeleteNPC();// 昏力啊 酒丛... TRACE("sid = %d, loop = %d My standing point is invalid x = %d, y = %d\n", m_sSid, i, pt.x, pt.y); return FALSE; */ } continue; } m_nInitX = m_sCurX = pt.x; m_nInitY = m_sCurY = pt.y; // ptTemp = ConvertToClient(m_sCurX, m_sCurY); break; } } SetUid(m_sCurX, m_sCurY, m_sNid + NPC_BAND); if(m_sDimension > 0) SetMapTypeBeforeGuildWar(pCom); // 甘阑 悸泼茄促. // 惑怕捞惑 沥焊 檬扁拳 m_dwLastAbnormalTime = GetTickCount(); m_tAbnormalKind = 0; m_dwAbnormalTime = 0; // 傍己傈俊辑 NPC HP啊 荐府啊 救登绢 乐栏搁 // if(m_pGuardFortress) SetFortressState(); if(m_bFirstLive) // NPC 啊 贸澜 混酒唱绰 版快 { NpcTypeParser(); m_tWeaponClass = GetWeaponClass(); m_bFirstLive = FALSE; InterlockedIncrement(&g_CurrentNPC); } // 各阑 付瘤阜栏肺 磷牢 蜡历狼 uid 檬扁拳 m_lKillUid = -1; // Test Code // CString strTemp = m_strName; // if(strTemp == "凝" || strTemp == "弊饭捞飘候遣")m_sHP = 1; // 蜡历俊霸 NPC 沥焊傈价... modify_index = 0; ::ZeroMemory(modify_send, sizeof(modify_send)); FillNpcInfo(modify_send, modify_index, INFO_MODIFY); SendInsight(pCom, modify_send, modify_index); m_presx = -1; m_presy = -1; SightRecalc(pCom); return TRUE; } /////////////////////////////////////////////////////////////////// // NPC 扁夯利牢 己氢阑 盒幅, 悸泼茄促. // void CNpc::NpcTypeParser() { MYSHORT sAI; BYTE upTemp = 0; // 惑困 8厚飘 BYTE dwTemp = 0; // 窍困 8厚飘 sAI.i = (short)m_sAI; upTemp = sAI.b[0]; dwTemp = sAI.b[1]; // temp = m_sAI;//m_byAI m_tNpcAttType = upTemp >> 7; upTemp = upTemp << 1; m_tNpcLongType = upTemp >> 7; upTemp = upTemp << 1; m_tNpcGroupType = upTemp >> 7; m_iNormalATRatio = m_byIronSkin; m_iSpecialATRatio = m_byReAttack; m_iMagicATRatio = m_bySubAttack; m_tSPATRange = m_byWildShot; /* switch( (int)m_byVitalC ) { case 0: // 老馆父 m_bCanNormalAT = TRUE; m_bCanMagicAT = FALSE; m_bCanSPAT = FALSE; break; case 1: // 概流父 m_bCanNormalAT = FALSE; m_bCanMagicAT = TRUE; m_bCanSPAT = FALSE; break; case 2: // 老馆, 漂荐 m_bCanNormalAT = TRUE; m_bCanMagicAT = FALSE; m_bCanSPAT = TRUE; m_tSPATRange = m_byWildShot; m_tSPATAI = m_byExcitedRate; break; case 3: // 概流, 漂荐 m_bCanNormalAT = FALSE; m_bCanMagicAT = TRUE; m_bCanSPAT = TRUE; m_tSPATRange = m_byWildShot; m_tSPATAI = m_byExcitedRate; break; case 4: // 老馆, 概流 m_bCanNormalAT = TRUE; m_bCanMagicAT = FALSE; m_bCanSPAT = TRUE; break; case 5: // 漂荐父 m_bCanNormalAT = FALSE; m_bCanMagicAT = FALSE; m_bCanSPAT = TRUE; m_tSPATRange = m_byWildShot; m_tSPATAI = m_byExcitedRate; break; case 6: // 老馆, 概流, 漂荐 m_bCanNormalAT = TRUE; m_bCanMagicAT = TRUE; m_bCanSPAT = TRUE; m_tSPATRange = m_byWildShot; m_tSPATAI = m_byExcitedRate; break; default: m_bCanNormalAT = TRUE; m_bCanMagicAT = FALSE; m_bCanSPAT = FALSE; break; } */ } /////////////////////////////////////////////////////////////////// // NPC 林函狼 利阑 茫绰促. // BOOL CNpc::FindEnemy(COM *pCom) { BOOL bSearch = FALSE; if(m_tNpcType == NPCTYPE_NPC || m_tNpcType == NPCTYPE_DOOR || m_tNpcType == NPCTYPE_GUILD_DOOR) return FALSE; if(m_tNpcType == NPCTYPE_GUILD_NPC || m_tNpcType == NPCTYPE_GUILD_MARK) return FALSE; if(m_byAX == 0 && m_byAZ == 0 ) return FALSE; // 郴啊 傍拜仿捞 绝栏搁 傍拜窍瘤 臼绰促 if(m_bySearchRange == 0) return FALSE; if(m_tNpcType == NPCTYPE_GUARD) bSearch = TRUE; if(m_tNpcType == NPCTYPE_GUILD_GUARD) { if(m_pGuardFortress && m_tRepairDamaged == NPC_DEAD_REPAIR_STATE) return FALSE; // 傍己傈俊辑 秦寸 堡急器 HP啊 0捞搁 悼累 阂瓷烙 bSearch = TRUE; } if(!bSearch && !m_tNpcAttType && m_Target.id < 0 ) { return FALSE; } else { // if( (GetTickCount() - m_dLastFind) < (DWORD)1000 ) if( (GetTickCount() - m_dLastFind) < (DWORD)2000 ) { return FALSE; } } m_dLastFind = GetTickCount(); // if(m_Target.id != -1) return TRUE; int min_x, min_y, max_x, max_y; min_x = m_sCurX - m_bySearchRange; if( min_x < 0 ) min_x = 0; min_y = m_sCurY - m_bySearchRange; if( min_y < 0 ) min_y = 0; max_x = m_sCurX + m_bySearchRange; max_y = m_sCurY + m_bySearchRange; if(max_x >= g_zone[m_ZoneIndex]->m_sizeMap.cx) max_x = g_zone[m_ZoneIndex]->m_sizeMap.cx - 2; if(max_y >= g_zone[m_ZoneIndex]->m_sizeMap.cy) max_y = g_zone[m_ZoneIndex]->m_sizeMap.cy - 2; int ix, iy; int target_uid; int uid; int rank = 0; USER *pUser = NULL; CNpc *pNpc = NULL; int tempLevel = 0, oldLevel = 1000; for(ix = min_x; ix <= max_x; ix++) { for(iy = min_y; iy <= max_y; iy++) { target_uid = m_pOrgMap[ix][iy].m_lUser; if( target_uid >= USER_BAND && target_uid < NPC_BAND ) { uid = target_uid - USER_BAND; pUser = GetUser(pCom, uid); if( pUser != NULL && pUser->m_bLive == USER_LIVE) { if( ix != pUser->m_curx || iy != pUser->m_cury ) { continue; } if(m_tNpcType == NPCTYPE_GUARD || m_tNpcType == NPCTYPE_GUILD_GUARD) { //rank = DEATH_RANK - CITY_RANK_INTERVAL; if(pUser->m_sKillCount > 100 || pUser->m_bPkStatus) { m_Target.id = target_uid; m_Target.failCount = 0; m_Target.x = ix; m_Target.y = iy; return TRUE; } } // 辨靛傈且锭绰 惑痢家蜡 辨靛盔篮 公矫 if(m_tNpcType == NPCTYPE_GUILD_GUARD) { if(m_tGuildWar == GUILD_WARRING) { //if(pUser->m_dwGuild == m_pGuardStore->m_iGuildSid) continue; if(m_pGuardStore) { if(pUser->m_dwGuild == m_pGuardStore->m_iGuildSid) continue; } else if(m_pGuardFortress) { if(pUser->m_dwGuild == m_pGuardFortress->m_iGuildSid) continue; } m_Target.id = target_uid; m_Target.failCount = 0; m_Target.x = ix; m_Target.y = iy; return TRUE; } } if(pUser->m_tIsOP == 1) continue; // 款康磊捞搁 公矫...^^ if(pUser->m_bPShopOpen == TRUE) continue; // User has personal shop // if(pUser->m_dwHideTime > 0) continue; // 篮葱 惑怕搁 公矫等促. //饶傍各... if(!m_tNpcAttType) // 朝 傍拜茄 仇阑 茫绰促. { if(IsDamagedUserList(pUser) || (m_tNpcGroupType && m_Target.id == target_uid)) { m_Target.id = target_uid; m_Target.failCount = 0; m_Target.x = ix; m_Target.y = iy; return TRUE; } } else // 急傍各... { if(IsSurround(ix, iy) == TRUE) continue; //笛矾 阶咯 乐栏搁 公矫茄促.(盔芭府, 辟芭府 公矫) USER *pTUser; pTUser = pCom->GetUserUid(uid); if ( pTUser == NULL ) continue; tempLevel = pTUser->m_sLevel; if(tempLevel <= oldLevel) { oldLevel = tempLevel; m_Target.id = target_uid; m_Target.failCount = 0; m_Target.x = ix; m_Target.y = iy; return TRUE; } } } } } } InitUserList(); // 酒公档 绝栏骨肺 府胶飘俊 包府窍绰 蜡历甫 檬扁拳茄促. InitTarget(); return FALSE; } ///////////////////////////////////////////////////////////////////////////// // 林函俊 唱甫 傍拜茄 蜡历啊 乐绰瘤 舅酒夯促 // BOOL CNpc::IsDamagedUserList(USER *pUser) { // int count = m_arDamagedUserList.GetSize(); if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) return FALSE; int sLen = strlen(pUser->m_strUserID); if(sLen < 0 || sLen > CHAR_NAME_LENGTH) return FALSE; for(int i = 0; i < NPC_HAVE_USER_LIST; i++) { if(strcmp(m_DamagedUserList[i].strUserID, pUser->m_strUserID) == 0) return TRUE; } return FALSE; } ///////////////////////////////////////////////////////////////////////////// // Target 苞 NPC 埃 Path Finding阑 荐青茄促. // BOOL CNpc::GetTargetPath(COM* pCom) { USER* pUser = GetUser(pCom, m_Target.id - USER_BAND); if(pUser == NULL) { InitTarget(); return FALSE; } if(pUser->m_sHP <= 0 || pUser->m_state != STATE_GAMESTARTED || pUser->m_bLive == FALSE) { InitTarget(); return FALSE; } /* if(strcmp(m_Target.szName, pUser->m_strUserID) != 0) { InitTarget(); return FALSE; } */ int iTempRange = m_bySearchRange; // 老矫利栏肺 焊沥茄促. // if(m_arDamagedUserList.GetSize()) iTempRange *= 2; // 傍拜罐篮 惑怕搁 茫阑 裹困 刘啊. if(IsDamagedUserList(pUser)) iTempRange *= 2; // 傍拜罐篮 惑怕搁 茫阑 裹困 刘啊. else iTempRange += 4; int min_x = m_sCurX - iTempRange; if(min_x < 0) min_x = 0; int min_y = m_sCurY - iTempRange; if(min_y < 0) min_y = 0; int max_x = m_sCurX + iTempRange; if(max_x >= g_zone[m_ZoneIndex]->m_sizeMap.cx) max_x = g_zone[m_ZoneIndex]->m_sizeMap.cx - 1; int max_y = m_sCurY + iTempRange; if(max_y >= g_zone[m_ZoneIndex]->m_sizeMap.cy) max_y = g_zone[m_ZoneIndex]->m_sizeMap.cy - 1; // 格钎痢捞 Search Range甫 哈绢唱瘤 臼绰瘤 八荤 CRect r = CRect(min_x, min_y, max_x+1, max_y+1); if(r.PtInRect(CPoint(pUser->m_curx, pUser->m_cury)) == FALSE) return FALSE; // Run Path Find ---------------------------------------------// CPoint start, end; start.x = m_sCurX - min_x; start.y = m_sCurY - min_y; end.x = pUser->m_curx - min_x; end.y = pUser->m_cury - min_y; m_ptDest.x = m_Target.x; m_ptDest.y = m_Target.y; m_min_x = min_x; m_min_y = min_y; m_max_x = max_x; m_max_y = max_y; return PathFind(start, end); } //////////////////////////////////////////////////////////////////////////////// // NPC 啊 Path Find 窍妨绰 付瘤阜 痢篮 泅犁 舅绊府硫惑 亲惑 捞悼阂啊 谅钎捞骨肺 // 付瘤阜 谅钎甫 荐沥 // // ## 泅犁(2000-12-12) 菩胶颇牢靛 舅绊府硫捞 荐沥登绢 捞 窃荐绰 鞘夸绝澜 ## BOOL CNpc::GetLastPoint(int sx, int sy, int& ex, int& ey) { int i; int x = 0, y = 0; int nx[] = {-1, 0, 1, 1, 1, 0, -1, -1}; int ny[] = {-1, -1, -1, 0, 1, 1, 1, 0}; BOOL bSearchDest = FALSE; MAP* pMap = g_zone[m_ZoneIndex]; for(i = 0; i < sizeof(nx)/sizeof(nx[0]); i++) { x = ex + nx[i]; if(x >= pMap->m_sizeMap.cx) x--; if(x < 0) x = 0; y = ey + ny[i]; if(y >= pMap->m_sizeMap.cy) y--; if(y < 0) y = 0; if(m_pOrgMap[x][y].m_bMove == 0 && m_pOrgMap[x][y].m_lUser == 0) { ex = x; ey = y; bSearchDest = TRUE; break; } } if (bSearchDest) return TRUE; int nSearchSize = max(abs(sx - ex), abs(sy - ey)); // ASSERT(nSearchSize); for (i = nSearchSize; i > 0; i--) { x = sx + (ex - sx) * i / nSearchSize; y = sy + (ey - sy) * i / nSearchSize; if ((x + y) % 2 != 0) y++; //焊沥 if(m_pOrgMap[x][y].m_bMove == 0 && m_pOrgMap[x][y].m_lUser == 0) { ex = x; ey = y; bSearchDest = TRUE; break; } } if (!bSearchDest) return FALSE; return TRUE; } ////////////////////////////////////////////////////////////////////////////// // NPC客 Target 苞狼 芭府啊 瘤沥 裹困焊促 累篮瘤 魄窜 // BOOL CNpc::IsCloseTarget(COM* pCom, int nRange) { // 趣矫唱 版厚捍捞 NPC甫 傍拜窍霸 登搁 鸥百捞 蜡历啊 酒聪扼 NPC老 荐 档 乐促. USER* pUser = GetUser(pCom, m_Target.id - USER_BAND); if(pUser == NULL) { InitTarget(); return FALSE; } if(pUser->m_sHP <= 0 || pUser->m_state != STATE_GAMESTARTED || pUser->m_bLive == FALSE) { InitTarget(); return FALSE; } CPoint ptUser = ConvertToClient(pUser->m_curx, pUser->m_cury); CPoint ptNpc = ConvertToClient(m_sCurX, m_sCurY); //^^ 家付俊辑绰 芭府八祸捞 酒聪扼 甘 八祸栏肺 芭府拌魂 棺 肋给等 蜡历 谅钎甫 眉农窍绊 乐澜 int dx = abs(ptUser.x - ptNpc.x); int dy = abs(ptUser.y - ptNpc.y); int max_dist = __max(dx, dy); if(max_dist > nRange * 2) return FALSE; // 努扼牢飘 谅钎绰 2瞒捞啊 茄伎捞骨肺 *2甫 茄促. m_Target.x = pUser->m_curx; m_Target.y = pUser->m_cury; return TRUE; } ////////////////////////////////////////////////////////////////////////////// // NPC客 Target 苞狼 芭府啊 瘤沥 裹困焊促 累篮瘤 魄窜 // BOOL CNpc::IsCloseTarget(USER *pUser, int nRange) { if(pUser == NULL) { return FALSE; } if(pUser->m_sHP <= 0 || pUser->m_state != STATE_GAMESTARTED || pUser->m_bLive == FALSE) { return FALSE; } CPoint ptUser = ConvertToClient(pUser->m_curx, pUser->m_cury); CPoint ptNpc = ConvertToClient(m_sCurX, m_sCurY); //^^ 家付俊辑绰 芭府八祸捞 酒聪扼 甘 八祸栏肺 芭府拌魂 棺 肋给等 蜡历 谅钎甫 眉农窍绊 乐澜 int dx = abs(pUser->m_curx - m_sCurX); int dy = abs(pUser->m_cury - m_sCurY); int max_dist = __max(dx, dy); if(max_dist > nRange * 2) return FALSE; InitTarget(); m_Target.id = pUser->m_uid + USER_BAND; m_Target.x = pUser->m_curx; m_Target.y = pUser->m_cury; /* if(pUser->m_strUserID != NULL) { m_Target.nLen = strlen(pUser->m_strUserID); if(m_Target.nLen <= CHAR_NAME_LENGTH) strncpy(m_Target.szName, pUser->m_strUserID, m_Target.nLen); else ::ZeroMemory(m_Target.szName, sizeof(m_Target.szName)); } */ return TRUE; } //////////////////////////////////////////////////////////////////////////////// // Path Find 肺 茫篮辨阑 促 捞悼 沁绰瘤 魄窜 // BOOL CNpc::IsMovingEnd() { if( m_bRandMove ) // 8规氢 急琶 框流烙老锭 { if( m_arRandMove.GetSize() ) return FALSE; return TRUE; } if(!m_pPath) return TRUE; int min_x = m_min_x; int min_y = m_min_y; if((m_sCurX - min_x) == m_vEndPoint.x && (m_sCurY - min_y) == m_vEndPoint.y) return TRUE; return FALSE; } ///////////////////////////////////////////////////////////////////////////////// // 秦寸 uid狼 USER* 甫 掘绰促. // USER* CNpc::GetUser(COM* pCom, int uid) { if(!pCom) return NULL; //if(uid < 0 || uid >= MAX_USER) return NULL; return pCom->GetUserUid(uid); } ///////////////////////////////////////////////////////////////////////////////// // Target 狼 困摹啊 促矫 辨茫扁甫 且 沥档肺 函沁绰瘤 魄窜 // 去鄂殿狼 惑怕捞惑 荤捞坷葱捞唱 胶懦殿捞 瘤盔登搁 捞 窃荐甫 捞侩窍搁 瞪 淀 // BOOL CNpc::IsChangePath(COM* pCom, int nStep) { if(!m_pPath) return TRUE; CPoint pt; GetTargetPos(pCom, pt); NODE* pTemp = m_pPath; CPoint ptPath[2]; while(1) { if(pTemp == NULL) break; if(pTemp->Parent) { ptPath[0].x = m_min_x + pTemp->x; ptPath[0].y = m_min_y + pTemp->y; pTemp = pTemp->Parent; } else { ptPath[1].x = m_min_x + pTemp->x; ptPath[1].y = m_min_y + pTemp->y; break; } } for(int i = 0; i < 2; i++) { if(abs(ptPath[i].x - pt.x) <= m_byRange && abs(ptPath[i].y - pt.y) <= m_byRange) return FALSE; } return TRUE; } ///////////////////////////////////////////////////////////////////////////////// // Target 狼 泅犁 困摹甫 掘绰促. // BOOL CNpc::GetTargetPos(COM *pCom, CPoint &pt) { USER* pUser = GetUser(pCom, m_Target.id - USER_BAND); if(!pUser) return FALSE; pt.x = pUser->m_curx; pt.y = pUser->m_cury; return TRUE; } ///////////////////////////////////////////////////////////////////////////////// // Target 苞 NPC 埃俊 辨茫扁甫 促矫茄促. // BOOL CNpc::ResetPath(COM* pCom) { CPoint pt; GetTargetPos(pCom, pt); m_Target.x = pt.x; m_Target.y = pt.y; return GetTargetPath(pCom); } ///////////////////////////////////////////////////////////////////////////////// // Step 荐 父怒 鸥南阑 氢秦 捞悼茄促. // BOOL CNpc::StepMove(COM* pCom, int nStep) { // if(m_tNpcType == NPCTYPE_GUILD_DOOR) return FALSE; // 捞悼救窍霸... if(!m_pPath && !m_bRandMove) return FALSE; if(m_NpcState != NPC_MOVING && m_NpcState != NPC_TRACING && m_NpcState != NPC_BACK) return FALSE; int min_x; int min_y; int will_x; int will_y; CPoint ptPre; MAP* pMap = g_zone[m_ZoneIndex]; if( !pMap ) return FALSE; if( !pMap->m_pMap ) return FALSE; for(int i = 0; i < nStep; i++) { if( m_bRandMove ) { if( !m_arRandMove.GetSize() ) return FALSE; min_x = m_min_x; min_y = m_min_y; will_x = min_x + m_arRandMove[0].x; will_y = min_y + m_arRandMove[0].y; m_arRandMove.RemoveAt( 0 ); if( will_x >= pMap->m_sizeMap.cx || will_x < 0 || will_y >= pMap->m_sizeMap.cy || will_y < 0 ) { m_vEndPoint.x = m_sCurX - min_x; m_vEndPoint.y = m_sCurY - min_y; return FALSE; } if( pMap->m_pMap[will_x][will_y].m_bMove != 0 || pMap->m_pMap[will_x][will_y].m_lUser != 0 ) { m_vEndPoint.x = m_sCurX - min_x; m_vEndPoint.y = m_sCurY - min_y; return FALSE; } ptPre.x = m_sCurX; ptPre.y = m_sCurY; m_sCurX = will_x; m_sCurY = will_y; // 矫具 犁拌魂... SightRecalc( pCom ); break; } else if(m_pPath->Parent) { m_pPath = m_pPath->Parent; min_x = m_min_x; min_y = m_min_y; will_x = min_x + m_pPath->x; will_y = min_y + m_pPath->y; if(will_x >= pMap->m_sizeMap.cx || will_x < 0 || will_y >= pMap->m_sizeMap.cy || will_y < 0) { m_vEndPoint.x = m_sCurX - min_x; m_vEndPoint.y = m_sCurY - min_y; return FALSE; } if(pMap->m_pMap[will_x][will_y].m_bMove != 0 || pMap->m_pMap[will_x][will_y].m_lUser != 0) { m_vEndPoint.x = m_sCurX - min_x; m_vEndPoint.y = m_sCurY - min_y; return FALSE; } ptPre.x = m_sCurX; ptPre.y = m_sCurY; m_sCurX = will_x; m_sCurY = will_y; //矫具 犁拌魂... SightRecalc(pCom); break; } return FALSE; } if(SetUid(m_sCurX, m_sCurY, m_sNid + NPC_BAND)) { pMap->m_pMap[ptPre.x][ptPre.y].m_lUser = 0; return TRUE; } else return FALSE; // return SetUid(m_sCurX, m_sCurY, m_sNid + NPC_BAND); } ////////////////////////////////////////////////////////////////////////////// // Target 俊 措茄 傍拜 贸府 // int CNpc::Attack(COM *pCom) { if(!pCom) return 10000; int ret = 0; int nStandingTime = m_sStandTime; // 醚扁拌凯 老锭绰 鸥百苞狼 芭府拌魂阑 崔府秦具 茄促. // if(m_tNpcType != NPCTYPE_GUARD && m_tNpcType != NPCTYPE_GUILD_GUARD)// 版厚捍捞 酒聪搁 矫具 拌魂 // { if(IsCloseTarget(pCom, m_byRange) == FALSE)// Check Code (窜瘤 规绢利牢 螟搁俊辑 持篮 内靛) { if(m_tNpcType == NPCTYPE_GUARD || m_tNpcType == NPCTYPE_GUILD_GUARD) { m_NpcState = NPC_STANDING; return 0; } m_NpcState = NPC_TRACING; // 傍拜窍绊 档噶啊绰 蜡历甫 蝶扼 棱扁困秦(馆览阑 粱歹 狐福霸) return 0; // IsCloseTarget()俊 蜡历 x, y蔼阑 盎脚窍绊 Delay = 0栏肺 淋 } // } short sTempHP = 0; CNpc* pNpc = NULL; USER* pUser = NULL; CByteArray arSkillAction1, arSkillAction2; int nHit = 0; int nAvoid = 0; BOOL bIsHit = FALSE; BOOL bIsCritical = FALSE; int nDamage = 0; int nDefense = 0; int iRandom = 0; // int iDefenseDex = 0; // double determine = 0; int determine = 0; int iDexHitRate = 0, iLevelHitRate = 0; int nID = m_Target.id; // Target 阑 备茄促. // 疙吝咯何 魄窜 函荐 檬扁拳 bIsHit = FALSE; // 雀乔蔼/疙吝魄沥/单固瘤 拌魂 -----------------------------------------// if(nID >= USER_BAND && nID < NPC_BAND) // Target 捞 User 牢 版快 { pUser = GetUser(pCom, nID - USER_BAND); if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED)// User 啊 Invalid 茄 版快 { InitTarget(); m_NpcState = NPC_STANDING; return nStandingTime; } if(pUser->m_bLive == USER_DEAD) // User 啊 捞固 磷篮版快 { InitTarget(); m_NpcState = NPC_STANDING; return nStandingTime; } if(pUser->m_dwNoDamageTime != 0) // User 啊 公利鸥烙俊 乐阑版快 { InitTarget(); m_NpcState = NPC_STANDING; return nStandingTime; } if(pUser->m_bPShopOpen == TRUE) // User has personal shop { InitTarget(); m_NpcState = NPC_STANDING; return nStandingTime; } if(pUser->m_bSessionOnline == true) // 离线不被怪攻击 { InitTarget(); m_NpcState = NPC_STANDING; return nStandingTime; } /* 困俊辑 捞固 眉农 窃 if(pUser->m_state == STATE_DISCONNECTED) { InitTarget(); m_NpcState = NPC_STANDING; return nStandingTime; } */ if(m_tNpcType == NPCTYPE_GUILD_GUARD) { if(m_tGuildWar == GUILD_WARRING && pUser->m_dwGuild > 0) { if(m_pGuardStore) { if(pUser->m_dwGuild == m_pGuardStore->m_iGuildSid) return nStandingTime; } else if(m_pGuardFortress) { if(pUser->m_dwGuild == m_pGuardFortress->m_iGuildSid) return nStandingTime; } } } // 篮葱 惑怕捞搁 版厚捍阑 力寇茄 葛电 各俊 秦寸 // if(m_tNpcType != NPCTYPE_GUARD && pUser->m_dwHideTime > 0) // { // InitTarget(); // m_NpcState = NPC_MOVING; // return nStandingTime; // } if(pUser->m_tIsOP == 1) { InitTarget(); m_NpcState = NPC_MOVING; return nStandingTime; } // 雀乔蔼 拌魂 nAvoid = pUser->GetAvoid(); // 疙吝咯何 魄窜 iRandom = (int)((double)XdY(1, 1000) / 10 + 0.5); iDexHitRate = (int)( 30.0 * ( (double)m_sDEX/(m_sDEX + pUser->m_sMagicDEX) ) + 15.0 ); iLevelHitRate = (int)( 70.0 * ( (double)m_byClassLevel/(pUser->m_sLevel + m_byClassLevel) ) + 15.0); determine = iDexHitRate + iLevelHitRate - (nAvoid+pUser->m_Avoid); /* iDefenseDex = pUser->m_sMagicDEX; if(iDefenseDex < 0) iDefenseDex = 0; //规绢 内靛 determine = 200 * ((double)m_sDEX / (m_sDEX + iDefenseDex)) * ((double)m_byClassLevel / (m_byClassLevel + pUser->m_sLevel)); determine = determine - nAvoid; */ if(determine < ATTACK_MIN) determine = ATTACK_MIN; // 弥家 20 else if(determine > ATTACK_MAX) determine = ATTACK_MAX; // 弥措 if(iRandom < determine) bIsHit = TRUE; // 疙吝 // 傍拜 固胶 if(bIsHit == FALSE) { SendAttackMiss(pCom, nID); return m_sAttackDelay;; } // 疙吝捞搁 //Damage 贸府 ----------------------------------------------------------------// nDamage = GetFinalDamage(pUser); // 弥辆 措固瘤 //物理伤害减少与最终伤害减少 //nDamage=nDamage-(pUser->m_DynamicMagicItem[5]+pUser->m_DynamicMagicItem[6]); nDamage=nDamage-(/*pUser->m_DynamicUserData[MAGIC_PHY_ATTACK_DOWN]+*/pUser->m_DynamicUserData[MAGIC_FINALLY_ATTACK_DOWN]); if(nDamage < 15) nDamage = 15; if(pUser->m_tAbnormalKind == ABNORMAL_BYTE_COLD) nDamage += 10; // 惑措祈捞 趁扁 捞惑捞搁 单固瘤 眠啊 //如果存在护法,30%的几率先攻击护法 if(pUser->m_tHuFaType &&pUser->m_nHuFaHP>0) { nID = nID+USER_HUFA_BAND; //伤害数据 pUser->SendDamageNum(0,nID,(short)nDamage); SendAttackSuccess(pCom, nID, bIsCritical, pUser->m_nHuFaHP, pUser->m_nHuFaMaxHP);//yskang 0.3 if(nDamage > 0) pUser->SetHuFaDamage(nDamage); if(pUser->m_nHuFaHP>0) { pUser->HuFaAttack(m_sNid+NPC_BAND); } return m_sAttackDelay; } if(nDamage > 0) pUser->SetDamage(nDamage); // 规绢螟 郴备档 皑家 pUser->SendDamagedItem(nDamage); pUser->SendDamageNum(0,pUser->m_uid+USER_BAND,nDamage); // if(pUser->m_bLive == USER_DEAD || pUser->m_sHP <= 0)//@@@ 唱吝俊 绊魔 if(pUser->m_lDeadUsed == 1) { InitTarget(); m_NpcState = NPC_STANDING; m_Delay = m_sStandTime; if(m_NpcVirtualState == NPC_STANDING) { if(m_sPid == 179) pUser->GetLevelDownExp(FALSE, -1, TRUE,m_strName);//傍侩 阁胶磐老版快 版氰摹 1%皑家 else pUser->GetLevelDownExp(FALSE, -1, FALSE,m_strName);// 版氰摹客 弊寇 函拳樊甫 馆康茄促. } if(m_tNpcType == NPCTYPE_GUARD) pUser->SendCityRank(1); // 版厚捍俊霸 磷栏搁 PK 墨款飘 1 皑家 // 泅犁 版厚捍篮 畴富 傍拜父 窍骨肺 捞镑俊父 眠啊 // Add by JJS 2002.05.24 } //yskang 0.3 SendAttackSuccess(pCom, nID, arSkillAction1, arSkillAction2, pUser->m_sHP, pUser->m_sMagicMaxHP); SendAttackSuccess(pCom, nID, bIsCritical, pUser->m_sHP, pUser->m_sMagicMaxHP);//yskang 0.3 } return m_sAttackDelay; } CNpc* CNpc::GetNpc(int nid) { CNpc* pNpc = NULL; int nSize = g_arNpc.GetSize(); if(nid < 0 || nid >= nSize) return NULL; for( int i = 0; i < g_arNpc.GetSize(); i++) { pNpc = g_arNpc[i]; if( !pNpc ) continue; if( pNpc->m_sNid == nid ) { return pNpc; } } return NULL; } //////////////////////////////////////////////////////////////////////////// // NPC 狼 傍拜仿阑 掘绢柯促. // int CNpc::GetAttack() { int X = m_byAX; int Y = m_byAZ; return XdY(X, Y); } //////////////////////////////////////////////////////////////////////////// // NPC 狼 规绢仿阑 掘绢柯促. // int CNpc::GetDefense() { return m_iDefense; } ///////////////////////////////////////////////////////////////////////////// // Damage 拌魂, 父距 m_sHP 啊 0 捞窍捞搁 荤噶贸府 // BOOL CNpc::SetDamage(int nDamage) { if(m_NpcState == NPC_DEAD) return TRUE; if(m_sHP <= 0) return TRUE; if(nDamage <= 0) return TRUE; m_sHP -= nDamage; if( m_sHP <= 0 ) { m_sHP = 0; return FALSE; } return TRUE; } ////////////////////////////////////////////////////////////////////////////// // NPC 荤噶贸府 // void CNpc::Dead() { long lNpcUid = m_sNid + NPC_BAND; if(m_pOrgMap[m_sCurX][m_sCurY].m_lUser == lNpcUid) { ::InterlockedExchange(&m_pOrgMap[m_sCurX][m_sCurY].m_lUser, (LONG)0); } m_sHP = 0; m_NpcState = NPC_DEAD; if(m_bSummon) { m_bSummonDead = TRUE; m_bSummon = FALSE; } if(m_NpcVirtualState == NPC_MOVING) m_NpcVirtualState = NPC_WAIT; m_Delay = m_sRegenTime; m_bFirstLive = FALSE; } ////////////////////////////////////////////////////////////////////////////// // NPC 荤噶贸府矫 版氰摹 盒硅甫 拌魂茄促.(老馆 蜡历客 滚叼 荤侩磊备盒) // void CNpc::SendExpToUserList(COM *pCom) { int i; int exp = 0;//, eventExp = 0; int totalDamage = 0; int firstDamage = 0; DWORD plusExp = 0; int MaxDamage=0; USER *KeypUser=NULL; if(m_NpcVirtualState == NPC_WAIT) return; // 啊惑傍埃俊辑 版氰摹 绝促. if(m_tNpcType >= NPCTYPE_GUILD_NPC) return; if(InterlockedCompareExchange((long*)&m_lDamage, (long)1, (long)0) == (long)0){ if(NpcDrop<=0) { InterlockedExchange(&m_lDamage, (LONG)0); return; }else{ NpcDrop=NpcDrop-1; } InterlockedExchange(&m_lDamage, (LONG)0); } // if(m_tNpcType == NPCTYPE_GUILD_NPC || m_tNpcType == NPCTYPE_GUILD_DOOR) return; // if(m_tNpcType == NPCTYPE_GUILD_GUARD) return; // 辨靛傈俊辑 磷篮 版厚捍篮 版氰摹甫 救霖促. // SYSTEMTIME gTime; // GetLocalTime(&gTime); USER *pUser = NULL; IsUserInSight(pCom); // 泅犁 伎裹困救俊 乐蠢衬?(各阑 扁霖栏肺 茄 拳搁 : 敲贰弊 悸泼) if(m_DamagedUserList[0].iUid >= 0 && m_DamagedUserList[0].nDamage > 0) // 霉锅掳俊 蜡历啊 乐促搁 2硅 { MaxDamage=firstDamage = m_DamagedUserList[0].nDamage; m_DamagedUserList[0].nDamage = m_DamagedUserList[0].nDamage * 2; } for(i = 0; i < NPC_HAVE_USER_LIST; i++) // 老窜 府胶飘甫 八祸茄促. { if(m_DamagedUserList[i].iUid < 0 || m_DamagedUserList[i].nDamage<= 0) continue; // 救傈内靛 if(m_DamagedUserList[i].bIs == TRUE) pUser = GetUser(pCom, m_DamagedUserList[i].iUid); if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) continue; KeypUser=pUser; if(abs(pUser->m_sLevel-m_byClassLevel) >o_yehuoini[0]->djxz) //人物与怪差距40级无法获得经验限制处 continue; //if(pUser->m_dwShopPingDN > 3000 ) //105级后不能得到经验,杀死怪物几个后不能得到经验 // continue; totalDamage = m_DamagedUserList[i].nDamage; if(MaxDamage<totalDamage){ MaxDamage=totalDamage; KeypUser=pUser; } if ( (m_sExp / 5) <= 0 ) continue; if ( totalDamage == 0 ) continue; if(((m_TotalDamage + firstDamage) /5) <= 0) continue; long long t=(long long)(m_sExp / 5) * totalDamage; exp =(int)(t/((m_TotalDamage + firstDamage) / 5)); pUser->m_iCityValue += m_sInclination; if(pUser->m_iCityValue > 2000000000) pUser->m_iCityValue = 2000000000; // 弥措蔼捞 绝绢辑 烙狼肺 沥沁促. if(pUser->m_iDisplayType != 5 && pUser->m_iDisplayType != 6) pUser->GetExpCommon((int)(exp * 1.1)); else pUser->GetExpCommon((int)(exp * 0.8)); //公丰 荤侩磊绰 版氰摹甫 撤冕促. //--------------------------------------------------------------------------------------------- } Dead_User_level=0; if(KeypUser!=NULL) Dead_User_level=KeypUser->m_sLevel; } ////////////////////////////////////////////////////////////////////////////// // 泅犁 各阑 扁霖栏肺 茄 拳搁 裹困救俊 乐绰瘤 魄窜 // void CNpc::IsUserInSight(COM *pCom) { int j; USER* pUser = NULL; int iSearchRange = m_bySearchRange; // 烙矫肺 棱疽促. int min_x, min_y, max_x, max_y; min_x = m_sCurX - 12; if( min_x < 0 ) min_x = 0; min_y = m_sCurY - 13; if( min_y < 0 ) min_y = 0; max_x = m_sCurX + 12; max_y = m_sCurY + 13; if(max_x >= g_zone[m_ZoneIndex]->m_sizeMap.cx) max_x = g_zone[m_ZoneIndex]->m_sizeMap.cx - 1; if(max_y >= g_zone[m_ZoneIndex]->m_sizeMap.cy) max_y = g_zone[m_ZoneIndex]->m_sizeMap.cy - 1; int ix, iy; int target_uid; int uid; int iLen = 0; for(j = 0; j < NPC_HAVE_USER_LIST; j++) { m_DamagedUserList[j].bIs = FALSE; } for(ix = min_x; ix <= max_x; ix++) { for(iy = min_y; iy <= max_y; iy++) { target_uid = m_pOrgMap[ix][iy].m_lUser; if( target_uid >= USER_BAND && target_uid < NPC_BAND ) { uid = target_uid - USER_BAND; for(j = 0; j < NPC_HAVE_USER_LIST; j++) { // 爱绊乐绰 府胶飘惑狼 蜡历客 鞍促搁 if(m_DamagedUserList[j].iUid == uid) // 弥辆 ID甫 厚背秦辑 悼老窍搁 { pUser = pCom->GetUserUid(uid); if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED || pUser->m_curz != m_sCurZ) continue; iLen = strlen(pUser->m_strUserID); if(iLen <= 0 || iLen > CHAR_NAME_LENGTH) continue; if(strcmp(pUser->m_strUserID, m_DamagedUserList[j].strUserID) == 0) { // 捞锭辑具 粮犁茄促绰 钎矫甫 茄促. m_DamagedUserList[j].bIs = TRUE; } } } } } } } ////////////////////////////////////////////////////////////////////////////// // NPC DEAD Packet 阑 牢辟 蜡历俊霸 焊辰促. // int CNpc::SendDead(COM *pCom, int type,BOOL TreeBaoLv) { // ASSERT(pCom); if(!pCom) return 0; if(m_NpcState != NPC_DEAD || m_sHP > 0) return 0; CBufferEx TempBuf; CPoint pt = ConvertToClient(m_sCurX, m_sCurY); TempBuf.Add(DEAD); TempBuf.Add((short)(m_sNid + NPC_BAND)); TempBuf.Add((short)pt.x); TempBuf.Add((short)pt.y); SendInsight(pCom, TempBuf, TempBuf.GetLength()); if(type) GiveNpcHaveItem(pCom ,TreeBaoLv); //掉东西 // if(( (type) && (abs(Dead_User_level-m_byClassLevel) <=50) )|| m_sEvent!=0) // GiveNpcHaveItem(pCom); // 酒捞袍 冻备扁(版厚捍捞搁 救冻绢飘覆) return m_sRegenTime; } //////////////////////////////////////////////////////////////////////////////// // 林函俊 利捞 绝芭唱 荐悼各狼 版快 烙狼狼 痢栏肺 辨茫扁甫 茄 饶 框流牢促. // BOOL CNpc::RandomMove(COM *pCom) { if(m_bySearchRange == 0) return FALSE; if(pCom == NULL) return FALSE; if(m_tNpcType == NPCTYPE_GUILD_DOOR) return FALSE; // 捞悼救窍霸... if(m_tNpcType == NPCTYPE_GUARD || m_tNpcType == NPCTYPE_GUILD_GUARD) return FALSE; // 捞悼救窍霸... // NPC 啊 檬扁 困摹甫 哈绢车绰瘤 魄窜茄促. BOOL bIsIn = IsInRange(); MAP* pMap = g_zone[m_ZoneIndex]; if( !pMap ) return FALSE; if( !pMap->m_pMap ) return FALSE; CPoint pt; int nLoop = 0; int nDestX = -1, nDestY = -1; int min_x, min_y, max_x, max_y; int temp_minx = 0, temp_miny = 0, temp_maxx = 0, temp_maxy = 0; CRect rectIn; if(bIsIn) // NPC 啊 檬扁 困摹甫 哈绢唱瘤 臼疽栏搁 { /*alisia int temp_range = m_bySearchRange / 2; min_x = m_sCurX - temp_range; if(min_x < 0) min_x = 0; min_y = m_sCurY - temp_range; if(min_y < 0) min_y = 0; max_x = m_sCurX + temp_range; if(max_x >= g_zone[m_ZoneIndex]->m_sizeMap.cx) max_x = g_zone[m_ZoneIndex]->m_sizeMap.cx - 1; max_y = m_sCurY + temp_range; if(max_y >= g_zone[m_ZoneIndex]->m_sizeMap.cy) max_y = g_zone[m_ZoneIndex]->m_sizeMap.cy - 1; rectIn.IntersectRect(CRect(m_nInitMinX, m_nInitMinY, m_nInitMaxX, m_nInitMaxY), CRect(min_x, min_y, max_x, max_y)); nLoop = 0; while(1) { nDestX = myrand(rectIn.left, rectIn.right); nDestY = myrand(rectIn.top, rectIn.bottom); if(pMap->m_pMap[nDestX][nDestY].m_bMove != 0 || pMap->m_pMap[nDestX][nDestY].m_lUser != 0) { if(nLoop++ >= 10) { TRACE("NOT FIND~~\n"); return FALSE; } continue; } break; } alisia*/ m_bRandMove = TRUE; // 泅犁 框流烙捞 8规氢 急琶 框流烙牢瘤甫 唱鸥辰促 - PathFind() 窃荐 救俊辑 府悸茄促 m_arRandMove.RemoveAll(); int axis_x[3]; axis_x[0] = -1; axis_x[1] = 0; axis_x[2] = 1; int axis_y[3]; axis_y[0] = -1; axis_y[1] = 0; axis_y[2] = 1; int rand_x, rand_y, rand_d; rand_x = myrand( 0, 2 ); rand_y = myrand( 0, 2 ); rand_d = myrand( 1, 5 ); for( int i = 1; i <= rand_d; i++ ) { m_arRandMove.Add( CPoint( axis_x[rand_x] * i, axis_y[rand_y] * i ) ); } m_min_x = m_sCurX; m_min_y = m_sCurY; return TRUE; } else // NPC 啊 檬扁 困摹甫 哈绢车栏搁 { int x = 0, y = 0; min_x = m_sCurX; min_y = m_sCurY; max_x = m_sCurX; max_y = m_sCurY; if(m_nInitMinX < m_sCurX) {min_x -= m_bySearchRange; x += 1;} if(min_x < 0) min_x = 0; if(m_nInitMinY < m_sCurY) {min_y -= m_bySearchRange; y += 1;} if(min_y < 0) min_y = 0; if(m_nInitMaxX > m_sCurX) {max_x += m_bySearchRange; x += 1;} if(max_x >= g_zone[m_ZoneIndex]->m_sizeMap.cx) max_x = g_zone[m_ZoneIndex]->m_sizeMap.cx - 1; if(m_nInitMaxY > m_sCurY) {max_y += m_bySearchRange; y += 1;} if(max_y >= g_zone[m_ZoneIndex]->m_sizeMap.cy) max_y = g_zone[m_ZoneIndex]->m_sizeMap.cy - 1; nLoop = 0; while(1) { nDestX = min_x + (rand() % (m_bySearchRange * x + 1)); if(nDestX > max_x) nDestX = max_x; nDestY = min_y + (rand() % (m_bySearchRange * y + 1)); if(nDestY > max_y) nDestY = max_y; if(pMap->m_pMap[nDestX][nDestY].m_bMove != 0 || pMap->m_pMap[nDestX][nDestY].m_lUser != 0) { if(nLoop++ >= 10) return FALSE; continue; } break; } } if(nDestX < 0 || nDestY < 0) { return FALSE; } // Run Path Find ---------------------------------------------// CPoint start, end; start.x = m_sCurX - min_x; start.y = m_sCurY - min_y; end.x = nDestX - min_x; end.y = nDestY - min_y; if(start.x < 0 || start.y < 0 || end.x < 0 || end.y < 0) { return FALSE; } m_ptDest.x = nDestX; m_ptDest.y = nDestY; m_min_x = min_x; m_min_y = min_y; m_max_x = max_x; m_max_y = max_y; return PathFind(start, end); } ///////////////////////////////////////////////////////////////////////////////////// // NPC 啊 檬扁 积己困摹 救俊 乐绰瘤 八荤 // BOOL CNpc::IsInRange() { // NPC 啊 檬扁 困摹甫 哈绢车绰瘤 魄窜茄促. // CRect rect(m_nInitMinX, m_nInitMinY, m_nInitMaxX, m_nInitMaxY); // return rect.PtInRect(CPoint(m_sCurX, m_sCurY)); if( m_nInitMinX > m_sCurX || m_nInitMaxX < m_sCurX ) return FALSE; if( m_nInitMinY > m_sCurY || m_nInitMaxY < m_sCurY ) return FALSE; return TRUE; } //////////////////////////////////////////////////////////////////////////////////////// // 矫具啊 函版灯绰瘤 魄窜窍绊 函版灯促搁 函版郴侩阑 努扼捞攫飘肺 傈价茄促. // void CNpc::SightRecalc(COM* pCom) { int sx, sy; sx = m_sCurX / SIGHT_SIZE_X; sy = m_sCurY / SIGHT_SIZE_Y; int dir_x = 0; int dir_y = 0; if( sx == m_presx && sy == m_presy ) return; if( m_presx == -1 || m_presy == -1 ) { dir_x = 0; dir_y = 0; } else { if( sx > m_presx && abs(sx-m_presx) == 1 ) dir_x = DIR_H; if( sx < m_presx && abs(sx-m_presx) == 1 ) dir_x = DIR_L; if( sy > m_presy && abs(sy-m_presy) == 1 ) dir_y = DIR_H; if( sy < m_presy && abs(sy-m_presy) == 1 ) dir_y = DIR_L; if( abs(sx-m_presx) > 1 ) dir_x = DIR_OUTSIDE; if( abs(sy-m_presy) > 1 ) dir_y = DIR_OUTSIDE; } int prex = m_presx; int prey = m_presy; m_presx = sx; m_presy = sy; SendUserInfoBySightChange(dir_x, dir_y, prex, prey, pCom); } ////////////////////////////////////////////////////////////////////////////////////////// // 矫具函版栏肺 牢茄 蜡历沥焊 傈价 // void CNpc::SendUserInfoBySightChange(int dir_x, int dir_y, int prex, int prey, COM *pCom) { int min_x = 0, min_y = 0; int max_x = 0, max_y = 0; int sx = m_presx; int sy = m_presy; int modify_index = 0; char modify_send[1024]; ::ZeroMemory(modify_send, sizeof(modify_send)); FillNpcInfo(modify_send, modify_index, INFO_MODIFY); int delete_index = 0; char delete_send[1024]; ::ZeroMemory(delete_send, sizeof(delete_send)); FillNpcInfo(delete_send, delete_index, INFO_DELETE); if( prex == -1 || prey == -1 ) { min_x = (sx-1)*SIGHT_SIZE_X; max_x = (sx+2)*SIGHT_SIZE_X; min_y = (sy-1)*SIGHT_SIZE_Y; max_y = (sy+2)*SIGHT_SIZE_Y; SendToRange(pCom, modify_send, modify_index, min_x, min_y, max_x, max_y); return; } if( dir_x == DIR_OUTSIDE || dir_y == DIR_OUTSIDE ) { min_x = (prex-1)*SIGHT_SIZE_X; max_x = (prex+2)*SIGHT_SIZE_X; min_y = (prey-1)*SIGHT_SIZE_Y; max_y = (prey+2)*SIGHT_SIZE_Y; SendToRange(pCom, delete_send, delete_index, min_x, min_y, max_x, max_y); min_x = (sx-1)*SIGHT_SIZE_X; max_x = (sx+2)*SIGHT_SIZE_X; min_y = (sy-1)*SIGHT_SIZE_Y; max_y = (sy+2)*SIGHT_SIZE_Y; SendToRange(pCom, modify_send, modify_index, min_x, min_y, max_x, max_y); return; } if( dir_x > 0 ) { min_x = (prex-1)*SIGHT_SIZE_X; max_x = (prex)*SIGHT_SIZE_X; min_y = (prey-1)*SIGHT_SIZE_Y; max_y = (prey+2)*SIGHT_SIZE_Y; SendToRange(pCom, delete_send, delete_index, min_x, min_y, max_x, max_y); min_x = (sx+1)*SIGHT_SIZE_X; max_x = (sx+2)*SIGHT_SIZE_X; min_y = (sy-1)*SIGHT_SIZE_Y; max_y = (sy+2)*SIGHT_SIZE_Y; SendToRange(pCom, modify_send, modify_index, min_x, min_y, max_x, max_y); } if( dir_y > 0 ) { min_x = (prex-1)*SIGHT_SIZE_X; max_x = (prex+2)*SIGHT_SIZE_X; min_y = (prey-1)*SIGHT_SIZE_Y; max_y = (prey)*SIGHT_SIZE_Y; SendToRange(pCom, delete_send, delete_index, min_x, min_y, max_x, max_y); min_x = (sx-1)*SIGHT_SIZE_X; max_x = (sx+2)*SIGHT_SIZE_X; min_y = (sy+1)*SIGHT_SIZE_Y; max_y = (sy+2)*SIGHT_SIZE_Y; SendToRange(pCom, modify_send, modify_index, min_x, min_y, max_x, max_y); } if( dir_x < 0 ) { min_x = (prex+1)*SIGHT_SIZE_X; max_x = (prex+2)*SIGHT_SIZE_X; min_y = (prey-1)*SIGHT_SIZE_Y; max_y = (prey+2)*SIGHT_SIZE_Y; SendToRange(pCom, delete_send, delete_index, min_x, min_y, max_x, max_y); min_x = (sx-1)*SIGHT_SIZE_X; max_x = (sx)*SIGHT_SIZE_X; min_y = (sy-1)*SIGHT_SIZE_Y; max_y = (sy+2)*SIGHT_SIZE_Y; SendToRange(pCom, modify_send, modify_index, min_x, min_y, max_x, max_y); } if( dir_y < 0 ) { min_x = (prex-1)*SIGHT_SIZE_X; max_x = (prex+2)*SIGHT_SIZE_X; min_y = (prey+1)*SIGHT_SIZE_Y; max_y = (prey+2)*SIGHT_SIZE_Y; SendToRange(pCom, delete_send, delete_index, min_x, min_y, max_x, max_y); min_x = (sx-1)*SIGHT_SIZE_X; max_x = (sx+2)*SIGHT_SIZE_X; min_y = (sy-1)*SIGHT_SIZE_Y; max_y = (sy)*SIGHT_SIZE_Y; SendToRange(pCom, modify_send, modify_index, min_x, min_y, max_x, max_y); } } //////////////////////////////////////////////////////////////////////////////// // 茄拳搁郴狼 蜡历俊霸父 沥焊傈价 // void CNpc::SendExactScreen(COM* pCom, TCHAR *pBuf, int nLength) { if(nLength <= 0 || nLength >= SEND_BUF_SIZE) return; SEND_DATA* pNewData = NULL; pNewData = new SEND_DATA; if(pNewData == NULL) return; pNewData->flag = SEND_SCREEN; pNewData->len = nLength; ::CopyMemory(pNewData->pBuf, pBuf, nLength); pNewData->uid = 0; pNewData->x = m_sCurX; pNewData->y = m_sCurY; pNewData->z = m_sCurZ; pNewData->zone_index = m_ZoneIndex; pCom->Send(pNewData); if(pNewData) delete pNewData; } /////////////////////////////////////////////////////////////////////////////// // 矫具救俊 乐绰 蜡历俊霸 单捞磐 傈价 // void CNpc::SendInsight(COM* pCom, TCHAR *pBuf, int nLength) { /* if(nLength <= 0 || nLength >= SEND_BUF_SIZE) return; SEND_DATA* pNewData = NULL; pNewData = new SEND_DATA; if(pNewData == NULL) return; pNewData->flag = SEND_INSIGHT; pNewData->len = nLength; ::CopyMemory(pNewData->pBuf, pBuf, nLength); pNewData->uid = 0; pNewData->x = m_sCurX; pNewData->y = m_sCurY; pNewData->z = m_sCurZ; pNewData->zone_index = m_ZoneIndex; EnterCriticalSection( &(pCom->m_critSendData) ); pCom->m_arSendData.Add( pNewData ); LeaveCriticalSection( &(pCom->m_critSendData) ); PostQueuedCompletionStatus( pCom->m_hSendIOCP, 0, 0, NULL ); */ if(nLength <= 0 || nLength >= SEND_BUF_SIZE) return; int sx = m_sCurX / SIGHT_SIZE_X; int sy = m_sCurY / SIGHT_SIZE_Y; int min_x = (sx-1)*SIGHT_SIZE_X; if( min_x < 0 ) min_x = 0; int max_x = (sx+2)*SIGHT_SIZE_X; int min_y = (sy-1)*SIGHT_SIZE_Y; if( min_y < 0 ) min_y = 0; int max_y = (sy+2)*SIGHT_SIZE_Y; MAP* pMap = g_zone[m_ZoneIndex]; if( !pMap ) return; if( max_x >= pMap->m_sizeMap.cx ) max_x = pMap->m_sizeMap.cx - 1; if( max_y >= pMap->m_sizeMap.cy ) max_y = pMap->m_sizeMap.cy - 1; int temp_uid; USER* pUser = NULL; for( int i = min_x; i < max_x; i++ ) { for( int j = min_y; j < max_y; j++ ) { temp_uid = pMap->m_pMap[i][j].m_lUser; if(temp_uid < USER_BAND || temp_uid >= NPC_BAND) continue; else temp_uid -= USER_BAND; if( temp_uid >= 0 && temp_uid < MAX_USER ) { pUser = pCom->GetUserUid(temp_uid); if ( pUser == NULL ) continue; if( pUser->m_state == STATE_GAMESTARTED ) { if( pUser->m_curx == i && pUser->m_cury == j && pUser->m_curz == m_sCurZ ) { Send( pUser, pBuf, nLength ); } } } } } } //////////////////////////////////////////////////////////////////////////////////// // 泅犁 NPC 狼 困摹客 (xpos, ypos) 荤捞狼 芭府甫 拌魂 // BOOL CNpc::GetDistance(int xpos, int ypos, int dist) { if(xpos >= g_zone[m_ZoneIndex]->m_sizeMap.cx || xpos < 0 || ypos >= g_zone[m_ZoneIndex]->m_sizeMap.cy || ypos < 0) return FALSE; int dx = abs(xpos - m_sCurX); int dy = abs(ypos - m_sCurY); if(dx + dy > dist * 2) return FALSE; return TRUE; } ///////////////////////////////////////////////////////////////////////////////////////// // NPC 啊 啊柳 酒捞袍阑 冻焙促. //怪物死后掉的物品计算 void CNpc::GiveNpcHaveItem(COM *pCom,BOOL TreeBaoLv) { int temp = 0; int iPer = 0, iVal = 0; int iRandom; int nCount = 1; int nDnHap = 0; USER *pUser = GetUser(pCom, m_DamagedUserList[0].iUid); if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) // 肋给等 蜡历酒捞叼捞搁 俊矾 { return; } // SYSTEMTIME gTime; // GetLocalTime(&gTime); if(m_NpcVirtualState == NPC_WAIT) return; /* if(gTime.wDay > 7 && gTime.wDay < 14) // 8老何磐 13老鳖瘤 { if(m_byClassLevel <= 20) nCount = 1; else if(m_byClassLevel <= 50) nCount = 2; else if(m_byClassLevel <= 70) nCount = 3; else nCount = 5; } else { */ if(TreeBaoLv ==TRUE || g_sanBaoLv ==TRUE) nCount = 3; //根据活动是否爆多少件装备 else nCount = 2; if(m_sEvent== 32000 || m_sEvent== 30007 ) nCount = 8; if( m_sEvent == NPC_EVENT_MOP ) nCount = 16; // 31000时掉16件物品 if( m_sEvent == NPC_EVENT_GREATE_MOP ) nCount = 10; // 竣胶府柯 殿 漂荐 阁胶磐老 版快 10俺甫 冻绢哆赴促. // } /* for(int i = 0; i < nCount; i++) { iRandom = myrand(1, 30); if(iRandom < m_tItemPer) { iRandom = myrand(1, 10000); for(int i = 2; i < g_NpcItem.m_nField; i += 2) { // iPer = g_NpcItem.m_ppItem[i][m_byClassLevel]; iPer = g_NpcItem.m_ppItem[i][m_sHaveItem]; if(iPer == 0) return; if(iRandom < iPer) { // 快急 扁夯抛捞喉甫 曼炼窍扁困秦 // temp = g_NpcItem.m_ppItem[i-1][m_byClassLevel]; temp = g_NpcItem.m_ppItem[i-1][m_sHaveItem]; iVal = temp; if(temp >= g_arItemTable.GetSize()) return; if(g_arItemTable[temp]->m_byWear <= 5 || g_arItemTable[temp]->m_byWear == 117 || g_arItemTable[temp]->m_byWear == 20) // 函屈抛捞喉捞促. { iVal = IsTransformedItem(g_arItemTable[temp]->m_sSid); if(iVal == -1) iVal = temp;//return; } if (iVal == 845 || iVal == 909 || iVal == 846 || iVal == 907 || iVal == 908) iVal = 847; GiveItemToMap(pCom, iVal, TRUE); break; } } } else if(iRandom < m_tDnPer) { // SYSTEMTIME gTime; //@@@@@@@@@@@@@@@@@@@@@@@@@@@@ Event Code 扁埃悼救 老矫利栏肺 疵妨霖促. // GetLocalTime(&gTime); iPer = g_arDNTable[m_byClassLevel]->m_sMinDn; iVal = g_arDNTable[m_byClassLevel]->m_sMaxDn; iRandom = myrand(iPer, iVal); //@@@@@@@@@@@@@@@@@@@@@@@@@@@@ Event Code 扁埃悼救 老矫利栏肺 疵妨霖促. nDnHap += iRandom; //GiveItemToMap(pCom, iRandom, FALSE); } } if(nDnHap > 0 ) { if (nDnHap == 845 || nDnHap == 909 || nDnHap == 846 || nDnHap == 907 || nDnHap == 908) nDnHap = 847; GiveItemToMap(pCom, nDnHap, FALSE); } */ int tItemHavePer; if(m_sPid>255) return; int tItemPer=g_DropItem[m_sPid][m_byColor].DropNovelity; //掉物品几率 int tLeechdomPer = g_DropItem[m_sPid][m_byColor].DropLeechdom+tItemPer; //掉蓝几率 int tItemN=g_DropItem[m_sPid][m_byColor].n; //物品种类数量 int i; //TRACE("数量 %d\n",nCount); for(i = 0; i < nCount; i++) { //TRACE( "i=%d\n",i); iRandom = myrand(1, 100); //TRACE("随机数 %d\n",iRandom); if(iRandom < tItemPer) { //TRACE( "物品--\n"); iRandom = myrand(1, 10000)%tItemN; tItemHavePer = g_DropItem[m_sPid][m_byColor].novelity[iRandom].per; temp = iVal = g_DropItem[m_sPid][m_byColor].novelity[iRandom].code1+g_DropItem[m_sPid][m_byColor].novelity[iRandom].code2*256; if(iVal >= g_arItemTable.GetSize()){ return; } iRandom = myrand(1, 1000); if(iRandom < tItemHavePer){ if(g_arItemTable[temp]->m_byWear <= 5 || g_arItemTable[temp]->m_byWear == 117 || g_arItemTable[temp]->m_byWear == 20) // 函屈抛捞喉捞促. { iVal = IsTransformedItem(g_arItemTable[temp]->m_sSid); if(iVal == -1) iVal = temp; } /*if (iVal == 987) { CString strMsg; strMsg.Format("玩家『 %s 』 打怪获得『 死亡使者 』!", pUser->m_strUserID);//公告提示 pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } if (iVal == 1093) { CString strMsg; strMsg.Format("玩家『 %s 』 打怪获得『 冰魂 』!", pUser->m_strUserID);//公告提示 pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); }*/ // TRACE( "物品代码 %s,%d\n",g_arItemTable[iVal]->m_strName,iVal); GiveItemToMap(pCom, iVal, TRUE); //break; } } else if(iRandom < tLeechdomPer)//掉蓝药 { //TRACE( "蓝药--\n"); if(m_byClassLevel <30) iVal=31; else if(m_byClassLevel <70) iVal=32; else if(m_byClassLevel <120) iVal=33; else //iVal=227+256*3; iVal=33; GiveItemToMap(pCom, iVal, TRUE); } else{ //TRACE( "钱--\n"); //int money= g_DropItem[m_sPid][m_byColor].money; //iRandom =money+(money *myrand(0,15)%15);//15%浮动 //nDnHap += iRandom; int money= g_DropItem[m_sPid][m_byColor].money; if (m_byClassLevel < 50) { iRandom = money * 120 /100; } if ( m_byClassLevel >= 50 && m_byClassLevel < 70) { iRandom = money; } else if ( m_byClassLevel >= 70 && m_byClassLevel < 90) { iRandom = money * 80 / 100; } else iRandom = money * 50 / 100; nDnHap += iRandom; } //TRACE( "i=%d\n",i); } //TRACE( "i=%d----------------------------------------\n",i); if(nDnHap > 0 ) { if (nDnHap == 845 || nDnHap == 909 || nDnHap == 846 || nDnHap == 907 || nDnHap == 908) nDnHap = 847; GiveItemToMap(pCom, nDnHap, FALSE); } } /////////////////////////////////////////////////////////////////////////////////// // 酒捞袍吝 函屈酒捞袍捞 乐绰瘤 魄窜茄促. // int CNpc::IsTransformedItem(int sid) { int iVal = 0; int i, j, iRandom; for(i = 0; i < g_ValItem.m_nRow; i++) { if(g_ValItem.m_ppItem[g_ValItem.m_nField-2][i] == sid) // 盖 付瘤阜 鞘靛俊辑 酒捞袍 亲格(6俺 辆幅) { iRandom = myrand(1, 100); for(j = 2; j < g_ValItem.m_nField; j+= 2) { iVal = g_ValItem.m_ppItem[j][i]; if(iRandom < iVal) { if(sid >= g_arItemTable.GetSize()) return -1; else return g_ValItem.m_ppItem[j-1][i]; } } } } return -1; } /////////////////////////////////////////////////////////////////////////////////// // 酒捞袍吝 概流 酒捞袍阑 冻绢靛副荐乐绰瘤 魄窜 //怪物掉落物品蓝黄率 int CNpc::IsMagicItem(COM* pCom, ItemList *pItem, int iTable) { /*int i = 0, j; int iMagicTemp = 1 , iRareTemp = 1; int iRandom = myrand( 1, 10000 ); int iMagicCount = 0, iCount = 0; int nLoop = 0, iType = 0; int nEventMoon = 0; int nEventSongpeon = 0; int nEventBox = 0; int iMagicUp=0; // int pingfen=0; //评分 USER* pUser = NULL; /*SYSTEMTIME gTime; //@@@@@@@@@@@@@@@ Event Code 扁埃悼救 老矫利栏肺 疵妨霖促. GetLocalTime(&gTime); if(gTime.wMonth < 7) { iMagicTemp = 10; iRareTemp = 2; } else if(gTime.wMonth == 7) { if(gTime.wDay <= 7) { iMagicTemp = 10; iRareTemp = 2; } } */ /* if(m_sEvent== 32000){ iMagicTemp=0; iRareTemp = 3; }else*/ /*if(m_sEvent == NPC_EVENT_MOP ) // 泅犁 候遣,凝,胶妮乔坷 { iMagicTemp = 0; iRareTemp = 4; } else if(m_sEvent == NPC_EVENT_GREATE_MOP) // 竣胶府柯 { iMagicTemp = 0; //70 iRareTemp = 8; //75 } else if(m_sEvent == 31000) // 竣胶府柯 { iMagicTemp = 0; //70 iRareTemp = 10000; //75 } /*if(m_lKillUid >= 0) { pUser = GetUser(pCom, m_lKillUid); if(pUser != NULL && pUser->m_state == STATE_GAMESTARTED) { if(pUser->m_dwMagicFindTime != 0) // 概流 颇牢歹甫 冈篮 惑怕捞搁 { iMagicTemp=0; iMagicUp=10000; } } }*/ /*if(m_lKillUid >= 0) { pUser = GetUser(pCom, m_lKillUid); if(pUser != NULL && pUser->m_state == STATE_GAMESTARTED) { if(pUser->m_dwMagicFindTime != 0 || pUser->m_isDoubleBAOLV != 0 || g_sanBaoLv == TRUE) { iMagicTemp *= 7; //o_yehuoini[0]->xyl; //幸运时掉蓝装备 iRareTemp *= 15; //o_yehuoini[0]->xyh; //幸运时掉金装备 } else if(pUser->m_dwMagicFindTime == 0 || pUser->m_isDoubleBAOLV == 0) { iMagicTemp *= 4; //o_yehuoini[0]->lan; //没有幸运时掉蓝装备 iRareTemp *= 1; //o_yehuoini[0]->huang; //没有幸运时掉金装备 } } } /*if(pUser->m_dwMagicFindTime != 0) // 概流 颇牢歹甫 冈篮 惑怕捞搁 { iMagicTemp=0; iMagicUp=10000; }*/ /*nEventMoon = NPC_RARE_ITEM * iRareTemp + (NPC_EVENT_MOON - NPC_RARE_ITEM); nEventSongpeon = nEventMoon + (NPC_EVENT_SONGPEON - NPC_EVENT_MOON); nEventBox = nEventSongpeon + (NPC_EVENT_BOX - NPC_EVENT_SONGPEON); if(iRandom <= NPC_MAGIC_ITEM * iMagicTemp) // 概流 { nLoop = 2; iType = MAGIC_ITEM; } else if((iRandom > NPC_MAGIC_ITEM * iMagicTemp) && (iRandom <=( NPC_RARE_ITEM * iRareTemp+iMagicUp))) { nLoop = 4; iType = RARE_ITEM; } else if(0 && iRandom > NPC_RARE_ITEM * iRareTemp && iRandom <= nEventMoon) { return EVENT_ITEM_MOON; } else if(0 && iRandom > nEventMoon && iRandom <= nEventSongpeon) { return EVENT_ITEM_SONGPEON; } else if(0 && iRandom > nEventSongpeon && iRandom <= nEventBox) { return EVENT_ITEM_BOX; } else return NORMAL_ITEM; // 老馆酒捞袍 int iTemp = 0; if(m_ItemUserLevel <= 20) iMagicCount = 205; //42; // 掉的物品属性控制 //neo版本 else if(m_ItemUserLevel <= 40) iMagicCount = 205; //106 else if(m_ItemUserLevel <= 60) iMagicCount = 205; //143 else iMagicCount = 205; //数值越大,掉的属性越好 if(iMagicCount >= g_arMagicItemTable.GetSize()) iMagicCount = g_arMagicItemTable.GetSize() - 1; while(nLoop > i) // 公炼扒 4俺加己蔼阑 备茄促. { iRandom = myrand(0, iMagicCount); //------------------------------------------------------------------------------------------------ //yskang 0.6 蜡丰荤侩磊俊霸 橇府固决 亮篮 可记狼 酒捞袍捞 冻绢柳促. if(pUser != NULL) { if(pUser->m_iDisplayType !=5 && pUser->m_iDisplayType !=6)//蜡丰 荤侩磊捞促. { if(m_ItemUserLevel <= 20) iRandom = myrand(0, iMagicCount); //亮篮 酒捞袍捞 冻绢龙 犬伏阑 臭牢促. else iRandom = myrand(41, iMagicCount); } } //------------------------------------------------------------------------------------------------ if(!g_arMagicItemTable[iRandom]->m_tUse) continue; if(CheckClassItem(iTable, iRandom) == FALSE) { if(i == 0) continue; // 概流篮 扁夯捞 1俺 // else if(iType == RARE_ITEM && i <= 2) continue; // 限制黄色物品属性必须为2条以上 else if(iType == RARE_ITEM && i <= 3) continue; // 限制黄色物品属性必须为3条以上 else { i++; continue; } }*/ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// int i = 0, j; int iMagicTemp = 1 , iRareTemp = 1; int iRandom = myrand( 1, 10000 ); if(pItem == NULL) return 0; int pai = 2; int iMagicCount = 0, iCount = 0; int nLoop = 0, iType = 0; int nEventMoon = 0; int nEventSongpeon = 0; int nEventBox = 0; int iMagicUp=0; USER* pUser = NULL; SYSTEMTIME gTime; GetLocalTime(&gTime); if(m_lKillUid >= 0) { pUser = GetUser(pCom, m_lKillUid); int random = myrand(0, 10000); int random2 = myrand(0,10000); if(pUser != NULL && pUser->m_state == STATE_GAMESTARTED) { if(pUser->m_dwMagicFindTime > 0 || pUser->m_isDoubleBAOLV == TRUE || g_sanBaoLv == TRUE) //幸运 捐款 或者服务器三倍都只爆黄 { if(random <= 6500) //5% 黄金 { if(random2 <= 1) //3排 { nLoop = 3; iType = RARE_ITEM; } else { nLoop = 4; iType = RARE_ITEM; } } else if( random > 3500 && random <= 6500) //25% 蓝色 { if(random2 <= 5000) //1排 { nLoop = 1; iType = MAGIC_ITEM; } else { nLoop = 2; iType = MAGIC_ITEM; } } else iType = NORMAL_ITEM; } else //不是幸运时 { if(random <= 400) //1% 黄金 { if(random2 <= 3000) //3排 { nLoop = 3; iType = RARE_ITEM; } else { nLoop = 4; iType = RARE_ITEM; } } else if( random > 400 && random <= 600) //25% 蓝色 { if(random2 <= 5000) //1排 { nLoop = 1; iType = MAGIC_ITEM; } else { nLoop = 2; iType = MAGIC_ITEM; } } else iType = NORMAL_ITEM; } } else { iType = NORMAL_ITEM; } } else { iType = NORMAL_ITEM; } if(m_sEvent >= 30000) { nLoop = 4; iType = RARE_ITEM; } int iTemp = 0; if(m_ItemUserLevel <= 20) iMagicCount = 205; // 阳光删除垃圾属性主意的地方 else if(m_ItemUserLevel <= 40) iMagicCount = 205; else if(m_ItemUserLevel <= 60) iMagicCount = 205; else iMagicCount = 205; if(iMagicCount >= g_arMagicItemTable.GetSize()) iMagicCount = g_arMagicItemTable.GetSize() - 1; while(nLoop > i) // 公炼扒 4俺加己蔼阑 备茄促. { iRandom = myrand(0, iMagicCount); if(pUser != NULL && pUser->m_state == STATE_GAMESTARTED) { if(m_ItemUserLevel <= 20) iRandom = myrand(0, iMagicCount); else iRandom = myrand(0, iMagicCount); } if(iRandom == 107 || iRandom == 109 || iRandom == 110 || iRandom == 128 ||iRandom == 141 || iRandom == 135 || iRandom == 136 || iRandom == 138 || iRandom == 139) { int random = myrand(0, 10000); if (random > 3000) continue; } if(!g_arMagicItemTable[iRandom]->m_tUse) continue; if(CheckClassItem(iTable, iRandom) == FALSE) { if(i == 0) continue; else if(iType == RARE_ITEM && i <= 3) continue; // 如果是金装备.属性必须4条 4排 <=3 是4排 else { i++; continue; } } for(j = 0; j < 4; j++) { if(o_yehuoini[0]->chongdie == 0) { if (pItem->tMagic[j] < 0 || pItem->tMagic[j] >= iMagicCount) continue; iCount = g_arMagicItemTable[pItem->tMagic[j]]->m_sSubType;//限制物品不出重复属性代码 } if(iCount != 0 && iCount == g_arMagicItemTable[iRandom]->m_sSubType) // 加己捞 般磨荐 乐栏骨肺 捞吝 奴蔼父 急琶 { iCount = g_arMagicItemTable[pItem->tMagic[j]]->m_sChangeValue; if(iCount < g_arMagicItemTable[iRandom]->m_sChangeValue) { iTemp = g_arMagicItemTable[pItem->tMagic[j]]->m_tLevel; if(pItem->sLevel - iTemp > 0) pItem->sLevel -= iTemp; pItem->sLevel += g_arMagicItemTable[iRandom]->m_tLevel; pItem->tMagic[j] = iRandom; if(g_arMagicItemTable[pItem->tMagic[j]]->m_sSubType == MAGIC_DURATION_UP) { iTemp = g_arMagicItemTable[pItem->tMagic[j]]->m_sChangeValue; if(pItem->sDuration - iTemp > 0) pItem->sDuration -= iTemp; pItem->sDuration += g_arMagicItemTable[iRandom]->m_sChangeValue; // 郴备仿 刘啊 加己篮 悸泼瞪锭 } break; } else if(iCount == g_arMagicItemTable[iRandom]->m_sChangeValue) break; } if(pItem->tMagic[j] > 0) continue; // 捞固 浇吩俊 蔼捞 乐栏搁 逞绢皑 else { pItem->tMagic[j] = iRandom; i++; if(g_arMagicItemTable[iRandom]->m_tLevel > 0) pItem->sLevel += g_arMagicItemTable[iRandom]->m_tLevel; if(g_arMagicItemTable[pItem->tMagic[j]]->m_sSubType == MAGIC_DURATION_UP) { pItem->sDuration += g_arMagicItemTable[iRandom]->m_sChangeValue; // 郴备仿 刘啊 加己篮 悸泼瞪锭 } /* int bianhao = pItem->sSid; int shuxing = pItem->tMagic[j]; int tClass = g_arItemTable[bianhao]->m_byWear; if ( tClass == 1 )//检测类型为武器给评分 { //属性 128 损伤25 138命中10 93攻击速度3 145所有技术2 113兴奋2 117法术2 120穿刺2 125连射2 if (shuxing == 138 || shuxing == 128 || shuxing == 93 || shuxing == 145|| shuxing == 113 || shuxing == 117 || shuxing == 120 || shuxing == 125) { pingfen += 50; } //属性 95命中9% if (shuxing == 95 ) { pingfen += 45; } //属性 91损伤20 94命中8 72攻击速度2 if (shuxing == 91 || shuxing == 94 || shuxing == 72 ) { pingfen += 40; } //属性 75命中 7 if (shuxing == 74 ) { pingfen += 35; } //属性 64损伤15 73命中6 137所有技术等级1 49兴奋技术等级1 53法术大师等级1 56穿刺技术等级1 61连射技术等级1 if (shuxing == 64 || shuxing == 73 || shuxing == 137 || shuxing == 49 || shuxing == 53 || shuxing == 56 || shuxing == 61) { pingfen += 30; } //属性 31命中5 if (shuxing == 31 ) { pingfen += 25; } } else if ( tClass != 1 )//检测类型装备给评分 { //属性 145所有2 128损伤25 141HP50 135抗魔25 139回避10 107力量5 110智慧5 109敏捷5 130防御10 if (shuxing == 145 || shuxing == 128|| shuxing == 141 || shuxing == 135 || shuxing == 139 || shuxing == 107 || shuxing == 110 || shuxing == 109 || shuxing == 130) { pingfen += 50; } //属性 97回避9 129防御9增加 if (shuxing == 97 || shuxing == 129 ) { pingfen += 45; } //属性 100防御8、132HP40 105抗魔20 96回避8 86力量4 88敏捷4 89智慧4 if (shuxing == 100 || shuxing == 132 || shuxing == 105 || shuxing == 96 || shuxing == 86 || shuxing == 88 || shuxing == 89 ) { pingfen += 40; } //属性 76回避7 99防御7 if (shuxing == 76 || shuxing == 99 ) { pingfen += 35; } //属性 75回避6 102HP30 84抗魔15 75回避6 43力量3 46智慧3 45敏捷3 if (shuxing == 75 || shuxing == 102 || shuxing == 84|| shuxing == 75 || shuxing == 43 || shuxing == 46 || shuxing == 45 ) { pingfen += 30; } //属性 33回避5 78防御5 if (shuxing == 33|| shuxing == 78 ) { pingfen += 25; } } break; } } } int bianhao2 = pItem->sSid; int tClass = g_arItemTable[bianhao2]->m_byWear; if ( tClass == 1 && pingfen >10 )//物品为武器 评分大于0 数值自己改 { CString strMsg; CString GoodsMagic; for(int i = 0; i < 4; i++) { if(pItem->tMagic[i] != 0) { GoodsMagic+=g_MagicArray[pItem->tMagic[i]]; GoodsMagic+=","; } } /*评分[ %d ]*/ /* ,pingfen*/ /* strMsg.Format("玩家:『 %s 』 打到『 %s 』属性:", pUser->m_strUserID,g_arItemTable[bianhao2]->m_strName );//公告提示 strMsg+=GoodsMagic; pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } else if ( tClass != 1 && pingfen > 10 )//物品为装备 评分大于0 数值自己改 { CString strMsg; CString GoodsMagic; //ZeroMemory(GoodsMagic,0); for(int i = 0; i < 4; i++) { if(pItem->tMagic[i] != 0) { GoodsMagic+=g_MagicArray[pItem->tMagic[i]]; GoodsMagic+=","; } } strMsg.Format("玩家:『 %s 』 打到『 %s 』属性:", pUser->m_strUserID,g_arItemTable[bianhao2]->m_strName );//公告提示 strMsg+=GoodsMagic; pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); }*/ break; } } } return iType; } //随机守护属性 void CNpc::shouhu_rand( ItemList *pMapItem) { int a; a=myrand(0,15)%100; pMapItem->tIQ=0x09;//守护颜色 switch (a){ case 0:pMapItem->tMagic[0]=1; return;//损伤2 3 5 case 1:pMapItem->tMagic[0]=4; return;//武器等级增加1 2 3 case 2:pMapItem->tMagic[0]=7; return;//将对方生命2 5 10转化成自己生命 case 3:pMapItem->tMagic[0]=10; return;//对周围造成2% 5 10伤害 case 4:pMapItem->tMagic[0]=13; return;//13 防御增加3 6 10 case 5:pMapItem->tMagic[0]=16; return;//魔法防御增加3 6 10 case 6:pMapItem->tMagic[0]=19; return;//将敌人对自己攻击伤害2% 3 5 反射到敌人身上 case 7:pMapItem->tMagic[0]=22; return;// 打怪经验值提高2% 3% 5% case 8:pMapItem->tMagic[0]=25; return;//金币提高10% 金币提高20% 金币提高50% case 9:pMapItem->tMagic[0]=28; return;// 所有技能增加1 所有技能增加2 所有技能增加3 case 10:pMapItem->tMagic[0]=31; return;//掉落物品提高2% 掉落物品提高3% 掉落物品提高5% case 11:pMapItem->tMagic[0]=34; return;//攻击力增加10 攻击力增加20 攻击力增加30 case 12:pMapItem->tMagic[0]=37; return;//吸取对方生命10 吸取对方生命20 吸取对方生命30 case 13:pMapItem->tMagic[0]=40; return;//生命增加10 生命增加20 生命增加30 case 14:pMapItem->tMagic[0]=1; return;//损伤2 3 5 case 15:pMapItem->tMagic[0]=13; return;//13 防御增加3 6 10 case 16:pMapItem->tMagic[0]=1; return;//损伤2 3 5 case 17:pMapItem->tMagic[0]=34; return;//攻击力增加10 攻击力增加20 攻击力增加30 case 18:pMapItem->tMagic[0]=13; return;//13 防御增加3 6 10 case 19:pMapItem->tMagic[0]=1; return;//损伤2 3 5 case 20:pMapItem->tMagic[0]=40; return;//生命增加10 生命增加20 生命增加30 case 21:pMapItem->tMagic[0]=1; return;//损伤2 3 5 case 22:pMapItem->tMagic[0]=13; return;//13 防御增加3 6 10 case 23:pMapItem->tMagic[0]=40; return;//生命增加10 生命增加20 生命增加30 case 24:pMapItem->tMagic[0]=40; return;//生命增加10 生命增加20 生命增加30 case 25:pMapItem->tMagic[0]=1; return;//损伤2 3 5 case 26:pMapItem->tMagic[0]=13; return;//13 防御增加3 6 10 case 27:pMapItem->tMagic[0]=1; return;//损伤2 3 5 case 28:pMapItem->tMagic[0]=40; return;//生命增加10 生命增加20 生命增加30 case 29:pMapItem->tMagic[0]=34; return;//攻击力增加10 攻击力增加20 攻击力增加30 case 30:pMapItem->tMagic[0]=1; return;//损伤2 3 5 default: pMapItem->tMagic[0]=0;return; } } /////////////////////////////////////////////////////////////////////////////////// // NPC Item阑 甘俊 冻焙促. // void CNpc::GiveItemToMap(COM *pCom, int iItemNum, BOOL bItem, int iEventNum) { int i, iRandom = 0; int iType = 0; BYTE tEBodySid = 0; CPoint pt; pt = FindNearRandomPointForItem(m_sCurX, m_sCurY); // 泅犁 磊扁谅钎甫 力寇茄 24沫 if(pt.x <= -1 || pt.y <= -1) return; if(pt.x >= g_zone[m_ZoneIndex]->m_sizeMap.cx || pt.y >= g_zone[m_ZoneIndex]->m_sizeMap.cy) return; ItemList *pMapItem = NULL; if(InterlockedCompareExchange((long*)&g_zone[m_ZoneIndex]->m_pMap[pt.x][pt.y].m_FieldUse, (long)1, (long)0) == (long)0) { pMapItem = new ItemList; if(!bItem) // 捣老 版快 { pMapItem->tType = TYPE_MONEY; pMapItem->dwMoney = iItemNum; pMapItem->uid[0] = m_iHaveItemUid[0].uid; pMapItem->uid[1] = m_iHaveItemUid[1].uid; pMapItem->uid[2] = m_iHaveItemUid[2].uid; pMapItem->SuccessRate[0] = (BYTE)m_iHaveItemUid[0].nDamage; pMapItem->SuccessRate[1] = (BYTE)m_iHaveItemUid[1].nDamage; pMapItem->SuccessRate[2] = (BYTE)m_iHaveItemUid[2].nDamage; pMapItem->dwTime = GetItemThrowTime(); } else // 酒捞袍 格废老版快 { if(iItemNum >= g_arItemTable.GetSize()) { if(pMapItem) delete pMapItem; return; } else { pMapItem->tType = TYPE_ITEM; pMapItem->sLevel = g_arItemTable[iItemNum]->m_byRLevel; pMapItem->sSid = g_arItemTable[iItemNum]->m_sSid; pMapItem->sDuration = g_arItemTable[iItemNum]->m_sDuration; pMapItem->sCount = 1; pMapItem->sBullNum = g_arItemTable[iItemNum]->m_sBullNum; for(i = 0; i < MAGIC_NUM; i++) pMapItem->tMagic[i] = 0; // 唱吝俊 Magic Item 眠啊且 巴 pMapItem->tIQ = NORMAL_ITEM; pMapItem->iItemSerial = 0; pMapItem->uid[0] = m_iHaveItemUid[0].uid; // 快急 鉴困 pMapItem->uid[1] = m_iHaveItemUid[1].uid; pMapItem->uid[2] = m_iHaveItemUid[2].uid; pMapItem->SuccessRate[0] = (BYTE)m_iHaveItemUid[0].nDamage; // 快急 鉴困 厚啦 pMapItem->SuccessRate[1] = (BYTE)m_iHaveItemUid[1].nDamage; pMapItem->SuccessRate[2] = (BYTE)m_iHaveItemUid[2].nDamage; pMapItem->dwTime = GetItemThrowTime(); //GetLocalTime(&pMapItem->ThrowTime); int iWear = g_arItemTable[iItemNum]->m_byWear; if(iWear >= 1 && iWear <= 5) { iType = IsMagicItem(pCom, pMapItem, iItemNum); if(iType == MAGIC_ITEM) { pMapItem->tIQ = MAGIC_ITEM; // 概流父 贸府... } else if(iType == RARE_ITEM) { pMapItem->tIQ = RARE_ITEM; // 贰绢父 贸府... // int n = pMapItem->tMagic[0] + pMapItem->tMagic[1] + pMapItem->tMagic[2] + pMapItem->tMagic[3]; // if(n > 500) { // int iRandom = myrand(1, 100); // //极品掉落控制 // if(iRandom < 30){ // pMapItem->tMagic[0] = 0; // } // } } else if(iType == EVENT_ITEM_MOON) // 焊抚崔 { iItemNum = EVENTITEM_SID_MOON; pMapItem->sLevel = g_arItemTable[iItemNum]->m_byRLevel; pMapItem->sSid = g_arItemTable[iItemNum]->m_sSid; pMapItem->sDuration = g_arItemTable[iItemNum]->m_sDuration; pMapItem->sCount = 1; pMapItem->sBullNum = g_arItemTable[iItemNum]->m_sBullNum; iWear = g_arItemTable[iItemNum]->m_byWear; } else if(iType == EVENT_ITEM_SONGPEON) // 价祈 肚绰 岿捍 { /* if(m_byClassLevel < 11) iItemNum = EVENTITEM_SID_SONGPEON_01; else if(m_byClassLevel >= 11 && m_byClassLevel < 31) iItemNum = EVENTITEM_SID_SONGPEON_11; else if(m_byClassLevel >= 31 && m_byClassLevel < 51) iItemNum = EVENTITEM_SID_SONGPEON_31; else if(m_byClassLevel >= 51 && m_byClassLevel < 71) iItemNum = EVENTITEM_SID_SONGPEON_51; else if(m_byClassLevel >= 71) iItemNum = EVENTITEM_SID_SONGPEON_71; */ iItemNum = EVENTITEM_SID_SONGPEON_01; pMapItem->sLevel = g_arItemTable[iItemNum]->m_byRLevel; pMapItem->sSid = g_arItemTable[iItemNum]->m_sSid; pMapItem->sDuration = g_arItemTable[iItemNum]->m_sDuration; pMapItem->sCount = 1; pMapItem->sBullNum = g_arItemTable[iItemNum]->m_sBullNum; iWear = g_arItemTable[iItemNum]->m_byWear; } else if(iType == EVENT_ITEM_BOX) // 急拱惑磊 { iItemNum = EVENTITEM_SID_BOX; pMapItem->sLevel = g_arItemTable[iItemNum]->m_byRLevel; pMapItem->sSid = g_arItemTable[iItemNum]->m_sSid; pMapItem->sDuration = g_arItemTable[iItemNum]->m_sDuration; pMapItem->sCount = 1; pMapItem->sBullNum = g_arItemTable[iItemNum]->m_sBullNum; iWear = g_arItemTable[iItemNum]->m_byWear; } } ////////////////////////////////////////////////////////////////////////月神套 else if(iItemNum==818 || iItemNum==733 || iItemNum==735 ){ byte tMagic=0; iRandom = myrand(1, 11); switch(iRandom){ case 1: tMagic=78;break;//防御5 case 2: tMagic=107;break;//力量5 case 3: tMagic=108;break;//体质5 case 4: tMagic=109;break;//敏捷5 case 5: tMagic=110;break;//智慧5 case 6: tMagic=111;break;//智力5 case 7: tMagic=33;break;//回避5 case 8: tMagic=14;break;//魔法损伤5 case 9: tMagic=6;break;//损伤5 case 10: tMagic=31;break;//命中5 case 11: tMagic=137;break;//所有1 default: tMagic=0;break; } pMapItem->tIQ = SET_ITEM; pMapItem->tMagic[0] =tMagic; } else if(iItemNum==991 ){ //byte tMagic=0; pMapItem->tIQ = UNIQUE_ITEM; //pMapItem->tMagic[0] =tMagic; } ////////////////////////////////////////////////////////////////////////////// // 厩技荤府 贸府 else if(iWear >= 6 && iWear <= 8) // 首饰掉落 { pMapItem->tIQ = MAGIC_ITEM; // 厩技荤府绰 公炼扒 概流 pMapItem->tMagic[0] = g_arItemTable[iItemNum]->m_bySpecial; } // 俊捞歹 贸府 else if(iWear == 126) //浩石掉落....和属性随机. { iRandom = myrand(1, 1040); for(i = 0; i < g_arEBodyTable.GetSize(); i++) { if(iRandom <= g_arEBodyTable[i]->m_sRandom) { tEBodySid = g_arEBodyTable[i]->m_tSid; break; } } pMapItem->tIQ = MAGIC_ITEM; // pMapItem->tMagic[0] = tEBodySid; }else if(iWear==130) { shouhu_rand(pMapItem); }else if(iWear==143){//幻石 byte tMagic=0; iRandom = myrand(1, 6); switch(iRandom){ case 1: tMagic=3;break; case 2: tMagic=13;break; case 3: tMagic=23;break; case 4: tMagic=43;break; case 5: tMagic=53;break; case 6: tMagic=63;break; default: tMagic=0;break; } pMapItem->tIQ = MAGIC_ITEM; pMapItem->tMagic[0] =tMagic; pMapItem->tMagic[5]= 1; } else if(iItemNum == 1051)//芯片 { byte tMagic = 0; iRandom = myrand(1,3); switch(iRandom) { case 1: tMagic = 5;break; case 2: tMagic = 6;break; case 3: tMagic = 12;break; } pMapItem->tIQ = MAGIC_ITEM; pMapItem->tMagic[0] = tMagic; } else if(iItemNum==1287){//炼狱球 byte tMagic=0; iRandom = myrand(1, 8); switch(iRandom){ case 1: tMagic=162;break;//15力 case 2: tMagic=156;break;//15体质 case 3: tMagic=158;break;//15敏捷 case 4: tMagic=164;break;//15智慧 case 5: tMagic=166;break;//15智力 case 6: tMagic=190;break;// case 7: tMagic=193;break;//150/50抗血 case 8: tMagic=194;break;//50防 default: tMagic=0;break; } pMapItem->tIQ = MAGIC_ITEM; pMapItem->tMagic[0] =tMagic; }else if(iWear==166){//程序卡掉落属性随机 byte tMagic=0; iRandom = myrand(1, 44); switch(iRandom){ case 1: tMagic=1;break; case 2: tMagic=2;break; case 3: tMagic=3;break; case 4: tMagic=4;break; case 5: tMagic=5;break; case 6: tMagic=6;break; case 7: tMagic=7;break; case 8: tMagic=8;break; case 9: tMagic=9;break; case 10: tMagic=10;break; case 11: tMagic=11;break; case 12: tMagic=12;break; case 13: tMagic=13;break; case 14: tMagic=14;break; case 15: tMagic=15;break; case 16: tMagic=16;break; case 17: tMagic=17;break; case 18: tMagic=18;break; case 19: tMagic=19;break; case 20: tMagic=20;break; case 21: tMagic=21;break; case 22: tMagic=22;break; case 23: tMagic=23;break; case 24: tMagic=24;break; case 25: tMagic=25;break; case 26: tMagic=26;break; case 27: tMagic=27;break; case 28: tMagic=38;break; case 29: tMagic=29;break; case 30: tMagic=30;break; case 31: tMagic=31;break; case 32: tMagic=32;break; case 33: tMagic=33;break; case 34: tMagic=34;break; case 35: tMagic=35;break; case 36: tMagic=36;break; case 37: tMagic=37;break; case 38: tMagic=38;break; case 39: tMagic=39;break; case 40: tMagic=40;break; case 41: tMagic=41;break; case 42: tMagic=42;break; case 43: tMagic=43;break; case 44: tMagic=44;break; default: tMagic=0;break; } pMapItem->tIQ = MAGIC_ITEM; pMapItem->tMagic[0] =tMagic;} else if(iItemNum== 1175) //晶片 { pMapItem->tIQ = MAGIC_ITEM; pMapItem->tMagic[0] = 2; } else if(iItemNum == 1176) //库丁 { pMapItem->tIQ = MAGIC_ITEM; pMapItem->tMagic[0] = 3; } else if(iItemNum == 1174) //仙瑟 { pMapItem->tIQ = MAGIC_ITEM; pMapItem->tMagic[0] = 1; } else if(iItemNum == 1177) //V3附件 { pMapItem->tIQ = MAGIC_ITEM; pMapItem->tMagic[0] = 7; } else if(iItemNum==1178){ //导弹固定属性 pMapItem->tIQ = MAGIC_ITEM; pMapItem->tMagic[0] =4; }else if(iItemNum==1179){ //激光固定属性 pMapItem->tIQ = MAGIC_ITEM; pMapItem->tMagic[0] =5; }else if(iItemNum==987){ pMapItem->tIQ = MAGIC_ITEM; pMapItem->tMagic[0] =59; } } } // 秦寸 蜡历俊霸 舅赴促. // pCom->DelThrowItem(); pCom->SetThrowItem( pMapItem, pt.x, pt.y, m_ZoneIndex ); ::InterlockedExchange(&g_zone[m_ZoneIndex]->m_pMap[pt.x][pt.y].m_FieldUse, 0); } } ////////////////////////////////////////////////////////////////////////////////// // 泅犁 困摹甫 吝缴栏肺 25 伎吝 酒捞袍甫 冻绢龙荐乐绰 谅钎甫 伐待栏肺 积己 // CPoint CNpc::FindNearRandomPoint(int x, int y) { CPoint t; int i; int iX, iY; int rand_x = 1, rand_y = 1; MAP *pMap = g_zone[m_ZoneIndex]; if( !pMap ) return CPoint(-1, -1); if( !pMap->m_pMap ) return CPoint(-1, -1); int dir[25][2]; // X Y dir[0][0] = 0; dir[0][1] = 0; // dir[1][0] = -1; dir[1][1] = 0; // dir[2][0] = -1; dir[2][1] = 1; // dir[3][0] = 0; dir[3][1] = 1; // dir[4][0] = 1; dir[4][1] = 1; // dir[5][0] = 1; dir[5][1] = 0; // dir[6][0] = 1; dir[6][1] = -1; // dir[7][0] = 0; dir[7][1] = -1; // dir[8][0] = -1; dir[8][1] = -1; // dir[9][0] = -2; dir[9][1] = -1; // dir[10][0] = -2; dir[10][1] = 0; // dir[11][0] = -2; dir[11][1] = 1; // dir[12][0] = -2; dir[12][1] = 2; // dir[13][0] = -1; dir[13][1] = 2; // dir[14][0] = 0; dir[14][1] = 2; // dir[15][0] = 1; dir[15][1] = 2; // dir[16][0] = 2; dir[16][1] = 2; // dir[17][0] = 2; dir[17][1] = 1; // dir[18][0] = 2; dir[18][1] = 0; // dir[19][0] = 2; dir[19][1] = -1; // dir[20][0] = 2; dir[20][1] = -2; // dir[21][0] = 1; dir[21][1] = -2; // dir[22][0] = 0; dir[22][1] = -2; // dir[23][0] = -1; dir[23][1] = -2; // dir[24][0] = -2; dir[24][1] = -2; // rand_x = myrand(1, 8, TRUE); rand_y = myrand(0, 1, TRUE); iX = dir[rand_x][rand_y] + x; iY = dir[rand_x][rand_y] + y; rand_x = iX; rand_y = iY; if(rand_x >= pMap->m_sizeMap.cx || rand_x < 0 || rand_y >= pMap->m_sizeMap.cy || rand_y < 0) return CPoint(-1, -1); if(g_zone[m_ZoneIndex]->m_pMap[rand_x][rand_y].m_lUser == 0) { if( IsMovable( rand_x, rand_y ) ) { // if(g_zone[m_ZoneIndex]->m_pMap[rand_x][rand_y].iIndex == -1) return CPoint( rand_x, rand_y ); return CPoint( rand_x, rand_y ); } } rand_x = x, rand_y = y; for( i = 1; i < 25; i++) { iX = rand_x + dir[i][0]; iY = rand_y + dir[i][1]; if( iX >= pMap->m_sizeMap.cx || iX < 0 || iY >= pMap->m_sizeMap.cy || iY < 0) continue; if(g_zone[m_ZoneIndex]->m_pMap[iX][iY].m_lUser != 0) continue; // 后 甘牢瘤 犬牢茄促. if( IsMovable( iX, iY ) ) { // if(g_zone[m_ZoneIndex]->m_pMap[iX][iY].iIndex == -1) return CPoint( iX, iY ); return CPoint( iX, iY ); } } return CPoint(-1, -1); } CPoint CNpc::FindNearRandomPointForItem(int x, int y) { CPoint t; int i; int iX, iY; int rand_x = 1, rand_y = 1; MAP *pMap = g_zone[m_ZoneIndex]; if( !pMap ) return CPoint(-1, -1); if( !pMap->m_pMap ) return CPoint(-1, -1); int dir[25][2]; // X Y dir[0][0] = 0; dir[0][1] = 0; // dir[1][0] = -1; dir[1][1] = 0; // dir[2][0] = -1; dir[2][1] = 1; // dir[3][0] = 0; dir[3][1] = 1; // dir[4][0] = 1; dir[4][1] = 1; // dir[5][0] = 1; dir[5][1] = 0; // dir[6][0] = 1; dir[6][1] = -1; // dir[7][0] = 0; dir[7][1] = -1; // dir[8][0] = -1; dir[8][1] = -1; // dir[9][0] = -2; dir[9][1] = -1; // dir[10][0] = -2; dir[10][1] = 0; // dir[11][0] = -2; dir[11][1] = 1; // dir[12][0] = -2; dir[12][1] = 2; // dir[13][0] = -1; dir[13][1] = 2; // dir[14][0] = 0; dir[14][1] = 2; // dir[15][0] = 1; dir[15][1] = 2; // dir[16][0] = 2; dir[16][1] = 2; // dir[17][0] = 2; dir[17][1] = 1; // dir[18][0] = 2; dir[18][1] = 0; // dir[19][0] = 2; dir[19][1] = -1; // dir[20][0] = 2; dir[20][1] = -2; // dir[21][0] = 1; dir[21][1] = -2; // dir[22][0] = 0; dir[22][1] = -2; // dir[23][0] = -1; dir[23][1] = -2; // dir[24][0] = -2; dir[24][1] = -2; // rand_x = myrand(1, 8, TRUE); rand_y = myrand(0, 1, TRUE); iX = dir[rand_x][rand_y] + x; iY = dir[rand_x][rand_y] + y; rand_x = iX; rand_y = iY; if(rand_x >= pMap->m_sizeMap.cx || rand_x < 0 || rand_y >= pMap->m_sizeMap.cy || rand_y < 0) return CPoint(-1, -1); // if(g_zone[m_ZoneIndex]->m_pMap[rand_x][rand_y].m_lUser == 0) // { // if( IsMovable( rand_x, rand_y ) ) // { // if(g_zone[m_ZoneIndex]->m_pMap[rand_x][rand_y].iIndex == -1) return CPoint( rand_x, rand_y ); // } // } if( g_zone[m_ZoneIndex]->m_pMap[rand_x][rand_y].m_FieldUse == 0 ) { if( g_zone[m_ZoneIndex]->m_pMap[rand_x][rand_y].iIndex == -1 ) return CPoint( rand_x, rand_y ); } rand_x = x, rand_y = y; for( i = 1; i < 25; i++) { iX = rand_x + dir[i][0]; iY = rand_y + dir[i][1]; if( iX >= pMap->m_sizeMap.cx || iX < 0 || iY >= pMap->m_sizeMap.cy || iY < 0) continue; // if(g_zone[m_ZoneIndex]->m_pMap[iX][iY].m_lUser != 0) continue; // 后 甘牢瘤 犬牢茄促. if( g_zone[m_ZoneIndex]->m_pMap[iX][iY].m_FieldUse != 0 ) continue; // 荤侩吝牢瘤 眉农. // if( IsMovable( iX, iY ) ) // { // if(g_zone[m_ZoneIndex]->m_pMap[iX][iY].iIndex == -1) return CPoint( iX, iY ); // } if(g_zone[m_ZoneIndex]->m_pMap[iX][iY].iIndex == -1) return CPoint( iX, iY ); } return CPoint(-1, -1); } /////////////////////////////////////////////////////////////////////////////////// // x, y 啊 框流老 荐 乐绰 谅钎牢瘤 魄窜 // BOOL CNpc::IsMovable(int x, int y) { if(x < 0 || y < 0 ) return FALSE; if(!g_zone[m_ZoneIndex] ) return FALSE; if(!g_zone[m_ZoneIndex]->m_pMap) return FALSE; if(x >= g_zone[m_ZoneIndex]->m_sizeMap.cx || y >= g_zone[m_ZoneIndex]->m_sizeMap.cy) return FALSE; if(g_zone[m_ZoneIndex]->m_pMap[x][y].m_bMove || g_zone[m_ZoneIndex]->m_pMap[x][y].m_lUser) return FALSE; return TRUE; } ////////////////////////////////////////////////////////////////////////////////////////////// // NPC 傍拜 己傍 // void CNpc::SendAttackSuccess(COM *pCom, int tuid, BOOL bIsCritical, short sHP, short sMaxHP) { if(pCom == NULL) return; CBufferEx TempBuf; // CByteArray arAction1; // CByteArray arAction2; TempBuf.Add(ATTACK_RESULT); //-------------------------------------------------- //yskang 0.3 NPC绰 农府萍拿 傍拜捞 绝促. //-------------------------------------------------- TempBuf.Add(ATTACK_SUCCESS); TempBuf.Add((int)(m_sNid + NPC_BAND)); TempBuf.Add(tuid); //---------------------------------------------------- /* BYTE tAction1 = (BYTE)arAction1.GetSize(); BYTE tAction2 = (BYTE)arAction2.GetSize(); int i = 0; TempBuf.Add(tAction1); if(tAction1 > 0) { for(i = 0; i < arAction1.GetSize(); i++) { TempBuf.Add(arAction1[i]); } } TempBuf.Add(tAction2); if(tAction2 > 0) { for(i = 0; i < arAction2.GetSize(); i++) { TempBuf.Add(arAction2[i]); } } */ TempBuf.Add((int)sHP); TempBuf.Add((int)sMaxHP); // SendInsight(pCom, TempBuf, TempBuf.GetLength()); SendExactScreen(pCom, TempBuf, TempBuf.GetLength()); } //////////////////////////////////////////////////////////////////////////// // NPC 傍拜 固胶 // void CNpc::SendAttackMiss(COM *pCom, int tuid) { CBufferEx TempBuf; TempBuf.Add(ATTACK_RESULT); TempBuf.Add(ATTACK_MISS); TempBuf.Add((int)(m_sNid + NPC_BAND)); TempBuf.Add(tuid); // SendInsight(pCom, TempBuf, TempBuf.GetLength()); SendExactScreen(pCom, TempBuf, TempBuf.GetLength()); } ///////////////////////////////////////////////////////////////////////// // NPC 狼 公扁 拌凯阑 备茄促. // BYTE CNpc::GetWeaponClass() { BYTE tClass = BRAWL; switch (m_byClass) { case 1: tClass = BRAWL; break; case 2: tClass = STAFF; break; case 4: tClass = EDGED; break; case 8: tClass = FIREARMS; break; } return tClass; } ///////////////////////////////////////////////////////////////////////////////////// // 傍拜矫付促 胶懦己傍 咯何甫 眉农茄促. // void CNpc::IsSkillSuccess(BOOL *bSuccess) { int iOnCount = 0; int i = 0; for(i = 0; i < SKILL_NUM; i++) bSuccess[i] = FALSE; for(i = 0; i < SKILL_NUM; i++) { if(m_NpcSkill[i].tOnOff == 1) iOnCount++; } int iRandom = XdY(1, 100); int iRate = 0; for(i = 0; i < SKILL_NUM; i++) { iRate = (int)((double)m_sWIS * 0.5 + m_NpcSkill[i].tLevel * 2 - iOnCount * 25 + 50 /* +Magic Bonus*/); //!Magic if(iRandom <= iRate) { bSuccess[i] = TRUE; } } } ////////////////////////////////////////////////////////////////////////////////////// // 檬扁 老馆 单固瘤甫 掘绰促. // int CNpc::GetNormalInitDamage() { int nHit = 0; int nDamage = 0; int xyz = 0; xyz = XdY(m_byAX, m_byAY) + m_byAZ; // 2002-10-17 by <NAME> // if(m_byClass == FIREARMS) nHit = (int)((double)m_sDEX/3 + 0.5); // else nHit = (int)((double)m_sSTR/2 + 0.5); if(nHit < 0) nHit = 0; nDamage = nHit + xyz; return nDamage; } /////////////////////////////////////////////////////////////////////////////////////// // 檬扁 农府萍拿 单固瘤甫 掘绰促. // int CNpc::GetCriticalInitDamage(BOOL *bSuccessSkill) { int nDamage = 0; int xyz = 0; xyz = XdY(m_byAX, m_byAY) + m_byAZ; /* // 焊胶各老锭 扁裙俊辑 舅妨临抗沥... */ return nDamage; } /////////////////////////////////////////////////////////////////////////// // 趁扁 单固瘤甫 悸泼茄促. // void CNpc::SetColdDamage() { if(m_tAbnormalKind != ABNORMAL_BYTE_NONE) return; // 捞固 惑怕捞惑捞 吧妨乐绰 惑怕捞搁 府畔 m_tAbnormalKind = ABNORMAL_BYTE_COLD; m_dwAbnormalTime = COLD_TIME; m_dwLastAbnormalTime = GetTickCount(); } /////////////////////////////////////////////////////////////////////////// // 拳堪单固瘤甫 悸泼茄促. // void CNpc::SetFireDamage() { if(m_tAbnormalKind != ABNORMAL_BYTE_NONE) return; // 捞固 惑怕捞惑捞 吧妨乐绰 惑怕捞搁 府畔 m_tAbnormalKind = ABNORMAL_BYTE_FIRE; m_dwAbnormalTime = FIRE_TIME; m_dwLastAbnormalTime = GetTickCount(); } ///////////////////////////////////////////////////////////////////////////// // Damage 拌魂, 父距 m_sHP 啊 0 捞窍捞搁 荤噶贸府 // BOOL CNpc::SetDamage(int nDamage, int uuid, COM *pCom) { if(m_NpcState == NPC_DEAD) return TRUE; if(m_sHP <= 0) return TRUE; if(nDamage <= 0) return TRUE; if(m_tNpcType == NPCTYPE_GUARD) return TRUE; if(m_tGuildWar == GUILD_WAR_AFFTER) { if(m_tNpcType >= NPCTYPE_GUILD_NPC) return TRUE; // if(m_tNpcType == NPCTYPE_GUILD_NPC) return TRUE; // if(m_tNpcType == NPCTYPE_GUILD_GUARD) return TRUE; // if(m_tNpcType == NPCTYPE_GUILD_DOOR) return TRUE; } if(InterlockedCompareExchange((long*)&m_lDamage, (long)1, (long)0) == (long)0) { int i; int iLen = 0; int userDamage = 0; ExpUserList *tempUser = NULL; int uid = uuid - USER_BAND; USER* pUser = GetUser(pCom, uid); // 秦寸 荤侩磊牢瘤 牢刘 if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) { InterlockedExchange(&m_lDamage, (LONG)0); return TRUE; } iLen = strlen(pUser->m_strUserID); if(iLen <= 0 || iLen > CHAR_NAME_LENGTH) { InterlockedExchange(&m_lDamage, (LONG)0); return TRUE; } if(m_tGuildWar == GUILD_WARRING)// 辨靛傈捞 国绢瘤绰 惑痢狼 惑老 版快 { if(m_tNpcType == NPCTYPE_GUILD_NPC) // 惑痢苞 傍己傈俊辑 NPC绰 辑肺 促弗 搬苞甫 父甸扁 锭巩 { // if(m_pGuardStore) { SetDamagedInGuildWar(nDamage, pUser); InterlockedExchange(&m_lDamage, (LONG)0); return TRUE; } if(m_pGuardFortress) { SetDamagedInFortressWar(nDamage, pUser); InterlockedExchange(&m_lDamage, (LONG)0); return TRUE; } } else if(m_tNpcType == NPCTYPE_GUILD_DOOR) // 傍己傈阑 困秦 漂喊洒 { if(m_pGuardFortress) { SetDoorDamagedInFortressWar(nDamage, pUser); InterlockedExchange(&m_lDamage, (LONG)0); return TRUE; } } else if(m_tNpcType == NPCTYPE_GUILD_GUARD) // 阿 辨靛俊 加茄 版厚捍篮 磊扁辨靛盔栏肺何磐 焊龋(?)罐扁困秦 { if(pUser->m_dwGuild > 0) { /* if(m_pGuardStore) { if(m_pGuardStore->m_iGuildSid == pUser->m_dwGuild) { InterlockedExchange(&m_lDamage, (LONG)0); return TRUE; } */ if(m_pGuardFortress) { if(m_pGuardFortress->m_iGuildSid == pUser->m_dwGuild) { InterlockedExchange(&m_lDamage, (LONG)0); return TRUE; } } } } } if( m_sEvent == NPC_EVENT_GREATE_MOP || m_sEvent== NPC_EVENT_MOP ) //如果打BOSS攻击只有平时的50%. { nDamage = (int)( (double)nDamage * 0.5 ); } userDamage = nDamage; if( (m_sHP - nDamage) < 0 ) userDamage = m_sHP; for(i = 0; i < NPC_HAVE_USER_LIST; i++) { if(m_DamagedUserList[i].iUid == uid) { if(strcmp(m_DamagedUserList[i].strUserID, pUser->m_strUserID) == 0) { m_DamagedUserList[i].nDamage += userDamage; goto go_result; } } } for(i = 0; i < NPC_HAVE_USER_LIST; i++) // 牢盔 力茄捞 弥辆 措固瘤俊 康氢阑 固摹唱? { if(m_DamagedUserList[i].iUid == -1) { if(m_DamagedUserList[i].nDamage <= 0) { strncpy(m_DamagedUserList[i].strUserID, pUser->m_strUserID, iLen); m_DamagedUserList[i].iUid = uid; m_DamagedUserList[i].nDamage = userDamage; m_DamagedUserList[i].bIs = FALSE; break; } } } go_result: m_TotalDamage += userDamage; m_sHP -= nDamage; /* ////////////////////////////////////////////////////////////////////////////////////////////////////////////// 杀BOSS提醒 if( m_sEvent == 31000 ) { CString strMsg = ""; int g_pid = m_sPid; int g_sid = m_sSid; CString Monstername = g_arNpcTable[g_sid]->m_strName; if(m_sHP <= 0 ) { Dead(); strMsg.Format( "玩家『 %s 』战胜BOSS『 %s 』 获得大量的宝贝!", pUser->m_strUserID,m_strName); pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } }*/ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// if( m_sHP <= 0 ) { UserListSort(); // 酒捞袍狼 林牢捞 穿焙啊 m_ItemUserLevel = pUser->m_sLevel; m_sHP = 0; InterlockedExchange(&m_lKillUid, (LONG)uid); if(m_sPid==190) m_sEvent=2; // 磷篮 各捞 涅胶飘俊 加茄 捞亥飘 各牢版快 秦寸 捞亥飘甫 角青 if(m_sEvent > 0 && m_sEvent <= NPC_QUEST_MOP) pUser->RunQuestEvent(this, m_sCurZ, m_sEvent); Dead(); InterlockedExchange(&m_lDamage, (LONG)0); return FALSE; } ChangeTarget(pUser, pCom); InterlockedExchange(&m_lDamage, (LONG)0); } return TRUE; } BOOL CNpc::CheckNpcRegenCount() { /* if(m_NpcState != NPC_DEAD) return FALSE; QueryPerformanceCounter((LARGE_INTEGER*)&m_RegenLastCount); if((m_RegenLastCount - m_RegenStartCount) >= g_Online_Update_Min_ticks) { m_RegenStartCount += g_Online_Update_Min_ticks; m_RegenCount += 10000; } if(m_RegenCount >= (DWORD)m_sRegenTime) return TRUE; */ return FALSE; } ///////////////////////////////////////////////////////////////////////////// // 鸥百捞 笛矾 阶咯 乐栏搁 促澜 鸥百阑 茫绰促. // BOOL CNpc::IsSurround(int targetx, int targety) { if(m_tNpcLongType) return FALSE; //盔芭府绰 烹苞 for(int i = 0; i < (sizeof(surround_x) / sizeof(surround_x[0])); i++) // 林函 8规氢 { if(IsMovable(targetx + surround_x[i], targety + surround_y[i])) return FALSE; } return TRUE; } ///////////////////////////////////////////////////////////////////////////// // 唱甫 傍拜茄 蜡历甫 鸥百栏肺 伙绰促.(扁霖 : 肪苞 HP甫 扁霖栏肺 急沥) // void CNpc::ChangeTarget(USER *pUser, COM* pCom) { int preDamage, lastDamage; int dist; if(m_byAX == 0 && m_byAZ == 0 ) return; // 郴啊 傍拜仿捞 绝栏搁 傍拜窍瘤 臼绰促 if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) return; if(pUser->m_bLive == USER_DEAD) return; if(pUser->m_tIsOP == 1) return; // 款康磊绰 公矫...^^ if(pUser->m_bPShopOpen == TRUE) return; USER *preUser = NULL; preUser = GetUser(pCom, m_Target.id - USER_BAND); if(pUser == preUser) return; if(preUser != NULL && preUser->m_state == STATE_GAMESTARTED) { if(strcmp(pUser->m_strUserID, preUser->m_strUserID) == 0) return; preDamage = 0; lastDamage = 0; preDamage = GetFinalDamage(preUser, 0); lastDamage = GetFinalDamage(pUser, 0); dist = abs(preUser->m_curx - m_sCurX) + abs(preUser->m_cury - m_sCurY); if(dist == 0) return; preDamage = (int)((double)preDamage/dist + 0.5); dist = abs(pUser->m_curx - m_sCurX) + abs(pUser->m_cury - m_sCurY); if(dist == 0) return; lastDamage = (int)((double)lastDamage/dist + 0.5); if(preDamage > lastDamage) return; } m_Target.id = pUser->m_uid + USER_BAND; m_Target.x = pUser->m_curx; m_Target.y = pUser->m_cury; /* if(pUser->m_strUserID != NULL) { m_Target.nLen = strlen(pUser->m_strUserID); if(m_Target.nLen <= CHAR_NAME_LENGTH) strncpy(m_Target.szName, pUser->m_strUserID, m_Target.nLen); else ::ZeroMemory(m_Target.szName, sizeof(m_Target.szName)); } */ // 绢浇贩 芭府绰单 傍拜窍搁 官肺 馆拜 if(m_NpcState == NPC_STANDING || m_NpcState == NPC_MOVING) { // 啊鳖捞 乐栏搁 馆拜栏肺 捞绢瘤备 if(IsCloseTarget(pUser, m_byRange) == TRUE) { m_NpcState = NPC_FIGHTING; NpcFighting(pCom); } else // 官肺 档噶啊搁 谅钎甫 盎脚窍绊 眠利 { if(GetTargetPath(pCom) == TRUE) // 馆拜 悼累饶 距埃狼 掉饭捞 矫埃捞 乐澜 { m_NpcState = NPC_TRACING; NpcTracing(pCom); } else { ToTargetMove(pCom, pUser); } } } // else m_NpcState = NPC_ATTACKING; // 茄曼 傍拜窍绰单 穿啊 规秦窍搁 格钎甫 官厕 if(m_tNpcGroupType) // 啊练鸥涝捞搁 矫具救俊 鞍篮 鸥涝俊霸 格钎 瘤沥 { m_Target.failCount = 0; FindFriend(); } } ///////////////////////////////////////////////////////////////////////////// // NPC 惑怕喊肺 盒拳茄促. // void CNpc::NpcLive(COM *pCom) { if(SetLive(pCom)) { m_NpcState = NPC_STANDING; m_Delay = m_sStandTime; if(m_sEvent == 31000) { if(m_sCurZ == 19) { CString strMsg; strMsg.Format("领主BOSS『 %s 』在 [ 污染洞3层 ] 刷新了!", m_strName);//公告提示 pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } if(m_sCurZ == 49) { CString strMsg; strMsg.Format("领主BOSS『 %s 』在 [ 矿洞4层 ] 刷新了!", m_strName);//公告提示 pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } if(m_sCurZ == 400) { CString strMsg; strMsg.Format("领主BOSS『 %s 』在 [ 菲利普 ] 刷新了!", m_strName);//公告提示 pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } if(m_sCurZ == 93) { CString strMsg; strMsg.Format("领主BOSS『 %s 』在 [ 忍者洞1层 ] 刷新了!", m_strName);//公告提示 pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } if(m_sCurZ == 94) { CString strMsg; strMsg.Format("领主BOSS『 %s 』在 [ 忍者洞2层 ] 刷新了!", m_strName);//公告提示 pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } if(m_sCurZ == 36) { CString strMsg; strMsg.Format("领主BOSS『 %s 』在 [ 电厂3层 ] 刷新了!", m_strName);//公告提示 pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } if(m_sCurZ == 416) { CString strMsg; strMsg.Format("领主BOSS『 %s 』在 [ 死神洞穴 ] 刷新了!", m_strName);//公告提示 pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } if(m_sCurZ == 311) { CString strMsg; strMsg.Format("领主BOSS『 %s 』在 [ 火神洞2层 ] 刷新了!", m_strName);//公告提示 pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } if(m_sCurZ == 315) { CString strMsg; strMsg.Format("领主BOSS『 %s 』在 [ 机甲工厂 ] 刷新了!", m_strName);//公告提示 pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_ANNOUNCE); } } } else { m_NpcState = NPC_LIVE; m_Delay = m_sStandTime * 10; } } ///////////////////////////////////////////////////////////////////////////// // NPC啊 辑乐绰版快. // void CNpc::NpcStanding(COM *pCom) { NpcTrace(_T("NpcStanding()")); if(RandomMove(pCom) == TRUE) { m_NpcState = NPC_MOVING; if( m_sStandTime > 2500 ) { m_Delay = m_sStandTime - 2000; } else { m_Delay = m_sStandTime; } // m_Delay = m_sStandTime; // m_Delay = m_sSpeed; // 2001-09-01, jjs07 return; } m_NpcState = NPC_STANDING; m_Delay = m_sStandTime; } ///////////////////////////////////////////////////////////////////////////// // NPC啊 捞悼窍绰 版快. // void CNpc::NpcMoving(COM *pCom) { NpcTrace(_T("NpcMoving()")); if(m_sHP <= 0) { Dead(); return; } if(FindEnemy(pCom) == TRUE) // 利阑 茫绰促. { if(m_tNpcType == NPCTYPE_GUARD || m_tNpcType == NPCTYPE_GUILD_GUARD) { m_NpcState = NPC_FIGHTING; m_Delay = 0; } else { m_NpcState = NPC_ATTACKING; m_Delay = m_sSpeed; } return; } if(m_tNpcType == NPCTYPE_GUARD || m_tNpcType == NPCTYPE_GUILD_GUARD) // 捞悼救窍霸... { m_NpcState = NPC_STANDING; m_Delay = m_sStandTime; return; } if(IsMovingEnd()) // 捞悼捞 场车栏搁 { m_NpcState = NPC_STANDING; //康开 观俊 乐栏搁 辑乐绰 矫埃阑 陋霸... if(IsInRange()) m_Delay = m_sStandTime; else m_Delay = m_sStandTime - 1000; if(m_Delay < 0) m_Delay = 0; return; } if(StepMove(pCom, 1) == FALSE) // 茄沫 框流烙(叭绰悼累, 崔副锭绰 2沫) { m_NpcState = NPC_STANDING; m_Delay = m_sStandTime; return; } CBufferEx TempBuf; CPoint t = ConvertToClient( m_sCurX, m_sCurY ); // 框流捞妨绰 辑滚谅钎啊 努扼捞攫飘俊辑 给框流捞绰 谅钎搁 府畔 if(IsStepEnd()) TempBuf.Add(MOVE_END_RESULT); else TempBuf.Add(MOVE_RESULT); TempBuf.Add(SUCCESS); TempBuf.Add((int)(NPC_BAND + m_sNid)); TempBuf.Add((short)t.x); TempBuf.Add((short)t.y); SendInsight(pCom, TempBuf, TempBuf.GetLength()); m_Delay = m_sSpeed; } ///////////////////////////////////////////////////////////////////////////// // NPC啊 傍拜窍绰版快. // void CNpc::NpcAttacking(COM *pCom) { NpcTrace(_T("NpcAttacking()")); int ret = 0; if( m_byPsi > 0 && m_byPsi < g_arMonsterPsi.GetSize() ) // 付过捞 乐绰 仇捞扼搁... { CMonsterPsi* pMagic = g_arMonsterPsi[(int)m_byPsi]; if( pMagic ) { if( IsCloseTarget( pCom, pMagic->m_byRange ) ) { m_NpcState = NPC_FIGHTING; m_Delay = 0; return; } } } if(IsCloseTarget(pCom, m_byRange)) // 傍拜且 荐 乐绰父怒 啊鳖款 芭府牢啊? { m_NpcState = NPC_FIGHTING; m_Delay = 0; return; } if(m_tNpcType == NPCTYPE_GUARD || m_tNpcType == NPCTYPE_GUILD_GUARD)// 辑乐绰 版厚捍老锭 傍拜捞 角菩窍搁 泪瘪 悼救 浆档废茄促. { m_NpcState = NPC_STANDING; m_Delay = m_sStandTime/2; return; } if(GetTargetPath(pCom) == FALSE) { if(RandomMove(pCom) == FALSE) { m_NpcState = NPC_STANDING; m_Delay = m_sStandTime; return; } m_NpcState = NPC_MOVING; m_Delay = m_sSpeed; return; } m_NpcState = NPC_TRACING; m_Delay = 0; } ///////////////////////////////////////////////////////////////////////////// // NPC啊 蜡历甫 眠利窍绰 版快. // void CNpc::NpcTracing(COM *pCom) { NpcTrace(_T("NpcTracing()")); if(m_tNpcType == NPCTYPE_GUARD || m_tNpcType == NPCTYPE_GUILD_GUARD) return; if(GetUser(pCom, (m_Target.id - USER_BAND)) == NULL) // Target User 啊 粮犁窍绰瘤 八荤 { m_NpcState = NPC_STANDING; m_Delay = m_sStandTime; return; } if( m_byPsi > 0 && m_byPsi < g_arMonsterPsi.GetSize() ) // 付过捞 乐绰 仇捞扼搁... { CMonsterPsi* pMagic = g_arMonsterPsi[(int)m_byPsi]; if( pMagic ) { if( IsCloseTarget( pCom, pMagic->m_byRange ) ) { m_NpcState = NPC_FIGHTING; m_Delay = 0; return; } } } if(IsCloseTarget(pCom, m_byRange)) // 辟立傈阑 国老父怒 啊鳖款 芭府牢啊? { m_NpcState = NPC_FIGHTING; m_Delay = 0; return; } if(IsSurround(m_Target.x, m_Target.y)) // 格钎 鸥百捞 笛矾阶咯 乐栏搁 促澜 鸥百阑 茫绰促. { m_NpcState = NPC_STANDING; m_Delay = m_sStandTime; return; } if(IsChangePath(pCom)) // 辨茫扁甫 促矫 且父怒 Target 狼 困摹啊 函沁绰啊? { if(ResetPath(pCom) == FALSE)// && !m_tNpcTraceType) { m_NpcState = NPC_STANDING; m_Delay = m_sStandTime; return; } } if(StepMove(pCom, 1) == FALSE) // 茄沫 框流烙(叭绰悼累, 崔副锭绰 2沫) { m_NpcState = NPC_STANDING; m_Delay = m_sStandTime; return; } CBufferEx TempBuf; CPoint t = ConvertToClient( m_sCurX, m_sCurY ); // 框流捞妨绰 辑滚谅钎啊 努扼捞攫飘俊辑 给框流捞绰 谅钎搁 府畔 if(IsStepEnd()) TempBuf.Add(MOVE_END_RESULT); else TempBuf.Add(MOVE_RESULT); TempBuf.Add(SUCCESS); TempBuf.Add((int)(NPC_BAND + m_sNid)); TempBuf.Add((short)t.x); TempBuf.Add((short)t.y); SendInsight(pCom, TempBuf, TempBuf.GetLength()); m_Delay = m_sSpeed; } ///////////////////////////////////////////////////////////////////////////// // 各狼 加档 函拳甫 舅妨 霖促. // void CNpc::ChangeSpeed(COM *pCom, int delayTime) { /* CBufferEx TempBuf; int tempTime = delayTime * NPC_TRACING_STEP; if(m_Delay > m_sSpeed) m_Delay = m_sSpeed;// 胶拍爹 矫埃捞 1000老版快 m_Delay = m_Delay + tempTime; // 10, 50, 100窜困肺 刘.皑 if(m_Delay <= 500) m_Delay = 500; // 弥家绰 亲惑 500烙 // 500阑 100%捞搁 600篮 80栏肺 皑家 short step = 100 - (m_Delay - 500) * 10/50; TempBuf.Add(SET_SPEED_MONSTER); TempBuf.Add(m_sNid + NPC_BAND); TempBuf.Add(step); // NPC 林困狼 蜡历俊霸 沥焊傈价 CPoint ptOld; if(SightRecalc(ptOld)) { SendRemainSight(pCom, TempBuf, TempBuf.GetLength(), ptOld); } else SendInsight(pCom, TempBuf, TempBuf.GetLength()); */ } ///////////////////////////////////////////////////////////////////////////// // NPC啊 傍拜窍绰 版快. // 有魔法的怪用 void CNpc::NpcFighting(COM *pCom) { NpcTrace(_T("NpcFighting()")); if(m_sHP <= 0) { Dead(); return; } m_dwDelayCriticalDamage = 0; int at_type_total3 = m_iNormalATRatio + m_iSpecialATRatio + m_iMagicATRatio; int at_type[300], i; for( i = 0; i < m_iNormalATRatio; i++ ) at_type[i] = 1; int rand_index = m_iNormalATRatio; for( i = rand_index; i < rand_index+m_iSpecialATRatio; i++ ) at_type[i] = 2; rand_index += m_iSpecialATRatio; for( i = rand_index; i < rand_index+m_iMagicATRatio; i++ ) at_type[i] = 3; int at_type_rand = myrand( 0, at_type_total3); if( at_type[at_type_rand] == 3 && m_byPsi > 0) { m_Delay = PsiAttack( pCom ); if( m_Delay == -1 ) { m_NpcState = NPC_ATTACKING; m_Delay = m_sSpeed; } return; } else if( at_type[at_type_rand] == 2 ) { m_Delay = AreaAttack( pCom ); if( m_Delay == -1 ) { m_NpcState = NPC_ATTACKING; m_Delay = m_sSpeed; } return; }else m_Delay = Attack(pCom); } ///////////////////////////////////////////////////////////////////////////// // 鸥百苞狼 芭府甫 荤沥芭府 裹困肺 蜡瘤茄促.(伎窜困) // void CNpc::NpcBack(COM *pCom) { NpcTrace(_T("NpcBack()")); if(GetUser(pCom, (m_Target.id - USER_BAND)) == NULL) // Target User 啊 粮犁窍绰瘤 八荤 { m_NpcState = NPC_STANDING; m_Delay = m_sSpeed;//STEP_DELAY; return; } if(IsMovingEnd()) // 捞悼捞 场车栏搁 { m_Delay = m_sSpeed; NpcFighting(pCom); return; } if(StepMove(pCom, 1) == FALSE) // 茄沫 框流烙(叭绰悼累, 崔副锭绰 2沫) { m_NpcState = NPC_STANDING; m_Delay = m_sSpeed;//STEP_DELAY; return; } m_Delay = m_sSpeed;//STEP_DELAY; } ///////////////////////////////////////////////////////////////////////////// // 促弗 各苞狼 楷拌甫 困秦辑... // void CNpc::NpcStrategy(BYTE type) { switch(type) { case NPC_ATTACK_SHOUT: m_NpcState = NPC_TRACING; m_Delay = m_sSpeed;//STEP_DELAY; break; } } ///////////////////////////////////////////////////////////////////////////// // 矫具 裹困郴狼 郴悼丰甫 茫绰促. // void CNpc::FindFriend() { CNpc* pNpc = NULL; if(m_bySearchRange == 0) return; int min_x, min_y, max_x, max_y; min_x = m_sCurX - m_bySearchRange; if( min_x < 0 ) min_x = 0; min_y = m_sCurY - m_bySearchRange; if( min_y < 0 ) min_y = 0; max_x = m_sCurX + m_bySearchRange; max_y = m_sCurY + m_bySearchRange; if(max_x >= g_zone[m_ZoneIndex]->m_sizeMap.cx) max_x = g_zone[m_ZoneIndex]->m_sizeMap.cx - 1; if(max_y >= g_zone[m_ZoneIndex]->m_sizeMap.cy) max_y = g_zone[m_ZoneIndex]->m_sizeMap.cy - 1; int ix, iy; int target_uid; int uid; int tempLevel = 0, oldLevel = 1000; if(m_Target.id == -1) return; for(ix = min_x; ix <= max_x; ix++) { for(iy = min_y; iy <= max_y; iy++) { target_uid = m_pOrgMap[ix][iy].m_lUser; if( target_uid >= NPC_BAND && target_uid < INVALID_BAND) { uid = target_uid - NPC_BAND; pNpc = g_arNpc[uid]; if(pNpc == NULL) continue; if(pNpc->m_tNpcGroupType && m_sNid != uid && pNpc->m_sFamilyType == m_sFamilyType) { // pNpc->m_Target.nLen = strlen(pNpc->m_Target.szName); // 捞固 格钎啊 乐绢辑 捞固 傍拜窍绊 乐栏搁... // if(pNpc->m_Target.nLen > 0 && pNpc->m_NpcState == NPC_FIGHTING) continue; if(pNpc->m_Target.id >= 0 && pNpc->m_NpcState == NPC_FIGHTING) continue; pNpc->m_Target.id = m_Target.id; // 鞍篮 鸥涝狼 悼丰俊霸 档框阑 夸没茄促. pNpc->m_Target.x = m_Target.x; // 鞍篮 格钎甫 傍拜窍磊绊... pNpc->m_Target.y = m_Target.y; /* if(m_Target.szName != NULL) { pNpc->m_Target.nLen = strlen(m_Target.szName); if(pNpc->m_Target.nLen <= CHAR_NAME_LENGTH) strncpy(pNpc->m_Target.szName, m_Target.szName, pNpc->m_Target.nLen); else ::ZeroMemory(pNpc->m_Target.szName, sizeof(pNpc->m_Target.szName)); } */ pNpc->m_Target.failCount = 0; pNpc->NpcStrategy(NPC_ATTACK_SHOUT); } } } } } ///////////////////////////////////////////////////////////////////////////// // 鸥百痢栏肺何磐 弥措茄 钢府 冻绢柳 痢阑 茫绰促.(林狼 : 盔芭府绰 加档啊 弧扼具 茄促...救弊矾搁 拌加 芭府甫 蜡瘤窍促 磷绰荐啊 乐促.) // BOOL CNpc::GetBackPoint(int &x, int &y) { int ex = m_sCurX; int ey = m_sCurY; int dx = m_Target.x - m_sCurX; int dy = m_Target.y - m_sCurY; int min = ( abs(dx) + abs(dy) )/2; int max = m_byRange - min; int count = myrand(min, max); if(count <= 0) return FALSE; // 0捞搁 盔贰 荤沥芭府客 鞍促. if(count >= m_byRange && count > 2) count -= 1; // 趣矫唱 隔扼辑 弥措茄 撅力茄促. if(dy > 0) { if(dx > 0) { ex -= count; ey -= count; } else if(dx < 0) { ex += count; ey -= count; } else { ey -= (count*2); } // 圈荐, 娄荐甫 嘎冕促. } else if(dy < 0) { if(dx > 0) { ex -= count; ey += count; } else if(dx < 0) { ex += count; ey += count; } else { ey += (count*2); } } else { if(dx > 0) { ex -= (count*2); } else { ex += (count*2); } } if(IsMovable(ex, ey) == FALSE) // 茫篮 痢捞 框龙荐 绝绰 镑捞扼搁 8规氢栏肺 沤祸 { for(int i = 0; i < (sizeof(surround_x) / sizeof(surround_x[0])); i++) // 林函 8规氢 { if(IsMovable(ex + surround_x[i], ey + surround_y[i])) { x = ex; y = ey; return TRUE; } } } else { x = ex; y = ey; return TRUE; } return FALSE; } ///////////////////////////////////////////////////////////////////////////// // 蜡历府胶飘甫 檬扁拳茄促. // void CNpc::InitUserList() { m_TotalDamage = 0; for(int i = 0; i < NPC_HAVE_USER_LIST; i++) { m_DamagedUserList[i].bIs = FALSE; m_DamagedUserList[i].iUid = -1; m_DamagedUserList[i].nDamage = 0; ::ZeroMemory(m_DamagedUserList[i].strUserID, sizeof(m_DamagedUserList[i].strUserID)); } /* int i; // 版氰摹 盒硅甫 困秦 包府窍带 府胶飘 for(i = 0; i < m_arDamagedUserList.GetSize(); i++) { if(m_arDamagedUserList[i]) { delete m_arDamagedUserList[i]; m_arDamagedUserList[i] = NULL; } } m_arDamagedUserList.RemoveAll(); */ } ///////////////////////////////////////////////////////////////////////////// // 秦寸 概流 加己捞 酒捞袍 拌凯 棺 傀绢沥焊啊 嘎绰瘤 眉农茄促. // BOOL CNpc::CheckClassItem(int artable, int armagic) { if(artable < 0 || artable >= g_arItemTable.GetSize()) return FALSE; if(armagic < 0 || armagic >= g_arMagicItemTable.GetSize()) return FALSE; if(armagic==148||armagic==149||armagic==150||armagic==151||armagic==152||armagic==153||armagic==156|| armagic==158 || armagic==162||armagic==164 || armagic==166){ return FALSE; } int iWear; BYTE armWear = g_arItemTable[artable]->m_byWear; // 酒捞袍 拌凯 1: 公扁 2~8 : 馒侩酒捞袍 BYTE tNeedClass = g_arItemTable[artable]->m_byClass; BYTE armMagic = g_arMagicItemTable[armagic]->m_tNeedClass; // 概流加己 拌凯 if(armMagic != 31/*15*/) { BYTE tTemp = 1; BYTE tFire = 0; BYTE tEdge = 0; BYTE tStaff = 0; BYTE tBrawl = 0; BYTE tJudge = 0; tFire = tTemp & tNeedClass; tTemp = 2; tEdge = tTemp & tNeedClass; tTemp = 4; tStaff = tTemp & tNeedClass; tTemp = 8; tJudge = tTemp & tNeedClass; tTemp = 16; tBrawl = tTemp & tNeedClass; tFire = tFire & armMagic; tEdge = tEdge & armMagic; tStaff = tStaff & armMagic; tBrawl = tBrawl & armMagic; tJudge = tJudge & armMagic; tTemp = tFire^tEdge^tStaff^tBrawl^tJudge; if(!tTemp) return FALSE; // if(tNeedClass != armMagic) return FALSE; } iWear = g_arMagicItemTable[armagic]->m_tWearInfo; // 傀绢 沥焊肺 肋给等 加己捞 嘿绰巴阑 规瘤 if(iWear == 0) return TRUE; else if(iWear == 1) { // 1锅捞搁 公扁幅俊 嘿绰促. if(armWear != 1) return FALSE; else return TRUE; } else if(iWear == 2) // 2锅捞搁 公扁甫 力寇茄 馒侩酒捞袍俊 嘿绰促. { if(armWear <= 1 || armWear >= 9) return FALSE; else return TRUE; } else return FALSE; } void CNpc::DeleteNPC() { // 瘤陛 阁胶磐甫 瘤快瘤 臼绰促. 窜瘤, 静饭靛 惑俊辑 倒瘤 给窍档废 阜扁父 茄促. m_bFirstLive = FALSE; m_tNpcType = 2; // 唱吝俊 扁废窍磊. } ////////////////////////////////////////////////////////////////////////////////// // 弥辆 单固瘤甫 备茄促. // int CNpc::GetFinalDamage(USER *pUser, int type) { if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) return 0; int iInitDamage = GetNormalInitDamage(); int iFinalDamage = 0, iFinalTemp = 0; // int iBasic = (int)((double)(pUser->m_sCON + pUser->m_DynamicUserData[MAGIC_CON_UP])/3 + 0.5); // 扁夯镐笼 int iBasic = (int)((double)(pUser->m_sMagicCON + pUser->m_DynamicUserData[MAGIC_CON_UP] + (int)((double)pUser->m_DynamicEBodyData[EBODY_CON_TO_DEFENSE] / 100 * (double)pUser->m_sMagicCON) )/3 + 0.5); // 扁夯镐笼 if(iBasic < 0) iBasic = 0; BYTE tWeaponClass = 255; BOOL bCanUseSkill = pUser->IsCanUseWeaponSkill(tWeaponClass); int iDefense = 1; double dIron = 0; double dShield = 0; double dGuard = 0; int iCAttack = 0; double dAdamantine = 0; double dDefenseUP = 0; double dABDefense = 0; int iIronLevel = 0; int iGuardLevel = 0; int iVitalLevel = 0; int iCounterAttackLevel = 0; int iDefenseUPLevel = 0; int iABDefenseLevel = 0; int iIS = 0; int iCA = 0; int i = 0; int iRandom = 0; int iSkillSid = 0; int tClass = tWeaponClass * SKILL_NUM; iDefense = pUser->GetDefense(); // 规绢备 if(tWeaponClass != 255) { for(i = tClass; i < tClass + SKILL_NUM; i++) // IronSkill { iSkillSid = pUser->m_UserSkill[i].sSid; if(iSkillSid == SKILL_IRON) // 1 index { iIronLevel = pUser->m_UserSkill[i].tLevel; if(iIronLevel < 0) iIronLevel = 0; // 酒捞袍俊 狼茄 胶懦 函悼 饭骇 if(iIronLevel >= 1) iIronLevel += pUser->m_DynamicUserData[g_DynamicSkillInfo[iSkillSid]]+ pUser->m_DynamicUserData[MAGIC_ALL_SKILL_UP]; if(iIronLevel >= SKILL_LEVEL) iIronLevel = SKILL_LEVEL - 1; if(iSkillSid >= g_arSkillTable.GetSize()) continue; iRandom = (int)((double)XdY(1, 1000) / 10 + 0.5); if(iRandom < g_arSkillTable[iSkillSid]->m_arSuccess.GetAt(iIronLevel)) iIS = 1; // 酒捞攫胶挪俊 狼茄 扁夯 镐笼狼 刘啊 iBasic = (int)((double)iBasic * (1 + (double)(iIS * g_arSkillTable[iSkillSid]->m_arInc.GetAt(iIronLevel) / 100)) ); } if(iSkillSid == SKILL_CRITICAL_GUARD) // Critical Guard 11 index { iGuardLevel = pUser->m_UserSkill[i].tLevel; if(iGuardLevel < 0) iGuardLevel = 0; // 酒捞袍俊 狼茄 胶懦 函悼 饭骇 if(iGuardLevel >= 1) iGuardLevel += pUser->m_DynamicUserData[g_DynamicSkillInfo[iSkillSid]]+ pUser->m_DynamicUserData[MAGIC_ALL_SKILL_UP]; if(iGuardLevel >= SKILL_LEVEL) iGuardLevel = SKILL_LEVEL - 1; if(iSkillSid >= g_arSkillTable.GetSize()) continue; iRandom = (int)((double)XdY(1, 1000) / 10 + 0.5); if(iRandom < g_arSkillTable[iSkillSid]->m_arSuccess.GetAt(iGuardLevel)) { dGuard = (double)(iInitDamage *g_arSkillTable[iSkillSid]->m_arInc.GetAt(iGuardLevel))/100.0; } } if(iSkillSid == SKILL_BACK_ATTACK) // 馆拜 2 index { iCounterAttackLevel = pUser->m_UserSkill[i].tLevel; if(iCounterAttackLevel < 0) iCounterAttackLevel = 0; // 酒捞袍俊 狼茄 胶懦 函悼 饭骇 if(iCounterAttackLevel >= 1) iCounterAttackLevel += pUser->m_DynamicUserData[g_DynamicSkillInfo[iSkillSid]] + pUser->m_DynamicUserData[MAGIC_ALL_SKILL_UP]; if(iCounterAttackLevel >= SKILL_LEVEL) iCounterAttackLevel = SKILL_LEVEL - 1; if(iSkillSid >= g_arSkillTable.GetSize()) continue; if(pUser->GetDistance(m_sCurX, m_sCurY, 1) == FALSE && pUser->m_dwFANTAnTime == 0 ) iCA = 0;//超级反弹 else { iRandom = (int)((double)XdY(1, 1000) / 10 + 0.5); if(iRandom < g_arSkillTable[iSkillSid]->m_arSuccess.GetAt(iCounterAttackLevel)) iCA = 1; } iCAttack = (int)(iInitDamage * iCA * (double)((g_arSkillTable[iSkillSid]->m_arInc.GetAt(iCounterAttackLevel)) / 100.0) ); } if(iSkillSid == SKILL_ABSOLUTE_DEFENSE) // 绝对 Defense { iABDefenseLevel = pUser->m_UserSkill[i].tLevel; if(iABDefenseLevel < 0) iABDefenseLevel = 0; // 酒捞袍俊 狼茄 胶懦 函悼 饭骇 if(iABDefenseLevel >= 1) iABDefenseLevel += pUser->m_DynamicUserData[MAGIC_ALL_SKILL_UP]; if(iABDefenseLevel >= SKILL_LEVEL) iABDefenseLevel = SKILL_LEVEL - 1; if(iSkillSid >= g_arSkillTable.GetSize()) continue; iRandom = (int)((double)XdY(1, 1000) / 10 + 0.5); if(iRandom < g_arSkillTable[iSkillSid]->m_arSuccess.GetAt(iABDefenseLevel)) { dABDefense = (double)(iDefense * (double)g_arSkillTable[iSkillSid]->m_arInc.GetAt(iABDefenseLevel)/100.0); } } if(iSkillSid == SKILL_DEFENSE_UP) // 超级 Defense { iDefenseUPLevel = pUser->m_UserSkill[i].tLevel; if(iDefenseUPLevel < 0) iDefenseUPLevel = 0; // 酒捞袍俊 狼茄 胶懦 函悼 饭骇 if(iDefenseUPLevel >= 1) iDefenseUPLevel += pUser->m_DynamicUserData[MAGIC_ALL_SKILL_UP]; if(iDefenseUPLevel >= SKILL_LEVEL) iDefenseUPLevel = SKILL_LEVEL - 1; if(iSkillSid >= g_arSkillTable.GetSize()) continue; iRandom = (int)((double)XdY(1, 1000) / 10 + 0.5); if(iRandom < g_arSkillTable[iSkillSid]->m_arSuccess.GetAt(iDefenseUPLevel)) { dDefenseUP = (double)(iDefense * (double)g_arSkillTable[iSkillSid]->m_arInc.GetAt(iDefenseUPLevel)/100.0); } } } } // if(pUser->m_dwShieldTime != 0) dShield = (double)(iInitDamage * 0.2); if(pUser->m_bNecklaceOfShield && pUser->m_dwShieldTime != 0) dShield = (double)(iInitDamage * 0.3); else if(pUser->m_bNecklaceOfShield && pUser->m_dwBigShieldTime != 0) dShield = (double)(iInitDamage * 0.35); else if(pUser->m_bNecklaceOfShield || pUser->m_dwShieldTime != 0 ) dShield = (double)(iInitDamage * 0.2); else if(pUser->m_dwBigShieldTime !=0 && !pUser->m_bNecklaceOfShield ) dShield = (double)(iInitDamage * 0.25); if(pUser->m_bNecklaceOfShield) pUser->SendAccessoriDuration(SID_NECKLACE_OF_SHIELD); if( pUser->m_dwAdamantineTime != 0 ) { dAdamantine = (double)( (double)iDefense * 0.1 ); } iDefense = (int)( iDefense + dABDefense + dDefenseUP + dAdamantine ); iFinalDamage = (int)(iInitDamage - (iDefense + iBasic + dShield + dGuard)); if(iFinalDamage < 0) iFinalDamage = 0; if(iFinalDamage <= 15) { iFinalTemp = iFinalDamage; iFinalDamage += (int)((double)iInitDamage * 0.2 + 1.5); // 弥家措固瘤甫 弥措 15栏肺 茄促. if(iFinalDamage > 15) iFinalDamage = 15; iFinalDamage = max(iFinalDamage, iFinalTemp); } if(pUser->m_tAbnormalKind == ABNORMAL_BYTE_COLD) iFinalDamage += 10; if(iCAttack > 0 && type) { iCA = iCAttack; // 弥辆 馆拜 单固瘤 if(iCA > 0) // 馆拜捞 0焊促 努锭 蜡历 府胶飘俊 歹窍绊 盒硅... { // pUser->SetCounterAttack(m_sNid, iCA); // alisia int iDamage = iCA; if( pUser->GetDistance(m_sCurX, m_sCurY, 2) || pUser->m_dwFANTAnTime != 0 ) { if( SetDamage(iDamage, (pUser->m_uid) + USER_BAND, pUser->m_pCom) == FALSE ) { SendExpToUserList( pUser->m_pCom ); // 版氰摹 盒硅!! SendDead( pUser->m_pCom ); if( m_NpcVirtualState == NPC_STANDING ) { CheckMaxValue( pUser->m_dwXP, 1); // 各捞 磷阑锭父 1 刘啊! pUser->SendXP(); } } pUser->SendDamageNum(1,m_sNid + NPC_BAND,(short)iDamage);//拳反击显数字 pUser->SendNpcHP(m_sNid + NPC_BAND,m_sHP); } } } return iFinalDamage; } // int CNpc::SetHuFaFinalDamage(USER *pUser, int iDamage) { if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) return 0; if( SetDamage(iDamage, (pUser->m_uid) + USER_BAND, pUser->m_pCom) == FALSE ) { SendExpToUserList( pUser->m_pCom ); SendDead( pUser->m_pCom ); if( m_NpcVirtualState == NPC_STANDING ) { CheckMaxValue( pUser->m_dwXP, 1); // 各捞 磷阑锭父 1 刘啊! pUser->SendXP(); } } return 0; } /////////////////////////////////////////////////////////////////////////////////////// // NPC 沥焊甫 滚欺俊 历厘茄促. // NPC模型 void CNpc::FillNpcInfo(char *temp_send, int &index, BYTE flag) { CPoint t; SetByte(temp_send, NPC_INFO, index ); SetByte(temp_send, flag, index ); SetShort(temp_send, m_sNid+NPC_BAND, index ); if(flag != INFO_MODIFY) return; SetShort(temp_send, m_sPid, index); SetVarString(temp_send, m_strName, _tcslen(m_strName), index); t = ConvertToClient(m_sCurX, m_sCurY); SetShort(temp_send, t.x, index); SetShort(temp_send, t.y, index); if(m_sHP <= 0) SetByte(temp_send, 0x00, index); else SetByte(temp_send, 0x01, index); SetByte(temp_send, m_tNpcType, index); SetInt(temp_send, m_sMaxHP, index); SetInt(temp_send, m_sHP, index); SetByte(temp_send , 0x00, index); SetByte(temp_send , 0x00, index); SetByte(temp_send , 0x00, index); SetByte(temp_send , 0x00, index); SetByte(temp_send , 0x00, index); SetByte(temp_send , 0x00, index); SetByte(temp_send , 0x00, index); SetByte(temp_send , 0x00, index); SetShort(temp_send, m_sClientSpeed, index); SetByte(temp_send, m_byColor, index); if(m_tNpcType == NPCTYPE_GUILD_DOOR) { SetShort(temp_send, m_sDimension, index); } SetShort(temp_send, m_sQuestSay, index); } /////////////////////////////////////////////////////////////////////////////////////// // 辑滚谅钎甫 努扼捞攫飘 谅钎肺 官槽促. // CPoint CNpc::ConvertToClient(int x, int y) { if(!g_zone[m_ZoneIndex]) return CPoint(-1,-1); int tempx, tempy; int temph = g_zone[m_ZoneIndex]->m_vMoveCell.m_vDim.cy / 2 - 1; if( y >= g_zone[m_ZoneIndex]->m_sizeMap.cy || x >= g_zone[m_ZoneIndex]->m_sizeMap.cx ) return CPoint(-1,-1); tempx = x - temph + y; tempy = y - x + temph; return CPoint( tempx, tempy ); } ////////////////////////////////////////////////////////////////////////////////////////// // 老沥 康开狼 蜡历俊霸 单捞磐甫 焊辰促. // void CNpc::SendToRange(COM *pCom, char *temp_send, int index, int min_x, int min_y, int max_x, int max_y) { /* if( index <= 0 || index >= SEND_BUF_SIZE ) return; SEND_DATA* pNewData = NULL; pNewData = new SEND_DATA; if( !pNewData ) return; pNewData->flag = SEND_RANGE; pNewData->len = index; ::CopyMemory( pNewData->pBuf, temp_send, index ); pNewData->uid = 0; pNewData->z = m_sCurZ; pNewData->rect.left = min_x; pNewData->rect.right = max_x; pNewData->rect.top = min_y; pNewData->rect.bottom = max_y; pNewData->zone_index = m_ZoneIndex; EnterCriticalSection( &(pCom->m_critSendData) ); pCom->m_arSendData.Add( pNewData ); LeaveCriticalSection( &(pCom->m_critSendData) ); PostQueuedCompletionStatus( pCom->m_hSendIOCP, 0, 0, NULL ); */ if( index <= 0 || index >= SEND_BUF_SIZE ) return; MAP* pMap = g_zone[m_ZoneIndex]; if( !pMap ) return; int tmin_x = min_x; if(tmin_x < 0 ) tmin_x = 0; int tmax_x = max_x; if(tmax_x >= pMap->m_sizeMap.cx ) tmax_x = pMap->m_sizeMap.cx - 1; int tmin_y = min_y; if(tmin_y < 0 ) tmin_y = 0; int tmax_y = max_y; if(tmax_y >= pMap->m_sizeMap.cy ) tmax_y = pMap->m_sizeMap.cy - 1; int temp_uid; USER* pUser = NULL; for( int i = tmin_x; i < tmax_x; i++ ) { for( int j = tmin_y; j < tmax_y; j++ ) { temp_uid = pMap->m_pMap[i][j].m_lUser; if(temp_uid < USER_BAND || temp_uid >= NPC_BAND) continue; else temp_uid -= USER_BAND; if( temp_uid >= 0 && temp_uid < MAX_USER ) { pUser = pCom->GetUserUid(temp_uid); if ( pUser == NULL ) continue; if( pUser->m_state == STATE_GAMESTARTED ) { if( pUser->m_curx == i && pUser->m_cury == j && pUser->m_curz == m_sCurZ ) { Send( pUser, temp_send, index); } } } } } } ///////////////////////////////////////////////////////////////////////////////////////// // 傍拜措惑(Target)阑 檬扁拳 茄促. // inline void CNpc::InitTarget() { m_Target.id = -1; m_Target.x = 0; m_Target.y = 0; m_Target.failCount = 0; // m_Target.nLen = 0; // ::ZeroMemory(m_Target.szName, sizeof(m_Target.szName)); } ///////////////////////////////////////////////////////////////////////////////////////// // PathFind 甫 荐青茄促. // BOOL CNpc::PathFind(CPoint start, CPoint end) { m_bRandMove = FALSE; if(start.x < 0 || start.y < 0 || end.x < 0 || end.y < 0) { return FALSE; } int i, j; int min_x, max_x; int min_y, max_y; min_x = m_min_x; min_y = m_min_y; max_x = m_max_x; max_y = m_max_y; if(InterlockedCompareExchange((LONG*)&m_lMapUsed, (long)1, (long)0) == (long)0) { ClearPathFindData(); m_vMapSize.cx = max_x - min_x + 1; m_vMapSize.cy = max_y - min_y + 1; /* m_pMap = new int*[m_vMapSize.cx]; for(i = 0; i < m_vMapSize.cx; i++) { m_pMap[i] = new int[m_vMapSize.cy]; } */ for(i = 0; i < m_vMapSize.cy; i++) { for(j = 0; j < m_vMapSize.cx; j++) { if( min_x+j == m_sCurX && min_y+i == m_sCurY ) { m_pMap[j*m_vMapSize.cy + i] = 0; // m_pMap[j][i] = 0; } else { if(m_pOrgMap[min_x + j][min_y + i].m_bMove || m_pOrgMap[min_x + j][min_y + i].m_lUser != 0 ) { // m_pMap[j][i] = 1; m_pMap[j*m_vMapSize.cy + i] = 1; } else { // m_pMap[j][i] = 0; m_pMap[j*m_vMapSize.cy + i] = 0; } } } } m_vStartPoint = start; m_vEndPoint = end; m_pPath = NULL; m_vPathFind.SetMap(m_vMapSize.cx, m_vMapSize.cy, m_pMap); m_pPath = m_vPathFind.FindPath(end.x, end.y, start.x, start.y); ::InterlockedExchange(&m_lMapUsed, 0); if(m_pPath) { return TRUE; } else { return FALSE; } } else return FALSE; } ///////////////////////////////////////////////////////////////////////////// // 菩胶 颇牢靛俊辑 茫篮 版肺甫 促 捞悼 沁绰瘤 眉农 // BOOL CNpc::IsStepEnd() { if( !m_pPath ) return FALSE; if( m_NpcState != NPC_MOVING && m_NpcState != NPC_TRACING ) return FALSE; if( !m_pPath->Parent ) return TRUE; return FALSE; } ///////////////////////////////////////////////////////////////////////////// // NPC 扁夯沥焊 檬扁拳 // void CNpc::Init() { int i, j; m_dLastFind = GetTickCount(); m_Delay = 0; m_dwLastThreadTime = GetTickCount(); if((m_sOrgX + m_sOrgY) % 2 != 0) m_sOrgX++; CPoint pt = ConvertToServer(m_sOrgX, m_sOrgY); if(pt.x == -1 || pt.y == -1) { CString szTemp; szTemp.Format(_T("Invalid NPC AXIS : Name = %s, x = %d, y = %d"), m_strName, m_sOrgX, m_sOrgY); AfxMessageBox(szTemp); InterlockedIncrement(&g_CurrentNPCError); } else { m_sTableOrgX = m_sOrgX = pt.x; m_sTableOrgY = m_sOrgY = pt.y; m_NpcVirtualState = NPC_STANDING; if(m_sGuild >= NPC_GUILDHOUSE_BAND) // 档矫甫 扁霖栏肺 0锅 档矫, 1, 2 捞繁侥栏肺 柳青 m_sGuild = 10000(贸澜) { // 0锅档矫 = 荤唱靛 1锅档矫 = 器飘矫... 捞扒 10000锅措烙 int index = 0; index = GetCityNumForVirtualRoom(m_sCurZ); if(index >= 0) g_arGuildHouseWar[index]->m_CurrentGuild.arNpcList.Add(m_sNid); m_NpcVirtualState = NPC_WAIT; } else if(m_tNpcType == NPCTYPE_MONSTER && m_sGuild >= FORTRESS_BAND && m_sGuild < NPC_GUILDHOUSE_BAND) // 捞扒 1000锅措烙 { for(i = 0; i < g_arGuildFortress.GetSize(); i++) { if(!g_arGuildFortress[i]) continue; if(g_arGuildFortress[i]->m_sFortressID == (short)(m_sGuild)) { g_arGuildFortress[i]->m_arViolenceNpcList.Add(m_sNid); m_NpcVirtualState = NPC_WAIT; // m_pGuardFortress = g_arGuildFortress[i]; break; } } } switch(m_tNpcType) { case NPCTYPE_GUILD_GUARD: case NPCTYPE_GUILD_NPC: { if(m_sGuild < FORTRESS_BAND) // 惑痢俊 加茄 版厚捍捞搁 { CStore *pStore = NULL; for(i = 0; i < g_arStore.GetSize(); i++) { if(g_arStore[i] == NULL) continue; pStore = g_arStore[i]; // 惑痢 器牢磐甫 掘绢柯促.(辨靛傈俊辑 静扁困秦) if(pStore->m_sStoreID == (short)m_sGuild) { pStore->m_arNpcList.Add(m_sNid); // 郴 酒捞叼甫 惑痢俊 殿废茄促. m_pGuardStore = pStore; break; } } } else // 夸货俊 加茄 NPC捞搁... { for(i = 0; i < g_arGuildFortress.GetSize(); i++) { if(!g_arGuildFortress[i]) continue; if(g_arGuildFortress[i]->m_sFortressID == (short)(m_sGuild)) { if(m_sEZone >= GUILD_FORTRESS_NPC_BAND) { for(j = 0; j < FORTRESS_TARGET_MAX_NUM; j++) { if(InterlockedCompareExchange((long*)&g_arGuildFortress[i]->m_arFortressTarget[j].lUsed, (long)1, (long)0) == (long)0) { g_arGuildFortress[i]->m_arFortressTarget[j].sTargertID = m_sNid; break; } } } else if(m_sEZone < GUILD_FORTRESS_NPC_BAND && m_sEZone >= GUILD_REPAIR_NPC_BAND) // 荐府瞪荐 乐绰 巩, 堡急器 { g_arGuildFortress[i]->m_arRepairNpcList.Add(m_sNid); } g_arGuildFortress[i]->m_arNpcList.Add(m_sNid); m_pGuardFortress = g_arGuildFortress[i]; break; } } } } break; case NPCTYPE_GUILD_MARK: { if(m_sGuild >= 0 && m_sGuild < g_arGuildData.GetSize()) { m_sPid = g_arGuildHouse[m_sGuild]->iGuild; g_arGuildHouse[m_sGuild]->iMarkNpc = m_sNid; ::ZeroMemory(m_strName, sizeof(m_strName)); if( m_sPid >= 0 && m_sPid < g_arGuildData.GetSize()) { // 辨靛俊 悼扁拳甫 且鞘夸啊 绝促. if(g_arGuildData[m_sPid]) { int nLen = 0; m_sMaxHP = g_arGuildData[m_sPid]->m_sVersion; nLen = strlen(g_arGuildData[m_sPid]->m_strGuildName); if(nLen > 0) { strncpy(m_strName, g_arGuildData[m_sPid]->m_strGuildName, nLen); } } } } } break; case NPCTYPE_GUILD_DOOR: { for(i = 0; i < g_arGuildFortress.GetSize(); i++) { if(!g_arGuildFortress[i]) continue; if(g_arGuildFortress[i]->m_sFortressID == (short)(m_sGuild)) { if(m_sEZone < GUILD_FORTRESS_NPC_BAND && m_sEZone >= GUILD_REPAIR_NPC_BAND) // 荐府瞪荐 乐绰 巩, 堡急器 { g_arGuildFortress[i]->m_arRepairNpcList.Add(m_sNid); } g_arGuildFortress[i]->m_arNpcList.Add(m_sNid); m_pGuardFortress = g_arGuildFortress[i]; break; } } } /* case NPCTYPE_FORTRESS: { for(i = 0; i < g_arGuildFortress.GetSize(); i++) { if(!g_arGuildFortress[i]) continue; if(g_arGuildFortress[i]->m_sFortressID == (short)(m_sGuild)) { for(j = 0; j < FORTRESS_TARGET_MAX_NUM; j++) { if(InterlockedCompareExchange(&g_arGuildFortress[i]->m_arFortressTarget[j].lUsed, (LONG)1, (LONG)0) == 0) { g_arGuildFortress[i]->m_arFortressTarget[j].bChange = FALSE; g_arGuildFortress[i]->m_arFortressTarget[j].sTargertID = m_sNid; m_pGuardFortress = g_arGuildFortress[i]; break; } } } } } break; /* case NPCTYPE_REPAIR_GUARD: { for(i = 0; i < g_arGuildFortress.GetSize(); i++) { if(!g_arGuildFortress[i]) continue; if(g_arGuildFortress[i]->m_sFortressID == (short)(m_sGuild)) { g_arGuildFortress[i]->m_arRepairNpcList.Add(m_sNid); m_pGuardFortress = g_arGuildFortress[i]; break; } } } break; */ } /* else { m_sOrgX = pt.x; m_sOrgY = pt.y; m_NpcVirtualState = NPC_STANDING; if(m_sGuild >= NPC_GUILDHOUSE_BAND) // 档矫甫 扁霖栏肺 0锅 档矫, 1, 2 捞繁侥栏肺 柳青 m_sGuild = 10000(贸澜) { // 0锅档矫 = 荤唱靛 1锅档矫 = 器飘矫... int index = 0; index = GetCityNumForVirtualRoom(m_sCurZ); g_arGuildHouseWar[index]->m_CurrentGuild.arNpcList.Add(m_sNid); m_NpcVirtualState = NPC_WAIT; } else if(m_tNpcType == NPCTYPE_GUILD_GUARD || m_tNpcType == NPCTYPE_GUILD_NPC) { if(m_sGuild < FORTRESS_BAND) // 惑痢俊 加茄 版厚捍捞搁 { CStore *pStore = NULL; for(i = 0; i < g_arStore.GetSize(); i++) { if(g_arStore[i] == NULL) continue; pStore = g_arStore[i]; // 惑痢 器牢磐甫 掘绢柯促.(辨靛傈俊辑 静扁困秦) if(pStore->m_sStoreID == (short)m_sGuild) { pStore->m_arNpcList.Add(m_sNid); // 郴 酒捞叼甫 惑痢俊 殿废茄促. m_pGuardStore = pStore; break; } } } else // 夸货俊 加茄 NPC捞搁... { for(i = 0; i < g_arGuildFortress.GetSize(); i++) { if(!g_arGuildFortress[i]) continue; if(g_arGuildFortress[i]->m_sFortressID == (short)(m_sGuild)) { g_arGuildFortress[i]->m_arNpcList.Add(m_sNid); m_pGuardFortress = g_arGuildFortress[i]; break; } } } } else if(m_tNpcType == NPCTYPE_GUILD_MARK) { if(m_sGuild >= 0 && m_sGuild < g_arGuildData.GetSize()) { m_sPid = g_arGuildHouse[m_sGuild]->iGuild; g_arGuildHouse[m_sGuild]->iMarkNpc = m_sNid; ::ZeroMemory(m_strName, sizeof(m_strName)); if( m_sPid >= 0 && m_sPid < g_arGuildData.GetSize()) { // 辨靛俊 悼扁拳甫 且鞘夸啊 绝促. if(g_arGuildData[m_sPid]) { int nLen = 0; m_sMaxHP = g_arGuildData[m_sPid]->m_sVersion; nLen = strlen(g_arGuildData[m_sPid]->m_strGuildName); if(nLen > 0) { strncpy(m_strName, g_arGuildData[m_sPid]->m_strGuildName, nLen); } } } } } else if(m_tNpcType == NPCTYPE_FORTRESS) { for(i = 0; i < g_arGuildFortress.GetSize(); i++) { if(!g_arGuildFortress[i]) continue; if(g_arGuildFortress[i]->m_sFortressID == (short)(m_sGuild)) { for(j = 0; j < FORTRESS_TARGET_MAX_NUM; j++) { if(InterlockedCompareExchange(&g_arGuildFortress[i]->m_arFortressTarget[j].lUsed, (LONG)1, (LONG)0) == 0) { g_arGuildFortress[i]->m_arFortressTarget[j].bChange = FALSE; g_arGuildFortress[i]->m_arFortressTarget[j].sTargertID = m_sNid; m_pGuardFortress = g_arGuildFortress[i]; break; } } } } } else if(m_tNpcType == NPCTYPE_REPAIR_GUARD) { for(i = 0; i < g_arGuildFortress.GetSize(); i++) { if(!g_arGuildFortress[i]) continue; if(g_arGuildFortress[i]->m_sFortressID == (short)(m_sGuild)) { g_arGuildFortress[i]->m_arRepairNpcList.Add(m_sNid); m_pGuardFortress = g_arGuildFortress[i]; break; } } } */ } m_pOrgMap = g_zone[m_ZoneIndex]->m_pMap; // MapInfo 沥焊 悸泼 } int CNpc::GetCityNumForVirtualRoom(int zone) // 瘤陛篮 档矫锅龋瘤父 唱吝俊 VirtualRoom捞 拌加 眠啊登搁.. { // 官操绢具 等促. (int zone, int &curGuild) int nRet = -1; switch(zone) // 眠啊且 抗沥烙... { case 1005: // 1004粮 捞搁.. nRet = SANAD; // 档矫绰 荤唱靛, m_CurrentGuild = 0锅掳 break; default: break; } return nRet; } //////////////////////////////////////////////////////////////////////////////////// // Client 谅钎甫 辑滚谅钎肺 函券茄促 // CPoint CNpc::ConvertToServer(int x, int y) { if(!g_zone[m_ZoneIndex]) return CPoint(-1,-1); int tempx, tempy; int temph = g_zone[m_ZoneIndex]->m_vMoveCell.m_vDim.cy / 2 - 1; if( y >= g_zone[m_ZoneIndex]->m_vMoveCell.m_vDim.cy || x >= g_zone[m_ZoneIndex]->m_vMoveCell.m_vDim.cx ) return CPoint(-1,-1); if( (x+y)%2 == 0 ) { tempx = temph - ( y / 2 ) + ( x / 2 ); if( x % 2 ) tempy = ( y / 2 ) + ( ( x / 2 ) + 1 ); else tempy = ( y / 2 ) + ( x / 2 ); return CPoint( tempx, tempy ); } else return CPoint(-1,-1); } //////////////////////////////////////////////////////////////////////////////////// // 叼滚彪阑 困秦 NPC 沥焊甫 TRACE 茄促. // void CNpc::NpcTrace(TCHAR *pMsg) { if(g_bDebug == FALSE) return; CString szMsg = _T(""); CPoint pt = ConvertToClient(m_sCurX, m_sCurY); szMsg.Format(_T("%s : uid = %d, name = %s, xpos = %d, ypos = %d\n"), pMsg, m_sNid, m_strName, pt.x, pt.y); TRACE(szMsg); } /////////////////////////////////////////////////////////////////////////////////// // 傍拜阑 寸沁阑锭 菩胶颇牢爹俊 角菩窍搁 蜡历俊霸 啊鳖款率栏肺 框流牢促. // void CNpc::ToTargetMove(COM *pCom, USER *pUser) { if(!pCom) return; if(!pUser) return; if(!g_zone[m_ZoneIndex]) return; int xx[] = {-1, -1, 0, 1, 1, 1, 0, -1}; int yy[] = {0, -1, -1, -1, 0, 1, 1, 1}; CPoint ptUser = ConvertToClient(pUser->m_curx, pUser->m_cury); struct _min { int x, y; int value; }min; int minindex; int i, j; int dx, dy; CPoint ptNew; int max_dist; /* int test1[8], test2[8]; for(i = 0; i < 8; i++) { ptNew = ConvertToClient(m_sCurX + xx[i], m_sCurY + yy[i]); dx = abs(ptUser.x - ptNew.x); dy = abs(ptUser.y - ptNew.y); test1[i] = dx + dy; } */ for(i = 0; i < sizeof(xx)/sizeof(xx[0]) - 1; i++) { minindex = i; ptNew = ConvertToClient(m_sCurX + xx[i], m_sCurY + yy[i]); if(ptNew.x <= -1 || ptNew.y <= -1) continue; if(ptNew.x >= g_zone[m_ZoneIndex]->m_sizeMap.cx || ptNew.y >= g_zone[m_ZoneIndex]->m_sizeMap.cy) continue; dx = abs(ptUser.x - ptNew.x); dy = abs(ptUser.y - ptNew.y); max_dist = dx + dy; min.value = max_dist; min.x = xx[i]; min.y = yy[i]; for(j = i + 1; j < sizeof(xx)/sizeof(xx[0]); j++) { ptNew = ConvertToClient(m_sCurX + xx[j], m_sCurY + yy[j]); if(ptNew.x <= -1 || ptNew.y <= -1) continue; if(ptNew.x >= g_zone[m_ZoneIndex]->m_sizeMap.cx || ptNew.y >= g_zone[m_ZoneIndex]->m_sizeMap.cy) continue; dx = abs(ptUser.x - ptNew.x); dy = abs(ptUser.y - ptNew.y); max_dist = dx + dy; if(min.value > max_dist) { min.value = max_dist; min.x = xx[j]; min.y = yy[j]; minindex = j; } } xx[minindex] = xx[i]; yy[minindex] = yy[i]; xx[i] = min.x; yy[i] = min.y; } /* for(i = 0; i < 8; i++) { ptNew = ConvertToClient(m_sCurX + xx[i], m_sCurY + yy[i]); dx = abs(ptUser.x - ptNew.x); dy = abs(ptUser.y - ptNew.y); test2[i] = dx + dy; } */ MAP* pMap = g_zone[m_ZoneIndex]; CPoint ptPre(m_sCurX, m_sCurY); int will_x, will_y; BOOL bMove = FALSE; int new_dist = 0, cur_dist = 0; CPoint ptCurr = ConvertToClient(m_sCurX, m_sCurY); cur_dist = abs(ptUser.x - ptCurr.x) + abs(ptUser.y - ptCurr.y); for(i = 0; i < sizeof(xx)/sizeof(xx[0]); i++) { will_x = m_sCurX + xx[i]; will_y = m_sCurY + yy[i]; ptNew = ConvertToClient(m_sCurX + xx[i], m_sCurY + yy[i]); new_dist = abs(ptUser.x - ptNew.x) + abs(ptUser.y - ptNew.y); if(new_dist > cur_dist) continue; if(will_x <= -1 || will_y <= -1) continue; if(will_x >= g_zone[m_ZoneIndex]->m_sizeMap.cx || will_y >= g_zone[m_ZoneIndex]->m_sizeMap.cy) continue; if(pMap->m_pMap[will_x][will_y].m_bMove != 0 || pMap->m_pMap[will_x][will_y].m_lUser != 0) { continue; } else { if(InterlockedCompareExchange((LONG*)&m_pOrgMap[will_x][will_y].m_lUser, (long)m_pOrgMap[m_sCurX][m_sCurY].m_lUser, (long)0) == (long)0) { ::InterlockedExchange(&m_pOrgMap[m_sCurX][m_sCurY].m_lUser, 0); m_sCurX = will_x; m_sCurY = will_y; SightRecalc(pCom); bMove = TRUE; break; } else continue; } } if(!bMove) return; CBufferEx TempBuf; CPoint t = ConvertToClient(m_sCurX, m_sCurY); // 框流捞妨绰 辑滚谅钎啊 努扼捞攫飘俊辑 给框流捞绰 谅钎搁 府畔 if(t.x <= -1 || t.y <= -1) return; TempBuf.Add(MOVE_RESULT); TempBuf.Add(SUCCESS); TempBuf.Add((int)(NPC_BAND + m_sNid)); TempBuf.Add((short)t.x); TempBuf.Add((short)t.y); SendInsight(pCom, TempBuf, TempBuf.GetLength()); m_Delay = m_sSpeed; } void CNpc::EventNpcInit(int x, int y) { m_dwLastThreadTime = GetTickCount(); m_sOrgX = x; m_sOrgY = y; m_pOrgMap = g_zone[m_ZoneIndex]->m_pMap; // MapInfo 沥焊 悸泼 m_Delay = 0; } /////////////////////////////////////////////////////////////////////////////////// // 辨靛傈阑 困秦 鸥涝阑 官槽促. // void CNpc::SetGuildType(COM *pCom) { /* int modify_index = 0; char modify_send[2048]; ::ZeroMemory(modify_send, sizeof(modify_send)); if(m_tGuildWar == GUILD_WARRING) { m_tNpcAttType = 1; if(m_tNpcType == NPCTYPE_GUARD) m_tNpcType = NPCTYPE_GUILD_GUARD; else if(m_tNpcType == NPCTYPE_NPC) m_tNpcType = NPCTYPE_GUILD_NPC; // 蜡历俊霸 NPC 沥焊傈价... FillNpcInfo(modify_send, modify_index, INFO_MODIFY); SendInsight(pCom, modify_send, modify_index); } else if(m_tGuildWar == GUILD_WAR_AFFTER) { m_tNpcAttType = 1; if(m_tNpcType == NPCTYPE_GUILD_GUARD) m_tNpcType = NPCTYPE_GUARD; else if(m_tNpcType == NPCTYPE_GUILD_NPC) m_tNpcType = NPCTYPE_NPC; FillNpcInfo(modify_send, modify_index, INFO_MODIFY); SendInsight(pCom, modify_send, modify_index); } */ } void CNpc::SetDamagedInGuildWar(int nDamage, USER *pUser)// COM *pCom) { int i, j; BOOL flag = FALSE; CNpc *pNpc = NULL; // int uid = uuid - USER_BAND; // USER* pUser = GetUser(pCom, uid); // 辨靛傈阑 脚没茄 辨靛牢瘤 魄窜. if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) return; if(pUser->m_dwGuild <= 0 || pUser->m_tGuildWar == GUILD_WAR_AFFTER) return; if(pUser->m_dwGuild == m_pGuardStore->m_iGuildSid) return; // 规绢螟捞 绊狼肺 傍拜窍绰巴阑 规瘤茄促. if(m_pGuardStore->m_lUsed == 0) return; // 辨靛傈捞 场车促. for(j = 0; j < GUILD_ATTACK_MAX_NUM; j++) { if(pUser->m_dwGuild != m_pGuardStore->m_arAttackGuild[j]) continue; m_sHP -= nDamage; if( m_sHP <= 0 ) // 咯扁俊辑 辨靛傈捞 场抄促. { m_sHP = m_sMaxHP; if(InterlockedCompareExchange((LONG*)&m_pGuardStore->m_lUsed, (long)0, (long)1) == (long)1) { // 1俊辑 0栏肺 父甸绊 秦寸 惑痢傈里篮 场车澜阑 舅赴促. if(pUser->StoppingTheGuildWar(m_pGuardStore)) { // 秦寸 NPC俊霸 舅妨霖促. for(i =0; i < m_pGuardStore->m_arNpcList.GetSize(); i++) { pNpc = GetNpc(m_pGuardStore->m_arNpcList[i]); if(pNpc) { pNpc->m_tGuildWar = GUILD_WAR_AFFTER; pNpc->m_tNpcAttType = 0; } } m_tGuildWar = GUILD_WAR_AFFTER; flag = TRUE; break; } } } } if(flag) { for(j = 0; j < GUILD_ATTACK_MAX_NUM; j++) { m_pGuardStore->m_arAttackGuild[j] = 0; } } return; } void CNpc::Send(USER *pUser, TCHAR *pBuf, int nLength) { if ( !pUser ) return; pUser->Send( pBuf, nLength ); } /////////////////////////////////////////////////////////////////////////////////// // 烙矫 捞亥飘 内靛烙 (扁埃 : 2001斥 12岿 29老 ~~ 2002斥 1岿 2老) // //@@@@@@@@@@@@@@@@@@@@@@@@ void CNpc::GiveEventItemToUser(USER *pUser) { return; BOOL bFlag = FALSE; int iEventItemSid = 0; int iEventNum = -1; int iEvent = 0; int iSlot = 0; int j; SYSTEMTIME gTime; GetLocalTime(&gTime); // if(gTime.wYear == 2002 && gTime.wDay >= 2) return; // if(gTime.wMonth != 2) return; // 2岿 汲朝侩 捞亥飘 if(gTime.wDay < 7 || gTime.wDay > 13) return;// 8老何磐 13老鳖瘤 if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) return; if(abs(m_byClassLevel - pUser->m_sLevel) > 25) { if(m_byClassLevel < pUser->m_sLevel) return; } iEventNum = GetEventItemNum(pUser->m_pCom); if(iEventNum < 0) return; int type = (int)g_arAddEventItemTable[iEventNum]->m_tType; if(type < 100 || type > 255) return; if(!UpdateEventItem(iEventNum)) { g_arAddEventItemTable[iEventNum]->m_tEnd = 0; return; } CString strMsg = _T(""); iSlot = pUser->GetEmptySlot(INVENTORY_SLOT); if(iSlot != -1) { if(NPC_EVENT_ITEM >= g_arItemTable.GetSize()) { int ttt = 0; } if(pUser->m_iMaxWeight >= pUser->m_iCurWeight + g_arItemTable[NPC_EVENT_ITEM]->m_byWeight) bFlag = TRUE; } switch(type) { /* case 1: if(bFlag) { iEvent = EVENT_SP1_ITEM; strMsg.Format("泅犁 %s丛膊辑 归拳痢 惑前鼻甫 掘栏继嚼聪促.", pUser->m_strUserID); } else iEvent = 1001; break; case 2: if(bFlag) { iEvent = EVENT_SP2_ITEM; strMsg.Format("泅犁 %s丛膊辑 巩拳 惑前鼻甫 掘栏继嚼聪促.", pUser->m_strUserID); } else iEvent = 1002; break; case 3: if(bFlag) { iEvent = EVENT_DEF_ITEM; strMsg.Format("泅犁 %s丛膊辑 规绢备 背券鼻甫 掘栏继嚼聪促.", pUser->m_strUserID); } else iEvent = 1003; break; case 4: if(bFlag) { iEvent = EVENT_ATT_ITEM; strMsg.Format("泅犁 %s丛膊辑 公扁 背券鼻甫 掘栏继嚼聪促.", pUser->m_strUserID); } else iEvent = 1004; break; case 5: if(bFlag) { iEvent = EVENT_POT_ITEM; strMsg.Format("泅犁 %s丛膊辑 拱距 背券鼻甫 掘栏继嚼聪促.", pUser->m_strUserID); } else iEvent = 1005; break; */ case EVENT_ATT7_ITEM: if(bFlag) { iEventItemSid = NPC_EVENT_ITEM; iEvent = EVENT_ATT7_ITEM; strMsg.Format(IDS_EVENT_ATT7_ITEM, pUser->m_strUserID); } else iEvent = 1001; break; case EVENT_DEF7_ITEM: if(bFlag) { iEventItemSid = NPC_EVENT_ITEM; iEvent = EVENT_DEF7_ITEM; strMsg.Format(IDS_EVENT_DEF7_ITEM, pUser->m_strUserID); } else iEvent = 1002; break; case EVENT_ATT6_ITEM: if(bFlag) { iEventItemSid = NPC_EVENT_ITEM; iEvent = EVENT_ATT6_ITEM; strMsg.Format(IDS_EVENT_ATT6_ITEM, pUser->m_strUserID); } else iEvent = 1003; break; case EVENT_DEF6_ITEM: if(bFlag) { iEventItemSid = NPC_EVENT_ITEM; iEvent = EVENT_DEF6_ITEM; strMsg.Format(IDS_EVENT_DEF6_ITEM, pUser->m_strUserID); } else iEvent = 1004; break; case EVENT_ATT_ITEM: if(bFlag) { iEventItemSid = NPC_EVENT_ITEM; iEvent = EVENT_ATT_ITEM; strMsg.Format(IDS_EVENT_ATT5_ITEM, pUser->m_strUserID); } else iEvent = 1005; break; case EVENT_DEF_ITEM: if(bFlag) { iEventItemSid = NPC_EVENT_ITEM; iEvent = EVENT_DEF_ITEM; strMsg.Format(IDS_EVENT_DEF5_ITEM, pUser->m_strUserID); } else iEvent = 1006; break; case EVENT_ATT4_ITEM: if(bFlag) { iEventItemSid = NPC_EVENT_ITEM; iEvent = EVENT_ATT4_ITEM; strMsg.Format(IDS_EVENT_ATT4_ITEM, pUser->m_strUserID); } else iEvent = 1007; break; case EVENT_DEF4_ITEM: if(bFlag) { iEventItemSid = NPC_EVENT_ITEM; iEvent = EVENT_DEF4_ITEM; strMsg.Format(IDS_EVENT_DEF4_ITEM, pUser->m_strUserID); } else iEvent = 1008; break; case EVENT_ATT3_ITEM: if(bFlag) { iEventItemSid = NPC_EVENT_ITEM; iEvent = EVENT_ATT3_ITEM; strMsg.Format(IDS_EVENT_ATT3_ITEM, pUser->m_strUserID); } else iEvent = 1009; break; case EVENT_DEF3_ITEM: if(bFlag) { iEventItemSid = NPC_EVENT_ITEM; iEvent = EVENT_DEF3_ITEM; strMsg.Format(IDS_EVENT_DEF3_ITEM, pUser->m_strUserID); } else iEvent = 1010; break; case EVENT_INIT_STAT_ITEM: if(bFlag) { iEventItemSid = NPC_EVENT_INIT_STAT; iEvent = EVENT_INIT_STAT_ITEM; strMsg.Format(IDS_EVENT_RESET_STAT, pUser->m_strUserID); } else iEvent = 1011; break; case EVENT_USER_GAME_TIME: if(bFlag) { iEventItemSid = NPC_EVENT_ITEM; iEvent = EVENT_USER_GAME_TIME; strMsg.Format(IDS_EVENT_PERSONAL, pUser->m_strUserID); } else iEvent = 1012; break; break; default: return; break; } if(bFlag) // 磊悼栏肺 牢亥俊 茫酒辑 甸绢埃促. { if(iEventItemSid == NPC_EVENT_ITEM || iEventItemSid == NPC_EVENT_INIT_STAT) { pUser->m_UserItem[iSlot].tType = TYPE_ITEM; pUser->m_UserItem[iSlot].sLevel = g_arItemTable[iEventItemSid]->m_byRLevel; pUser->m_UserItem[iSlot].sSid = g_arItemTable[iEventItemSid]->m_sSid; pUser->m_UserItem[iSlot].sCount = 1; pUser->m_UserItem[iSlot].sDuration = g_arItemTable[iEventItemSid]->m_sDuration; pUser->m_UserItem[iSlot].sBullNum = g_arItemTable[iEventItemSid]->m_sBullNum; pUser->m_UserItem[iSlot].tIQ = (BYTE)iEvent; pUser->m_UserItem[iSlot].iItemSerial = 0; SetISerialToItem(&pUser->m_UserItem[iSlot], iEventNum); /* for(j = 0; j < MAGIC_NUM; j++) { pUser->m_UserItem[iSlot].tMagic[j] = 0; pUser->m_UserItem[iSlot].tMagic[j] = tSerial[j];//g_arAddEventItemTable[iEventNum]->m_strSerialNum[j]; } */ CBufferEx TempBuf; TempBuf.Add(ITEM_LOAD_RESULT); TempBuf.Add(SUCCESS); TempBuf.Add((BYTE)0x01); TempBuf.Add((BYTE)iSlot); TempBuf.Add(pUser->m_UserItem[iSlot].sLevel); TempBuf.Add(pUser->m_UserItem[iSlot].sSid); TempBuf.Add(pUser->m_UserItem[iSlot].sDuration); TempBuf.Add(pUser->m_UserItem[iSlot].sBullNum); TempBuf.Add(pUser->m_UserItem[iSlot].sCount); for(j = 0; j < MAGIC_NUM; j++) TempBuf.Add((BYTE)pUser->m_UserItem[iSlot].tMagic[j]); TempBuf.Add((BYTE)pUser->m_UserItem[iSlot].tIQ); pUser->Send(TempBuf, TempBuf.GetLength()); pUser->m_iCurWeight += g_arItemTable[iEventItemSid]->m_byWeight; pUser->GetRecoverySpeed(); // 酒捞袍 公霸俊 函悼捞 积扁搁 雀汗加档 函券 pUser->m_pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_NORMAL); // pUser->m_pCom->Announce(strMsg.GetBuffer(strMsg.GetLength()), SYSTEM_NORMAL); } return; } // 牢亥持扁俊 角菩窍搁 甘俊 悸泼 GiveItemToMap(pUser->m_pCom, iEvent, TRUE, iEventNum); // 捞亥飘 酒捞袍 } /////////////////////////////////////////////////////////////////////////////////// // 捞亥飘 内靛烙 (扁埃 : 2002斥 4岿 8老 ~~ 2002斥 4岿 13老) // //@@@@@@@@@@@@@@@@@@@@@@@@ void CNpc::SetISerialToItem(ItemList *pItem, int iEventSid) { int i, j = 0; TCHAR strTemp[3]; if(!pItem) return; if(iEventSid < 0 || iEventSid >= g_arAddEventItemTable.GetSize()) return; for(i = 0; i < MAGIC_NUM; i++) { ::ZeroMemory(strTemp, sizeof(strTemp)); strncpy(strTemp,g_arAddEventItemTable[iEventSid]->m_strSerialNum+j, 3); pItem->tMagic[i] = 0; pItem->tMagic[i] = atoi(strTemp); j = j + 4; } } /////////////////////////////////////////////////////////////////////////////////// // 捞亥飘 内靛烙 (扁埃 : 2002斥 4岿 8老 ~~ 2002斥 4岿 13老) // //@@@@@@@@@@@@@@@@@@@@@@@@ int CNpc::GetEventItemNum(COM *pCom) { int i, iRet = -1; DWORD dwCurTick = 0; DWORD dwPreTick = 0; EnterCriticalSection( &(pCom->m_critEvent) ); dwCurTick = GetTickCount(); for(i = 0; i < g_arAddEventItemTable.GetSize(); i++) { int tt = g_arAddEventItemTable[i]->m_tEnd; if(!g_arAddEventItemTable[i]->m_tEnd) // 酒流 冻绢瘤瘤 臼疽促搁 { if(i == 0) dwPreTick = 0; else dwPreTick = g_arAddEventItemTable[i - 1]->m_dwTick; //@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@22 if(dwCurTick - dwPreTick >= 60000 * 20) // 24盒捞 逞菌促搁 // if(dwCurTick - dwPreTick >= 1000) // 1盒捞 逞菌促搁 { g_arAddEventItemTable[i]->m_dwTick = dwCurTick; g_arAddEventItemTable[i]->m_tEnd = 1; // 角力荐父 锅龋甫 悸泼秦辑 霖促. if(g_arAddEventItemTable[i]->m_tGiveFlag) iRet = g_arAddEventItemTable[i]->m_sSid; } LeaveCriticalSection( &(pCom->m_critEvent) ); return iRet; } } LeaveCriticalSection( &(pCom->m_critEvent) ); return iRet; } /////////////////////////////////////////////////////////////////////////////////// // 烙矫 捞亥飘 内靛烙 (扁埃 : 2001斥 12岿 29老 ~~ 2002斥 1岿 2老) // //@@@@@@@@@@@@@@@@@@@@@@@@ BOOL CNpc::UpdateEventItem(int sid) { SQLHSTMT hstmt = NULL; SQLRETURN retcode = 0; BOOL bQuerySuccess = TRUE; TCHAR szSQL[8000]; SQLINTEGER iRetInd = SQL_NTS; SQLSMALLINT sRet = 0; ::ZeroMemory(szSQL, sizeof(szSQL)); _sntprintf(szSQL, sizeof(szSQL), TEXT("{call UPDATE_EVENT_ITEM(%d, ?)}"), sid); int db_index = 0; CDatabase* pDB = g_DBNew[AUTOMATA_THREAD].GetDB( db_index ); if( !pDB ) return FALSE; retcode = SQLAllocHandle( (SQLSMALLINT)SQL_HANDLE_STMT, pDB->m_hdbc, &hstmt ); if( retcode != SQL_SUCCESS ) { return FALSE; } retcode = SQLBindParameter( hstmt, 1 ,SQL_PARAM_OUTPUT,SQL_C_SSHORT, SQL_SMALLINT,0,0, &sRet,0, &iRetInd); if( retcode != SQL_SUCCESS ) { SQLFreeHandle((SQLSMALLINT)SQL_HANDLE_STMT, hstmt); return FALSE; } if (retcode == SQL_SUCCESS) { retcode = SQLExecDirect(hstmt, (unsigned char *)szSQL, SQL_NTS); if (retcode ==SQL_SUCCESS || retcode == SQL_SUCCESS_WITH_INFO) { } else if (retcode==SQL_ERROR) { DisplayErrorMsg( hstmt ); SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); return FALSE; } } else { DisplayErrorMsg( hstmt ); SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); return FALSE; } if (hstmt!=NULL) SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); if( !bQuerySuccess ) return FALSE; if(sRet = 0) return FALSE; return TRUE; } /////////////////////////////////////////////////////////////////////////////////// // 荐府啊 救登绢 乐栏搁 泅犁 HP甫 蜡瘤茄促. // void CNpc::SetFortressState() { for(int i = 0; i < GUILD_REPAIR_MAX_NUM; i++) { if(m_pGuardFortress->m_arRepairDBList[i].sUid == m_sEZone) { if(m_pGuardFortress->m_arRepairDBList[i].sHP < m_sMaxHP) { m_sHP = m_pGuardFortress->m_arRepairDBList[i].sHP; if(m_sHP == 0) m_tRepairDamaged = NPC_DEAD_REPAIR_STATE; else m_tRepairDamaged = NPC_NEED_REPAIR_STATE; break; } } } } /////////////////////////////////////////////////////////////////////////////////// // N_Circle俊 措茄 贸府甫 拌魂茄促. // //void CNpc::SetDamagedInFortressWar(int nDamage, TCHAR *id, int uuid, COM *pCom) void CNpc::SetDamagedInFortressWar(int nDamage, USER *pUser) { int i; int iCount = 0; int index = 0; BOOL bSuccess = FALSE; CBufferEx TempBuf; CNpc *pNpc = NULL; // 辨靛傈阑 脚没茄 辨靛牢瘤 魄窜. if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) return; if(pUser->m_dwGuild <= 0 || !m_pGuardFortress) return; if(pUser->m_tFortressWar == GUILD_WAR_AFFTER) return; if(m_pGuardFortress->m_lUsed == 0) return; // 辨靛傈捞 场车促. if(m_pGuardFortress->m_lChangeUsed == 1) return; for(i = 0; i < GUILDFORTRESS_ATTACK_MAX_NUM; i++) { if(pUser->m_dwGuild == m_pGuardFortress->m_arAttackGuild[i].lGuild) { bSuccess = TRUE; break; } } if(pUser->m_dwGuild == m_pGuardFortress->m_iGuildSid) bSuccess = TRUE; if(!bSuccess) return; // 脚没茄 辨靛啊 酒丛 if(InterlockedCompareExchange((LONG*)&m_lFortressState, (long)1, (long)0) == (long)0) { if(pUser->m_dwGuild == m_pGuardFortress->m_iGuildSid) // 规绢螟篮 { if(m_tNCircle != NPC_NCIRCLE_ATT_STATE) { InterlockedExchange(&m_lFortressState, (LONG)0); return; } m_sHP -= nDamage; // 傍拜螟篮 - 蔼 if(m_sHP <= 0) { m_sHP = m_sMaxHP; m_byColor = 0; m_tNCircle = NPC_NCIRCLE_DEF_STATE; SendFortressNCircleColor(pUser->m_pCom); } } else { if(m_tNCircle != NPC_NCIRCLE_DEF_STATE) { InterlockedExchange(&m_lFortressState, (LONG)0); return; } m_sHP -= nDamage; // 傍拜螟篮 - 蔼 if(m_sHP <= 0) { m_sHP = m_sMaxHP; m_byColor = 1; m_tNCircle = NPC_NCIRCLE_ATT_STATE; SendFortressNCircleColor(pUser->m_pCom); } } iCount = 0; for(i = 0; i < FORTRESS_TARGET_MAX_NUM; i++) { pNpc = NULL; pNpc = GetNpc(m_pGuardFortress->m_arFortressTarget[i].sTargertID); if(pNpc) { if(pNpc->m_tNCircle == NPC_NCIRCLE_ATT_STATE) iCount++; } } if(iCount == FORTRESS_TARGET_MAX_NUM) { if(pUser->m_dwGuild == m_pGuardFortress->m_iGuildSid) { InterlockedExchange(&m_lFortressState, (LONG)0); return; } for(i = 0; i < g_arGuildFortress.GetSize(); i++) { if(!g_arGuildFortress[i]) continue; if(g_arGuildFortress[i]->m_sFortressID == m_pGuardFortress->m_sFortressID) { if(g_arGuildFortress[i]->m_lUsed == 1) { if(InterlockedCompareExchange((LONG*)&g_arGuildFortress[i]->m_lChangeUsed, (long)1, (long)0) == (long)0) { FORTRESSDATAPACKET *pFDP = NULL; pFDP = new FORTRESSDATAPACKET; pFDP->sFortressIndex = i; memset(pFDP->FORTRESS, NULL, CHAR_NAME_LENGTH+sizeof(int)+1); index = strlen(pUser->m_strGuildName); if(index > 0 && index <= CHAR_NAME_LENGTH) memcpy(pFDP->FORTRESS, pUser->m_strGuildName, index ); EnterCriticalSection( &m_CS_FortressData ); RecvFortressData.AddTail(pFDP); nFortressDataCount = RecvFortressData.GetCount(); LeaveCriticalSection( &m_CS_FortressData ); pUser->StoppingTheFortressWar(g_arGuildFortress[i]); // 沥秦柳 矫埃傈捞骨肺 霸烙篮 拌加登绢具 茄促. InterlockedExchange(&g_arGuildFortress[i]->m_lChangeUsed, (LONG)0); } } break; } } } InterlockedExchange(&m_lFortressState, (LONG)0); } return; } //void CNpc::SetDoorDamagedInFortressWar(int nDamage, TCHAR *id, int uuid, COM *pCom) void CNpc::SetDoorDamagedInFortressWar(int nDamage, USER *pUser) { // 傍己傈 扁埃捞扼搁 穿备唱 傍拜窍档废.. 窜 规绢螟篮 救凳 if(!pUser || !m_pGuardFortress) return; if(pUser->m_dwGuild == m_pGuardFortress->m_iGuildSid) return; // 规绢螟捞 绊狼肺 傍拜窍绰巴阑 规瘤茄促. if(m_pGuardFortress->m_lUsed == 0) return; // 辨靛傈捞 场车促. m_sHP -= nDamage; if( m_sHP <= 0 ) { m_sHP = 0; MAP* pMap = g_zone[m_ZoneIndex]; pMap->m_pMap[m_sCurX][m_sCurY].m_lUser = 0; m_NpcState = NPC_DEAD; m_Delay = m_sRegenTime; m_bFirstLive = FALSE; SetMapAfterGuildWar(); SendDead(pUser->m_pCom); } } void CNpc::SendFortressNCircleColor(COM *pCom) { int modify_index = 0; char modify_send[2048]; CBufferEx TempBuf; TempBuf.Add(GUILD_FORTRESS_NCIRCLE); TempBuf.Add((BYTE)0x00); // 秦寸 N_Circle狼 祸捞 函拳 TempBuf.Add((int)(m_sNid + NPC_BAND)); TempBuf.Add(m_tNCircle); SendFortressInsight(pCom, TempBuf, TempBuf.GetLength()); ::ZeroMemory(modify_send, sizeof(modify_send)); FillNpcInfo(modify_send, modify_index, INFO_MODIFY); SendFortressInsight(pCom, modify_send, modify_index); } void CNpc::SetMapTypeBeforeGuildWar(COM *pCom) { int i; int uid = 0; USER *pUser = NULL; CNpc *pNpc = NULL; int x, y; long lNpcUid = 0; // POINT temp1Map[12] = {{-2,-2}, {-2,-1}, {-2,0}, {-1,-1}, {-1,0}, {0,-1}, {0,0},{1,-1},{1,0}, {2,-1}, {2,0}, {2,1}}; POINT temp1Map[16] = {{-3,-2}, {-4,-1}, {-3,-1}, {-2,-1}, {-4,0}, {-3,0}, {-2,0},{-1,0},{0,0}, {1,0}, {2,0}, {1,1}, {2,1}, {2,2},{3,2},{3,3}}; POINT temp2Map[17] = {{-2,-3}, {-1,-3}, {0,-3}, {-1,-2}, {0,-2}, {0,-1}, {0,0},{0,1},{1,1}, {0,2}, {1,2}, {2,2}, {0,3}, {1,3},{2,3},{0,4},{1,4}}; POINT temp3Map[6] = {{-2,0},{-1,0},{0,0},{1,0},{2,0},{2,1}}; // POINT temp2Map[] = {{1,-2}, {1,-1}, {1,0}, {1,1}, {1,2}, {0,-2},{0,-1},{0,0},{0,1},{0,2}}; MAP* pMap = g_zone[m_ZoneIndex]; switch(m_sDimension) { case 1: lNpcUid = m_sNid + NPC_BAND; for(i =0; i < sizeof(temp1Map)/sizeof(POINT); i++) { x = m_sCurX + temp1Map[i].x; y = m_sCurY + temp1Map[i].y; uid = pMap->m_pMap[x][y].m_lUser; if(uid >= USER_BAND && uid < NPC_BAND) // Target 捞 User 牢 版快 { pUser = GetUser(pCom, uid - USER_BAND); if( pUser->m_tIsOP != 1 ) pUser->TownPotal(); } ::InterlockedExchange(&pMap->m_pMap[x][y].m_lUser, lNpcUid); } break; case 2: lNpcUid = m_sNid + NPC_BAND; for(i =0; i < sizeof(temp2Map)/sizeof(POINT); i++) { x = m_sCurX + temp2Map[i].x; y = m_sCurY + temp2Map[i].y; uid = pMap->m_pMap[x][y].m_lUser; if(uid >= USER_BAND && uid < NPC_BAND) // Target 捞 User 牢 版快 { pUser = GetUser(pCom, uid - USER_BAND); if( pUser->m_tIsOP != 1 ) pUser->TownPotal(); } ::InterlockedExchange(&pMap->m_pMap[x][y].m_lUser, lNpcUid); } break; case 3: lNpcUid = m_sNid + NPC_BAND; for(i =0; i < sizeof(temp3Map)/sizeof(POINT); i++) { x = m_sCurX + temp3Map[i].x; y = m_sCurY + temp3Map[i].y; uid = pMap->m_pMap[x][y].m_lUser; if(uid >= USER_BAND && uid < NPC_BAND) // Target 捞 User 牢 版快 { pUser = GetUser(pCom, uid - USER_BAND); if( pUser->m_tIsOP != 1 ) pUser->TownPotal(); } ::InterlockedExchange(&pMap->m_pMap[x][y].m_lUser, lNpcUid); } break; } } void CNpc::SetMapAfterGuildWar() { int i; int uid = 0; USER *pUser = NULL; int x, y; // POINT temp1Map[12] = {{-2,-2}, {-2,-1}, {-2,0}, {-1,-1}, {-1,0}, {0,-1}, {0,0},{1,-1},{1,0}, {2,-1}, {2,0}, {2,1}}; // POINT temp2Map[] = {{1,-2}, {1,-1}, {1,0}, {1,1}, {1,2}, {0,-2},{0,-1},{0,0},{0,1},{0,2}}; POINT temp1Map[16] = {{-3,-2}, {-4,-1}, {-3,-1}, {-2,-1}, {-4,0}, {-3,0}, {-2,0},{-1,0},{0,0}, {1,0}, {2,0}, {1,1}, {2,1}, {2,2},{3,2},{3,3}}; POINT temp2Map[17] = {{-2,-3}, {-1,-3}, {0,-3}, {-1,-2}, {0,-2}, {0,-1}, {0,0},{0,1},{1,1}, {0,2}, {1,2}, {2,2}, {0,3}, {1,3},{2,3},{0,4},{1,4}}; POINT temp3Map[6] = {{-2,0},{-1,0},{0,0},{1,0},{2,0},{2,1}}; MAP* pMap = g_zone[m_ZoneIndex]; switch(m_sDimension) { case 1: for(i =0; i < sizeof(temp1Map)/sizeof(POINT); i++) { x = m_sCurX + temp1Map[i].x; y = m_sCurY + temp1Map[i].y; InterlockedExchange(&pMap->m_pMap[x][y].m_lUser, 0); } break; case 2: for(i =0; i < sizeof(temp2Map)/sizeof(POINT); i++) { x = m_sCurX + temp2Map[i].x; y = m_sCurY + temp2Map[i].y; InterlockedExchange(&pMap->m_pMap[x][y].m_lUser, 0); } break; case 3: for(i =0; i < sizeof(temp3Map)/sizeof(POINT); i++) { x = m_sCurX + temp3Map[i].x; y = m_sCurY + temp3Map[i].y; InterlockedExchange(&pMap->m_pMap[x][y].m_lUser, 0); } break; } } void CNpc::SendFortressInsight(COM *pCom, TCHAR *pBuf, int nLength) { if(nLength <= 0 || nLength >= SEND_BUF_SIZE) return; int insight_range = 10; int sx = m_sCurX / SIGHT_SIZE_X; int sy = m_sCurY / SIGHT_SIZE_Y; int min_x = (sx-8)*(SIGHT_SIZE_X); if( min_x < 0 ) min_x = 0; int max_x = (sx+9)*(SIGHT_SIZE_X); int min_y = (sy-8)*(SIGHT_SIZE_Y); if( min_y < 0 ) min_y = 0; int max_y = (sy+9)*(SIGHT_SIZE_Y); MAP* pMap = g_zone[m_ZoneIndex]; if( !pMap ) return; if( max_x >= pMap->m_sizeMap.cx ) max_x = pMap->m_sizeMap.cx - 1; if( max_y >= pMap->m_sizeMap.cy ) max_y = pMap->m_sizeMap.cy - 1; int temp_uid; USER* pUser = NULL; for( int i = min_x; i < max_x; i++ ) { for( int j = min_y; j < max_y; j++ ) { temp_uid = pMap->m_pMap[i][j].m_lUser; if(temp_uid < USER_BAND || temp_uid >= NPC_BAND) continue; else temp_uid -= USER_BAND; if( temp_uid >= 0 && temp_uid < MAX_USER ) { pUser = pCom->GetUserUid(temp_uid); if(pUser == NULL) continue; if( pUser->m_state == STATE_GAMESTARTED ) { if( pUser->m_curx == i && pUser->m_cury == j && pUser->m_curz == m_sCurZ ) { Send( pUser, pBuf, nLength ); } } } } } } void CNpc::TestCode(COM *pCom, USER *pUser) { int i; int uid = 0; CNpc *pNpc = NULL; int x, y; long lNpcUid = 0; if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) return; POINT temp1Map[12] = {{-2,-2}, {-2,-1}, {-2,0}, {-1,-1}, {-1,0}, {0,-1}, {0,0},{1,-1},{1,0}, {2,-1}, {2,0}, {2,1}};// server // POINT temp2Map[8] = {{0,0}, {0,1}, {0,2}, {0,3}, {-1,0}, {-1,1}, {-1,2},{-1,3}}; // POINT temp1Map[12] = {{147,1183}, {148,1184}, {149,1185}, {149,1183}, {150,1184}, {150,1182}, {151,1183},{151,1181},{152,1182}, {152,1180}, {153,1181}, {154,1182}}; POINT temp2Map[] = {{-1,-3}, {0,-2}, {1,-1}, {2,0}, {3,1}, {-2,-2}, {-1,-1}, {0,0}, {1,1}, {2,2}}; // client MAP* pMap = g_zone[m_ZoneIndex]; CPoint temp = ConvertToClient(m_sCurX, m_sCurY); switch(m_sDimension) { case 1: lNpcUid = m_sNid + NPC_BAND; for(i =0; i < 12; i++) { x = m_sCurX + temp1Map[i].x; y = m_sCurY + temp1Map[i].y; CString strMsg = _T(""); CPoint pt = ConvertToClient(x, y); // CPoint pt = ConvertToServer(temp1Map[i].x, temp1Map[i].y); strMsg.Format("1Luinet locked door x = %d, y = %d", pt.x - temp.x, pt.y - temp.y); // pUser->NormalChat(strMsg.GetBuffer(strMsg.GetLength())); // ::InterlockedExchange(&pMap->m_pMap[x][y].m_lUser, lNpcUid); } break; case 2: lNpcUid = m_sNid + NPC_BAND; for(i =0; i < 10; i++) { x = temp.x + temp2Map[i].x; y = temp.y + temp2Map[i].y; CPoint pt = ConvertToServer(x, y); CString strMsg = _T(""); // CPoint pt = ConvertToServer(temp1Map[i].x, temp1Map[i].y); strMsg.Format("1Sanad locked door x = %d, y = %d", pt.x - m_sCurX, pt.y - m_sCurY); // pUser->NormalChat(strMsg.GetBuffer(strMsg.GetLength())); } break; } } int CNpc::PsiAttack(COM *pCom) //NPC魔法攻击 { DWORD dwExp = 0; int nDamage = 0; int nTempHP = 0; USER* pUser = NULL; CNpc* pNpc = NULL; BYTE tWeaponClass = 0; BOOL bCanUseSkill = FALSE; int bSuccessSkill[SKILL_NUM] = {FALSE, FALSE, FALSE, FALSE, FALSE}; int nPsiRange = 0; int nTPosX = 0; int nTPosY = 0; int nDist = 100; short sNeedPP = 25000; BYTE tPsiRegi = 0; DWORD dwPsiCast = 0; BOOL bPsiSuccess = FALSE; int index = 0; int delay = -1; int nTargetID = m_Target.id; // Target ID 甫 掘绰促. BYTE byPsi = m_byPsi; // Psionic sid 甫 掘绰促. int nPsiX = -1; // Teleport且 困摹 int nPsiY = -1; CPoint ptPsi(-1, -1); if( byPsi < 0 || byPsi >= g_arMonsterPsi.GetSize() ) return -1; if( nTargetID < USER_BAND || nTargetID >= INVALID_BAND ) return-1; // 肋给等 Target 捞搁 return pUser = GetUser( pCom, nTargetID - USER_BAND ); if( !pUser ) return -1; CMonsterPsi* pMagic = g_arMonsterPsi[(int)byPsi]; if( !pMagic ) return -1; // 荤沥芭府 拌魂 ------------------------------------------------------------------------// if( !IsCloseTarget( pCom, (int)pMagic->m_byRange ) ) return -1; short damage, result; if(pMagic->m_sSid != 0) { damage = myrand( pMagic->m_sMinDmg, pMagic->m_sMaxDmg ); result = damage * m_sVOL - pUser->GetUserSpellDefence(); /* //如果存在护法,则攻击护法 if(pUser->m_tHuFaType &&pUser->m_nHuFaHP>0) { int nID = pUser->m_uid+USER_BAND+USER_HUFA_BAND; //伤害数据 pUser->SendDamageNum(0,nID,(short)result); SendAttackSuccess(pCom, nID, 0, pUser->m_nHuFaHP, pUser->m_nHuFaMaxHP);//yskang 0.3 if(nDamage > 0) pUser->SetHuFaDamage(result); if(pUser->m_nHuFaHP>0) { pUser->HuFaAttack(m_sNid+NPC_BAND); } CBufferEx TempBuf; TempBuf.Add(PSI_ATTACK_RESULT); TempBuf.Add(SUCCESS); TempBuf.Add( (BYTE)33 ); // Psionic sid TempBuf.Add( m_sNid + NPC_BAND ); TempBuf.Add( nID ); SendExactScreen(pCom, TempBuf, TempBuf.GetLength()); return 1200; }*/ pUser->SetDamage((int)result); // pUser->SetFaNu(result); pUser->SendDamageNum(0,pUser->m_uid+USER_BAND,result); if(pUser->m_lDeadUsed == 1) { InitTarget(); m_NpcState = NPC_STANDING; delay = m_sStandTime; if(m_NpcVirtualState == NPC_STANDING) { if(m_sPid == 179) pUser->GetLevelDownExp(FALSE, -1, TRUE,m_strName); // 魔傍侩 阁胶磐老版快 版氰摹 1%皑家 else pUser->GetLevelDownExp(FALSE, -1, FALSE,m_strName); // 版氰摹客 弊寇 函拳樊甫 馆康茄促. } } else { if(m_sPid == 204) //黑暗死神 变魔法招怪 { int iRandom = myrand(1, 10000); if(iRandom <= 3000) { CBufferEx TempBuf; TempBuf.Add(PSI_ATTACK_RESULT); TempBuf.Add(SUCCESS); TempBuf.Add( (BYTE)33 ); // Psionic sid TempBuf.Add( m_sNid + NPC_BAND ); TempBuf.Add( nTargetID ); SendExactScreen(pCom, TempBuf, TempBuf.GetLength()); delay = (int)1200; CPoint pt(-1, -1); pt = pUser->FindNearAvailablePoint_S(m_sCurX, m_sCurY); for(int i = 0; i< 2;i++){ pUser->SummonQuestMonster(174,m_sCurZ,pt.x,pt.y); pUser->SummonQuestMonster(174,m_sCurZ,pt.x,pt.y); pUser->SummonQuestMonster(174,m_sCurZ,pt.x,pt.y); pUser->SummonQuestMonster(174,m_sCurZ,pt.x,pt.y); pUser->SummonQuestMonster(174,m_sCurZ,pt.x,pt.y); pUser->SummonQuestMonster(174,m_sCurZ,pt.x,pt.y); pUser->SummonQuestMonster(174,m_sCurZ,pt.x,pt.y); } return (int)delay; } } switch(pMagic->m_sSid) { case 8: case 27: case 31: case 32: GetWideRangeAttack( pCom, pUser->m_curx, pUser->m_cury, (int)damage, nTargetID - USER_BAND ); break; case 2: // 筋绰 付过 case 5: case 28: case 37: case 38: case 40: case 41: case 42: case 43: pUser->SetFireDamage(); break; case 4: case 10: case 21: pUser->SetColdDamage(); break; case 24: // pUser->SetConFusion(); GetWideRangeAttack( pCom, pUser->m_curx, pUser->m_cury, (int)damage, nTargetID - USER_BAND ); break; } } } CBufferEx TempBuf; TempBuf.Add(PSI_ATTACK_RESULT); TempBuf.Add(SUCCESS); TempBuf.Add( (BYTE)pMagic->m_sPid ); // Psionic sid TempBuf.Add( m_sNid + NPC_BAND ); TempBuf.Add( nTargetID ); // SendInsight( pCom, TempBuf, TempBuf.GetLength()); SendExactScreen(pCom, TempBuf, TempBuf.GetLength()); delay = (int)pMagic->m_sCasting; return (int)delay; } void CNpc::GetWideRangeAttack(COM* pCom, int x, int y, int damage, int except_uid) // 瘤陛篮 傍拜父 贸府... { int dir[9][2]; int ix, iy; int nTarget = 0; int nDamage = 0; double result = 0; USER* pUser = NULL; MAP* pMap = g_zone[m_ZoneIndex]; if(!pMap) return; dir[0][0] = 0; dir[0][1] = 0; // dir[1][0] = -1; dir[1][1] = 0; // dir[2][0] = -1; dir[2][1] = 1; // dir[3][0] = 0; dir[3][1] = 1; // dir[4][0] = 1; dir[4][1] = 1; // dir[5][0] = 1; dir[5][1] = 0; // dir[6][0] = 1; dir[6][1] = -1; // dir[7][0] = 0; dir[7][1] = -1; // dir[8][0] = -1; dir[8][1] = -1; // for(int i = 1; i < 9; i++) { ix = x + dir[i][0]; iy = y + dir[i][1]; if(ix < 0) ix = 0; if(iy < 0) iy = 0; if(ix >= pMap->m_sizeMap.cx) ix = pMap->m_sizeMap.cx - 1; if(iy >= pMap->m_sizeMap.cy) iy = pMap->m_sizeMap.cy - 1; nTarget = pMap->m_pMap[ix][iy].m_lUser; if(nTarget >= USER_BAND && nTarget < NPC_BAND) // USER { pUser = GetUser( pCom, nTarget - USER_BAND); // User Pointer 甫 掘绰促. if(pUser == NULL || pUser->m_state != STATE_GAMESTARTED) continue; // 肋给等 USER 捞搁 府畔 if(pUser->m_bLive == USER_DEAD) continue; // Target User 啊 捞固 磷绢乐栏搁 府畔 if(pUser->m_uid == except_uid ) continue; // 吝缴俊 乐绰 蜡历绰 拌魂窍瘤 臼绰促 result = (double)damage * (double)( m_sVOL * 20 ) / (double)( pUser->m_sMagicVOL * 15 + pUser->m_DynamicUserData[MAGIC_PSI_RESIST_UP] + m_sVOL * 20 ); pUser->SetDamage((int)result); if(pUser->m_sHP > 0) // 混篮 版快 傈扁单固瘤 眠啊 { // pUser->SetColdDamage(); } else { // IsChangeCityRank(pUser); if(m_sPid == 179) pUser->GetLevelDownExp(FALSE, -1, TRUE,m_strName); // 魔傍侩 阁胶磐老版快 版氰摹 1%皑家 else pUser->GetLevelDownExp(USER_PK, -1, FALSE,m_strName); // 版氰摹客 弊寇 函拳樊甫 馆康茄促. } } /* else if(nTarget >= NPC_BAND) // NPC { pNpc = GetNpc(nTarget - NPC_BAND); // NPC Point 甫 掘绰促. if(pNpc == NULL) continue; // 肋给等 NPC 捞搁 府畔 if(pNpc->m_NpcState == NPC_DEAD || pNpc->m_tNpcType != NPCTYPE_MONSTER) continue; // NPC 啊 捞固 磷绢 乐栏搁 府畔 if(pNpc->m_sHP <= 0) continue; nDamage = (int)(damage * ((double)m_sMagicVOL / (m_sMagicVOL + pNpc->m_sVOL))); nDamage = (int)((double)nDamage/2 + 0.5); // 单固瘤狼 50%父 甸绢埃促. if(pNpc->SetDamage(nDamage, m_strUserID, m_uid + USER_BAND, m_pCom) == FALSE) { if(m_tGuildHouseWar == GUILD_WARRING && pNpc->m_NpcVirtualState == NPC_WAIT) { CheckGuildHouseWarEnd(); } pNpc->SendExpToUserList(m_pCom); // 版氰摹 盒硅!! pNpc->SendDead(m_pCom); int diffLevel = abs(m_sLevel - pNpc->m_byClassLevel); if(difflevel < 30) { CheckMaxValue(m_dwXP, 1); // 各捞 磷阑锭父 1 刘啊! SendXP(); } } else // 混篮 版快 傈扁单固瘤 眠啊 { // pNpc->SetColdDamage(); } } */ } } int CNpc::AreaAttack(COM *pCom) { if(!pCom) return 10000; if(m_tNpcType == NPCTYPE_GUARD) return -1; if(m_tNpcType == NPCTYPE_GUILD_GUARD) return -1; int nStandingTime = m_sStandTime; // 醚扁拌凯 老锭绰 鸥百苞狼 芭府拌魂阑 崔府秦具 茄促. if(IsCloseTarget(pCom, m_byRange) == FALSE)// Check Code (窜瘤 规绢利牢 螟搁俊辑 持篮 内靛) { m_NpcState = NPC_TRACING; // 傍拜窍绊 档噶啊绰 蜡历甫 蝶扼 棱扁困秦(馆览阑 粱歹 狐福霸) TRACE("AreaAttack - 芭府 钢绢辑 角菩\n"); return -1; // IsCloseTarget()俊 蜡历 x, y蔼阑 盎脚窍绊 Delay = 0栏肺 淋 } USER* pUser = NULL; int nRange = 1; // 傍拜 裹困 : 1 - 林困 8沫, 2 - 林困 24沫... int nTargetCount = 0; int target_uid = -1; int center_x = m_sCurX; // 傍拜 裹困狼 吝缴痢 : 吝缴捞 傍拜窍绰 NPC老 荐档 乐绊 绢恫 痢阑 备且 荐档 乐促. int center_y = m_sCurY; // 泅犁绰 磊扁 林困 傍拜栏肺 茄沥 switch( (int)m_tSPATRange ) { case 0: case 1: nRange = 2; center_x = m_sCurX; center_y = m_sCurY; break; case 2: nRange = 2; center_x = m_sCurX; center_y = m_sCurY; break; case 3: nRange = 1; center_x = m_Target.x; center_y = m_Target.y; break; case 4: nRange = 2; center_x = m_Target.x; center_y = m_Target.y; break; default: nRange = 1; center_x = m_sCurX; center_y = m_sCurY; break; } MAP* pMap = g_zone[m_ZoneIndex]; if(!pMap) { TRACE("AreaAttack - 甘绝绢辑 角菩\n"); return -1; } int min_x = center_x - nRange; if( min_x < 0 ) min_x = 0; int min_y = center_y - nRange; if( min_y < 0 ) min_y = 0; int max_x = center_x + nRange; int max_y = center_y + nRange; if(max_x >= pMap->m_sizeMap.cx) max_x = pMap->m_sizeMap.cx - 1; if(max_y >= pMap->m_sizeMap.cy) max_y = pMap->m_sizeMap.cy - 1; TargetUser tuser[25]; int nAvoid = 0; int iRandom = 0; int determine = 0; int iDexHitRate = 0, iLevelHitRate = 0; short sTempHP = 0; int nHit = 0; BOOL bIsHit = FALSE; BOOL bIsCritical = FALSE; int nDamage = 0; int nDefense = 0; int nID = m_Target.id; // Target 阑 备茄促. // 疙吝咯何 魄窜 函荐 檬扁拳 bIsHit = FALSE; for( int ix = min_x; ix <= max_x; ix++ ) { for( int iy = min_y; iy <= max_y; iy++ ) { target_uid = pMap->m_pMap[ix][iy].m_lUser; if( target_uid < USER_BAND || target_uid >= NPC_BAND ) { continue; } pUser = GetUser(pCom, target_uid - USER_BAND); if( !pUser ) continue; if( pUser->m_bLive != USER_LIVE ) continue; if( ix != pUser->m_curx || iy != pUser->m_cury ) continue; if(pUser->m_state == STATE_DISCONNECTED) continue; if(pUser->m_tIsOP == 1 ) continue; if(pUser->m_bPShopOpen == TRUE) continue; // 雀乔蔼 拌魂 nAvoid = pUser->GetAvoid(); // 疙吝咯何 魄窜 iRandom = (int)((double)XdY(1, 1000) / 10 + 0.5); iDexHitRate = (int)( 30.0 * ( (double)m_sDEX/(m_sDEX + pUser->m_sMagicDEX) ) + 15.0 ); iLevelHitRate = (int)( 70.0 * ( (double)m_byClassLevel/(pUser->m_sLevel + m_byClassLevel) ) + 15.0); determine = iDexHitRate + iLevelHitRate - (nAvoid+pUser->m_Avoid); if(determine < ATTACK_MIN) determine = ATTACK_MIN; // 弥家 20 else if(determine > ATTACK_MAX) determine = ATTACK_MAX; // 弥措 if(iRandom < determine) bIsHit = TRUE; // 疙吝 // 傍拜 固胶 if(bIsHit == FALSE) { TRACE("AreaAttack - 傍拜 固胶\n"); continue; } // 疙吝捞搁 //Damage 贸府 ----------------------------------------------------------------// nDamage = GetFinalDamage(pUser); // 弥辆 措固瘤 if(nDamage > 0) { pUser->SetDamage(nDamage); pUser->SendDamageNum(0,pUser->m_uid+USER_BAND,nDamage); } // 规绢螟 郴备档 皑家 pUser->SendDamagedItem(nDamage); // if(pUser->m_bLive == USER_DEAD || pUser->m_sHP <= 0)//@@@ 唱吝俊 绊魔 if(pUser->m_lDeadUsed == 1) { if(m_NpcVirtualState == NPC_STANDING) { if(m_sPid == 179) pUser->GetLevelDownExp(FALSE, -1, TRUE,m_strName); // 魔傍侩 阁胶磐老版快 版氰摹 1%皑家 else pUser->GetLevelDownExp(FALSE, -1, FALSE,m_strName); // 版氰摹客 弊寇 函拳樊甫 馆康茄促. } } tuser[nTargetCount].iUid = target_uid; tuser[nTargetCount].sHP = pUser->m_sHP; tuser[nTargetCount].sMaxHP = pUser->m_sMagicMaxHP; pUser->SendHP(); nTargetCount++; if( nTargetCount >= 25 ) break; } if( nTargetCount >= 25 ) break; } if( !nTargetCount ) { TRACE("AreaAttack - 裹困 救俊 蜡历 绝绢辑 角菩\n"); // return -1; } CBufferEx TempBuf; // 2a 0 0 0 ca 1 1 9f 5b 0 0 6a 2c 0 0 0 0 TempBuf.Add(AREA_ATTACK_RESULT); TempBuf.Add(ATTACK_SUCCESS); TempBuf.Add( (byte)1 ); TempBuf.Add((int)(m_sNid + NPC_BAND)); for(int i = 0; i < 1; i++ ) { TempBuf.Add( (int)tuser[i].iUid ); TempBuf.Add( (short)0 ); TempBuf.Add( (short)0 ); } SendInsight(pCom, TempBuf, TempBuf.GetLength()); // SendExactScreen(pCom, TempBuf, TempBuf.GetLength()); TRACE("AreaAttack - 己傍\n"); return m_sAttackDelay; } void CNpc::GiveEventItemNewToUser(USER *pUser) { if( !pUser ) return; if( pUser->m_state != STATE_GAMESTARTED ) return; // if( pUser->m_iDisplayType == 6 && pUser->m_sLevel > 25) return; //yskang 0.5 if( pUser->m_iDisplayType == 6) return; //yskang 0.5 int i; CEventItemNew* pNewItem = NULL; BOOL bFlag = FALSE; int sItemSid = -1; BYTE tItemQuality = 0; BYTE tItemWear = 0; int j; int iSlot = -1; SYSTEMTIME time; GetLocalTime( &time ); CString strMsg = _T(""); MYSHORT upper; upper.i = 0; MYINT lower; lower.i = 0; for( i = 0; i < g_arEventItemNew.GetSize(); i++ ) { if( g_arEventItemNew[i] ) { pNewItem = g_arEventItemNew[i]; if( ::InterlockedCompareExchange( (long*)&(pNewItem->m_lGive), (long)0, (long)1 ) == (long)0 ) continue; if( pNewItem->m_sSid != NPC_EVENT_LOTTO ) { // 汗鼻捞 酒囱 版快 蜡历乏苞 各狼 乏狼 瞒捞啊 25乏阑 檬苞窍搁 林瘤 臼绰促. if(abs(m_byClassLevel - pUser->m_sLevel) > 25) { if(m_byClassLevel < pUser->m_sLevel) return; } } else { // 汗鼻篮 蜡历乏苞 各狼 乏狼 瞒捞啊 40乏阑 檬苞窍搁 林瘤 臼绰促. if(abs(m_byClassLevel - pUser->m_sLevel) > 40) { if(m_byClassLevel < pUser->m_sLevel) return; } } ////////////////////////////////////////////////////////////////////// // 眠啊登绰 捞亥飘 酒捞袍捞 乐栏搁 酒贰俊 眠啊茄促. ////////////////////////////////////////////////////////////////////// // 采官备聪 捞亥飘 if( pNewItem->m_sSid == NPC_EVENT_FLOWER ) { if( time.wYear == 2002 && time.wMonth == 5 && ( time.wDay >= 1 || time.wDay <= 5 ) ) { sItemSid = pNewItem->m_sSid; tItemQuality = 0; } else { return; } } if( pNewItem->m_sSid == NPC_EVENT_LOTTO ) { if( time.wYear == 2002 && ( ( time.wMonth == 5 && time.wDay >= 16 ) || ( time.wMonth == 6 && time.wDay <= 22 ) ) ) { sItemSid = pNewItem->m_sSid; tItemQuality = pNewItem->m_tQuality; } else { return; } if( pUser->m_sLevel < 25 ) // 捞亥飘 汗鼻篮 25饭骇 固父篮 林瘤 臼绰促. { return; } } ///////////////////////////////////////////////////////////////////// if( sItemSid < 0 || sItemSid >= g_arItemTable.GetSize() ) return; CItemTable* pItemTable = g_arItemTable[sItemSid]; iSlot = pUser->GetEmptySlot(INVENTORY_SLOT); if( iSlot != -1 ) { if(pUser->m_iMaxWeight >= pUser->m_iCurWeight + pItemTable->m_byWeight) bFlag = TRUE; } switch( sItemSid ) { case NPC_EVENT_FLOWER: if(bFlag) { strMsg.Format(IDS_EVENT_FLOWER, pUser->m_strUserID); } break; case NPC_EVENT_LOTTO: if(bFlag) { strMsg.Format(IDS_EVENT_LOTTO); } break; default: return; } ItemList newItem; pUser->ReSetItemSlot( &newItem ); newItem.tType = TYPE_ITEM; newItem.sLevel = pItemTable->m_byRLevel; newItem.sSid = sItemSid; newItem.sCount = 1; newItem.sDuration = pItemTable->m_sDuration; newItem.sBullNum = pItemTable->m_sBullNum; newItem.tIQ = tItemQuality; newItem.iItemSerial = 0; for( j = 0; j < MAGIC_NUM; j++ ) newItem.tMagic[j] = 0; // 捞亥飘 酒捞袍 林扁 抛捞喉俊辑 巢篮 俺荐甫 茄俺 临咯霖促. pNewItem->m_sRemain--; if( pNewItem->m_sRemain < 0 ) { pNewItem->m_sRemain = 0; } if( !UpdateEventItemNewRemain( pNewItem ) ) { pNewItem->m_sRemain++; ::InterlockedExchange( &(pNewItem->m_lGive), 1 ); return; } if( pNewItem->m_tSerialExist != 255 ) // 矫府倔 锅龋甫 何咯秦具 窍绰 惑炔捞扼搁 { upper.i = pNewItem->m_tSerialExist; // 10000 锅措 锅龋 lower.i = pNewItem->m_sRemain; newItem.tMagic[0] = upper.b[0]; newItem.tMagic[1] = upper.b[1]; newItem.tMagic[2] = lower.b[0]; newItem.tMagic[3] = lower.b[1]; newItem.tMagic[4] = lower.b[2]; newItem.tMagic[5] = lower.b[3]; } // bFlag - 牢亥俊 后浇吩捞 乐绊, 公霸力茄俊 吧府瘤 臼疽阑锭 TRUE捞促. if(bFlag) { pUser->m_UserItem[iSlot].tType = newItem.tType; pUser->m_UserItem[iSlot].sLevel = newItem.sLevel; pUser->m_UserItem[iSlot].sSid = newItem.sSid; pUser->m_UserItem[iSlot].sCount = newItem.sCount; pUser->m_UserItem[iSlot].sDuration = newItem.sDuration; pUser->m_UserItem[iSlot].sBullNum = newItem.sBullNum; pUser->m_UserItem[iSlot].tIQ = newItem.tIQ; pUser->m_UserItem[iSlot].iItemSerial = newItem.iItemSerial; for( j = 0; j < MAGIC_NUM; j++ ) pUser->m_UserItem[iSlot].tMagic[j] = newItem.tMagic[j]; CBufferEx TempBuf; TempBuf.Add(ITEM_LOAD_RESULT); TempBuf.Add(SUCCESS); TempBuf.Add((BYTE)0x01); TempBuf.Add((BYTE)iSlot); TempBuf.Add(pUser->m_UserItem[iSlot].sLevel); TempBuf.Add(pUser->m_UserItem[iSlot].sSid); TempBuf.Add(pUser->m_UserItem[iSlot].sDuration); TempBuf.Add(pUser->m_UserItem[iSlot].sBullNum); TempBuf.Add(pUser->m_UserItem[iSlot].sCount); for(j = 0; j < MAGIC_NUM; j++) TempBuf.Add((BYTE)pUser->m_UserItem[iSlot].tMagic[j]); TempBuf.Add((BYTE)pUser->m_UserItem[iSlot].tIQ); pUser->Send(TempBuf, TempBuf.GetLength()); pUser->m_iCurWeight += pItemTable->m_byWeight; pUser->GetRecoverySpeed(); // 酒捞袍 公霸俊 函悼捞 积扁搁 雀汗加档 函券 switch( sItemSid ) { case NPC_EVENT_FLOWER: pUser->m_pCom->Announce((LPTSTR)(LPCTSTR)strMsg, SYSTEM_NORMAL); // pUser->m_pCom->Announce(strMsg.GetBuffer(strMsg.GetLength()), SYSTEM_NORMAL); break; case NPC_EVENT_LOTTO: pUser->SendSystemMsg( IDS_EVENT_LOTTO, SYSTEM_NORMAL, TO_ME); break; default: return; } strMsg.Format("(%04d-%02d-%02d %02d:%02d:%02d) %s - Get %d Item(%d)\r\n", time.wYear, time.wMonth, time.wDay, time.wHour, time.wMinute, time.wSecond, pUser->m_strUserID, newItem.sSid, upper.i * 10000 + lower.i ); EnterCriticalSection( &m_CS_EventItemLogFileWrite ); g_fpEventItem.Write( strMsg, strMsg.GetLength() ); LeaveCriticalSection( &m_CS_EventItemLogFileWrite ); } else { // 牢亥持扁俊 角菩窍搁 甘俊 悸泼 GiveItemToMap( pUser->m_pCom, &newItem ); strMsg.Format("(%04d-%02d-%02d %02d:%02d:%02d) %s - Map %d Item(%d)\r\n", time.wYear, time.wMonth, time.wDay, time.wHour, time.wMinute, time.wSecond, pUser->m_strUserID, newItem.sSid, upper.i * 10000 + lower.i ); EnterCriticalSection( &m_CS_EventItemLogFileWrite ); g_fpEventItem.Write( strMsg, strMsg.GetLength() ); LeaveCriticalSection( &m_CS_EventItemLogFileWrite ); } } } } void CNpc::GiveItemToMap(COM *pCom, ItemList *pItem) { CPoint pt = FindNearRandomPointForItem(m_sCurX, m_sCurY); // 泅犁 磊扁谅钎甫 力寇茄 24沫 if(pt.x <= -1 || pt.y <= -1) return; if(pt.x >= g_zone[m_ZoneIndex]->m_sizeMap.cx || pt.y >= g_zone[m_ZoneIndex]->m_sizeMap.cy) return; if( InterlockedCompareExchange((LONG*)&g_zone[m_ZoneIndex]->m_pMap[pt.x][pt.y].m_FieldUse, (long)1, (long)0) == (long)0 ) { ItemList* pNewItem = new ItemList; memcpy( pNewItem, pItem, sizeof( ItemList ) ); // 秦寸 蜡历俊霸 舅赴促. //pCom->DelThrowItem(); pCom->SetThrowItem( pNewItem, pt.x, pt.y, m_ZoneIndex ); ::InterlockedExchange(&g_zone[m_ZoneIndex]->m_pMap[pt.x][pt.y].m_FieldUse, 0); } } BOOL CNpc::UpdateEventItemNewRemain(CEventItemNew *pEventItem) { SQLSMALLINT sRet = -1; SQLINTEGER iRetInd = SQL_NTS; SQLHSTMT hstmt = NULL; SQLRETURN retcode; TCHAR szSQL[8000]; ::ZeroMemory(szSQL, sizeof(szSQL)); _sntprintf(szSQL, sizeof(szSQL), TEXT("{call update_event_item_new_remain ( %d, %d, ? )}"), pEventItem->m_sIndex, pEventItem->m_sRemain ); int db_index = 0; CDatabase* pDB = g_DBNew[AUTOMATA_THREAD].GetDB( db_index ); if( !pDB ) return FALSE; retcode = SQLAllocHandle( (SQLSMALLINT)SQL_HANDLE_STMT, pDB->m_hdbc, &hstmt ); if( retcode != SQL_SUCCESS ) { g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); return FALSE; } int i = 1; SQLBindParameter( hstmt, i++ ,SQL_PARAM_OUTPUT,SQL_C_SSHORT, SQL_SMALLINT, 0, 0, &sRet, 0, &iRetInd); retcode = SQLExecDirect( hstmt, (unsigned char*)szSQL, sizeof(szSQL)); if( retcode == SQL_SUCCESS || retcode == SQL_SUCCESS_WITH_INFO ) { } else { DisplayErrorMsg(hstmt); retcode = SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); return FALSE; } retcode = SQLFreeHandle( (SQLSMALLINT)SQL_HANDLE_STMT, hstmt); g_DBNew[AUTOMATA_THREAD].ReleaseDB(db_index); if( sRet == -1 ) return FALSE; return TRUE; } void CNpc::UserListSort() { int i, j; int total = 0; ItemUserRightlist temp; for(i = 0; i < NPC_HAVE_USER_LIST; i++) { m_iHaveItemUid[i].uid = -1; m_iHaveItemUid[i].nDamage = 0; if( m_DamagedUserList[i].nDamage > 0 ) { m_iHaveItemUid[i].uid = m_DamagedUserList[i].iUid; m_iHaveItemUid[i].nDamage = m_DamagedUserList[i].nDamage; } } for(i = 2; i < NPC_HAVE_USER_LIST; i++) { temp.uid = m_iHaveItemUid[i].uid; temp.nDamage = m_iHaveItemUid[i].nDamage; j = i; while(m_iHaveItemUid[j-1].nDamage < temp.nDamage) { m_iHaveItemUid[j].uid = m_iHaveItemUid[j-1].uid; m_iHaveItemUid[j].nDamage = m_iHaveItemUid[j-1].nDamage; j--; if(j <= 0) break; } m_iHaveItemUid[j].uid = temp.uid; m_iHaveItemUid[j].nDamage = temp.nDamage; } for(i = 0; i < ITEM_USER_RIGHT_NUM; i++) { if(m_iHaveItemUid[i].nDamage > 0) total += m_iHaveItemUid[i].nDamage; } if(total <= 0) total = 1; for(i = 0; i < ITEM_USER_RIGHT_NUM; i++) { j = 0; j = (int)( (m_iHaveItemUid[i].nDamage * 100)/total ); if(j > 100) j = 100; else if(j <= 0) j = 1; m_iHaveItemUid[i].nDamage = (BYTE)j; } } DWORD CNpc::GetItemThrowTime() { DWORD dwCurTime = 0; SYSTEMTIME SaveTime; GetLocalTime(&SaveTime); WORD wTemp = 0; DWORD dwYear = 0; DWORD dwMon = 0; DWORD dwDay = 0; DWORD dwHour = 0; DWORD dwMin = 0; DWORD dwSecond = 0; // 2 Byte 滚府绊 wTemp = SaveTime.wYear << 12; // 惑困 4 Byte wTemp = wTemp >> 12; dwYear = (DWORD)wTemp; dwYear = dwYear << 26; wTemp = SaveTime.wMonth << 12; // 4 Byte wTemp = wTemp >> 12; dwMon = (DWORD)wTemp; dwMon = dwMon << 22; wTemp = SaveTime.wDay << 11; // 5 Byte wTemp = wTemp >> 11; dwDay = (DWORD)wTemp; dwDay = dwDay << 17; wTemp = SaveTime.wHour << 11; // 5 Byte wTemp = wTemp >> 11; dwHour = (DWORD)wTemp; dwHour = dwHour << 12; wTemp = SaveTime.wMinute << 10; // 6 Byte wTemp = wTemp >> 10; dwMin = (DWORD)wTemp; dwMin = dwMin << 6; wTemp = SaveTime.wSecond << 10; // 6 Byte wTemp = wTemp >> 10; dwSecond = (DWORD)wTemp; dwCurTime = dwYear^dwMon^dwDay^dwHour^dwMin^dwSecond; return dwCurTime; } BOOL CNpc::CheckUserForNpc_Live(int x, int y) { return TRUE; int min_x, min_y, max_x, max_y; min_x = m_sCurX - m_bySearchRange; if( min_x < 0 ) min_x = 0; min_y = m_sCurY - m_bySearchRange; if( min_y < 0 ) min_y = 0; max_x = m_sCurX + m_bySearchRange; max_y = m_sCurY + m_bySearchRange; if(max_x >= g_zone[m_ZoneIndex]->m_sizeMap.cx) max_x = g_zone[m_ZoneIndex]->m_sizeMap.cx - 2; if(max_y >= g_zone[m_ZoneIndex]->m_sizeMap.cy) max_y = g_zone[m_ZoneIndex]->m_sizeMap.cy - 2; int ix, iy; int target_uid; int tempLevel = 0, oldLevel = 1000; for(ix = min_x; ix <= max_x; ix++) { for(iy = min_y; iy <= max_y; iy++) { target_uid = m_pOrgMap[ix][iy].m_lUser; if( target_uid >= USER_BAND && target_uid < NPC_BAND ) return FALSE; } } return TRUE; } ///////////////////////////////////////////////////////////////////////////////////////////// // Summon 栏肺 牢秦 函版等 困摹沥焊甫 林函俊 焊辰促. // void CNpc::SendNpcInfoBySummon(COM *pCom) { int min_x = 0, min_y = 0; int max_x = 0, max_y = 0; int sx = m_sCurX / SIGHT_SIZE_X; int sy = m_sCurY / SIGHT_SIZE_Y; int delete_index = 0; char delete_send[1024]; ::ZeroMemory(delete_send, sizeof(delete_send)); FillNpcInfo(delete_send, delete_index, INFO_DELETE); min_x = (sx-1)*SIGHT_SIZE_X; max_x = (sx+2)*SIGHT_SIZE_X; min_y = (sy-1)*SIGHT_SIZE_Y; max_y = (sy+2)*SIGHT_SIZE_Y; SendToRange(pCom, delete_send, delete_index, min_x, min_y, max_x, max_y); } <file_sep>/GuildMapInfoSet.cpp // GuildMapInfoSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "GuildMapInfoSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CGuildMapInfoSet IMPLEMENT_DYNAMIC(CGuildMapInfoSet, CRecordset) CGuildMapInfoSet::CGuildMapInfoSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CGuildMapInfoSet) m_sMapIndex = 0; m_tPkMode = 0; m_sReleatedStore = 0; m_sGuildZone = 0; m_nFields = 4; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CGuildMapInfoSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CGuildMapInfoSet::GetDefaultSQL() { return _T("[dbo].[GUILD_MAP_INFO]"); } void CGuildMapInfoSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CGuildMapInfoSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sMapIndex]"), m_sMapIndex); RFX_Byte(pFX, _T("[tPkMode]"), m_tPkMode); RFX_Int(pFX, _T("[sReleatedStore]"), m_sReleatedStore); RFX_Int(pFX, _T("[sGuildZone]"), m_sGuildZone); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CGuildMapInfoSet diagnostics #ifdef _DEBUG void CGuildMapInfoSet::AssertValid() const { CRecordset::AssertValid(); } void CGuildMapInfoSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/RecoverRate.h // RecoverRate.h: interface for the CRecoverRate class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_RECOVERRATE_H__0A857551_A515_4E70_8FF8_18BE0C8BA5F7__INCLUDED_) #define AFX_RECOVERRATE_H__0A857551_A515_4E70_8FF8_18BE0C8BA5F7__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CRecoverRate { public: short m_sSid; short m_sHpSpeedupTime; BYTE m_byHpSpeedupLevel; short m_sSpSpeedupTime; BYTE m_bySpSpeedupLevel; short m_sPpSpeedupTime; BYTE m_byPpSpeedupLevel; CRecoverRate(); virtual ~CRecoverRate(); }; #endif // !defined(AFX_RECOVERRATE_H__0A857551_A515_4E70_8FF8_18BE0C8BA5F7__INCLUDED_) <file_sep>/GuildHouse.h // GuildHouse.h: interface for the CGuildHouse class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_GUILDHOUSE_H__A5E786E0_7209_4C84_8A14_039ACFFEF5CD__INCLUDED_) #define AFX_GUILDHOUSE_H__A5E786E0_7209_4C84_8A14_039ACFFEF5CD__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CGuildHouse { public: void UpdateGuildHouse(); CGuildHouse(); virtual ~CGuildHouse(); int iSid; int iGuild; int iZone; int iPotalX; int iPotalY; int iMarkNpc; long lUsed; }; #endif // !defined(AFX_GUILDHOUSE_H__A5E786E0_7209_4C84_8A14_039ACFFEF5CD__INCLUDED_) <file_sep>/NpcSayTable.cpp // NpcSayTable.cpp: implementation of the CNpcSayTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "NpcSayTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CNpcSayTable::CNpcSayTable() { iSid = 0; Say = _T(""); } CNpcSayTable::~CNpcSayTable() { } <file_sep>/SummonTable.cpp // SummonTable.cpp: implementation of the CSummonTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "SummonTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CSummonTable::CSummonTable() { m_sSid = 0; m_strName = _T(""); } CSummonTable::~CSummonTable() { } <file_sep>/NpcChatSet.cpp // NpcChatSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "NpcChatSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CNpcChatSet IMPLEMENT_DYNAMIC(CNpcChatSet, CRecordset) CNpcChatSet::CNpcChatSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CNpcChatSet) m_sCid = 0; m_sSize = 0; m_strTalk = _T(""); m_nFields = 3; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CNpcChatSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CNpcChatSet::GetDefaultSQL() { return _T("[dbo].[NPC_CHAT]"); } void CNpcChatSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CNpcChatSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sCid]"), m_sCid); RFX_Int(pFX, _T("[sSize]"), m_sSize); RFX_Text(pFX, _T("[strTalk]"), m_strTalk); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CNpcChatSet diagnostics #ifdef _DEBUG void CNpcChatSet::AssertValid() const { CRecordset::AssertValid(); } void CNpcChatSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/PsiTable.h // PsiTable.h: interface for the CPsiTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_PSITABLE_H__89EA456F_CFAC_4163_BBA4_38FC183C0CE5__INCLUDED_) #define AFX_PSITABLE_H__89EA456F_CFAC_4163_BBA4_38FC183C0CE5__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CPsiTable { public: short m_sSid; short m_sPid; CString m_strName; DWORD m_iNeedDN; DWORD m_iNeedXP; short m_sDelayTime; short m_sHoldTime; BYTE m_tClass; BYTE m_tNeedLevel; int m_tNeedPP; BYTE m_tRange; BYTE m_tRegi; BYTE m_tTarget; CString m_strText; short m_sBasic; short m_sLevelUp; CPsiTable(); ~CPsiTable(); }; #endif // !defined(AFX_PSITABLE_H__89EA456F_CFAC_4163_BBA4_38FC183C0CE5__INCLUDED_) <file_sep>/MonsterDamageRateSet.cpp // MonsterDamageRateSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "MonsterDamageRateSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CMonsterDamageRateSet IMPLEMENT_DYNAMIC(CMonsterDamageRateSet, CRecordset) CMonsterDamageRateSet::CMonsterDamageRateSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CMonsterDamageRateSet) m_F1 = 0; m_F2 = 0; m_F3 = 0; m_F4 = 0; m_F5 = 0; m_F6 = 0; m_F7 = 0; m_F8 = 0; m_F9 = 0; m_F10 = 0; m_F11 = 0; m_F12 = 0; m_F13 = 0; m_F14 = 0; m_F15 = 0; m_F16 = 0; m_F17 = 0; m_F18 = 0; m_F19 = 0; m_F20 = 0; m_F21 = 0; m_F22 = 0; m_F23 = 0; m_F24 = 0; m_F25 = 0; m_F26 = 0; m_F27 = 0; m_F28 = 0; m_F29 = 0; m_F30 = 0; m_F31 = 0; m_F32 = 0; m_F33 = 0; m_F34 = 0; m_F35 = 0; m_F36 = 0; m_F37 = 0; m_F38 = 0; m_F39 = 0; m_F40 = 0; m_F41 = 0; m_F42 = 0; m_F43 = 0; m_F44 = 0; m_F45 = 0; m_F46 = 0; m_F47 = 0; m_F48 = 0; m_F49 = 0; m_F50 = 0; m_F51 = 0; m_F52 = 0; m_F53 = 0; m_F54 = 0; m_F55 = 0; m_F56 = 0; m_F57 = 0; m_F58 = 0; m_F59 = 0; m_F60 = 0; m_F61 = 0; m_F62 = 0; m_F63 = 0; m_F64 = 0; m_F65 = 0; m_F66 = 0; m_F67 = 0; m_F68 = 0; m_F69 = 0; m_F70 = 0; m_F71 = 0; m_F72 = 0; m_F73 = 0; m_F74 = 0; m_F75 = 0; m_F76 = 0; m_F77 = 0; m_F78 = 0; m_F79 = 0; m_F80 = 0; m_F81 = 0; m_F82 = 0; m_F83 = 0; m_F84 = 0; m_F85 = 0; m_F86 = 0; m_F87 = 0; m_F88 = 0; m_F89 = 0; m_F90 = 0; m_F91 = 0; m_F92 = 0; m_F93 = 0; m_F94 = 0; m_F95 = 0; m_F96 = 0; m_F97 = 0; m_F98 = 0; m_F99 = 0; m_F100 = 0; m_nFields = 100; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CMonsterDamageRateSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=z6game;PWD=!<PASSWORD>"); } CString CMonsterDamageRateSet::GetDefaultSQL() { return _T("[dbo].[MON_DAMAGE_RATE]"); } void CMonsterDamageRateSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CMonsterDamageRateSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[F1]"), m_F1); RFX_Int(pFX, _T("[F2]"), m_F2); RFX_Int(pFX, _T("[F3]"), m_F3); RFX_Int(pFX, _T("[F4]"), m_F4); RFX_Int(pFX, _T("[F5]"), m_F5); RFX_Int(pFX, _T("[F6]"), m_F6); RFX_Int(pFX, _T("[F7]"), m_F7); RFX_Int(pFX, _T("[F8]"), m_F8); RFX_Int(pFX, _T("[F9]"), m_F9); RFX_Int(pFX, _T("[F10]"), m_F10); RFX_Int(pFX, _T("[F11]"), m_F11); RFX_Int(pFX, _T("[F12]"), m_F12); RFX_Int(pFX, _T("[F13]"), m_F13); RFX_Int(pFX, _T("[F14]"), m_F14); RFX_Int(pFX, _T("[F15]"), m_F15); RFX_Int(pFX, _T("[F16]"), m_F16); RFX_Int(pFX, _T("[F17]"), m_F17); RFX_Int(pFX, _T("[F18]"), m_F18); RFX_Int(pFX, _T("[F19]"), m_F19); RFX_Int(pFX, _T("[F20]"), m_F20); RFX_Int(pFX, _T("[F21]"), m_F21); RFX_Int(pFX, _T("[F22]"), m_F22); RFX_Int(pFX, _T("[F23]"), m_F23); RFX_Int(pFX, _T("[F24]"), m_F24); RFX_Int(pFX, _T("[F25]"), m_F25); RFX_Int(pFX, _T("[F26]"), m_F26); RFX_Int(pFX, _T("[F27]"), m_F27); RFX_Int(pFX, _T("[F28]"), m_F28); RFX_Int(pFX, _T("[F29]"), m_F29); RFX_Int(pFX, _T("[F30]"), m_F30); RFX_Int(pFX, _T("[F31]"), m_F31); RFX_Int(pFX, _T("[F32]"), m_F32); RFX_Int(pFX, _T("[F33]"), m_F33); RFX_Int(pFX, _T("[F34]"), m_F34); RFX_Int(pFX, _T("[F35]"), m_F35); RFX_Int(pFX, _T("[F36]"), m_F36); RFX_Int(pFX, _T("[F37]"), m_F37); RFX_Int(pFX, _T("[F38]"), m_F38); RFX_Int(pFX, _T("[F39]"), m_F39); RFX_Int(pFX, _T("[F40]"), m_F40); RFX_Int(pFX, _T("[F41]"), m_F41); RFX_Int(pFX, _T("[F42]"), m_F42); RFX_Int(pFX, _T("[F43]"), m_F43); RFX_Int(pFX, _T("[F44]"), m_F44); RFX_Int(pFX, _T("[F45]"), m_F45); RFX_Int(pFX, _T("[F46]"), m_F46); RFX_Int(pFX, _T("[F47]"), m_F47); RFX_Int(pFX, _T("[F48]"), m_F48); RFX_Int(pFX, _T("[F49]"), m_F49); RFX_Int(pFX, _T("[F50]"), m_F50); RFX_Int(pFX, _T("[F51]"), m_F51); RFX_Int(pFX, _T("[F52]"), m_F52); RFX_Int(pFX, _T("[F53]"), m_F53); RFX_Int(pFX, _T("[F54]"), m_F54); RFX_Int(pFX, _T("[F55]"), m_F55); RFX_Int(pFX, _T("[F56]"), m_F56); RFX_Int(pFX, _T("[F57]"), m_F57); RFX_Int(pFX, _T("[F58]"), m_F58); RFX_Int(pFX, _T("[F59]"), m_F59); RFX_Int(pFX, _T("[F60]"), m_F60); RFX_Int(pFX, _T("[F61]"), m_F61); RFX_Int(pFX, _T("[F62]"), m_F62); RFX_Int(pFX, _T("[F63]"), m_F63); RFX_Int(pFX, _T("[F64]"), m_F64); RFX_Int(pFX, _T("[F65]"), m_F65); RFX_Int(pFX, _T("[F66]"), m_F66); RFX_Int(pFX, _T("[F67]"), m_F67); RFX_Int(pFX, _T("[F68]"), m_F68); RFX_Int(pFX, _T("[F69]"), m_F69); RFX_Int(pFX, _T("[F70]"), m_F70); RFX_Int(pFX, _T("[F71]"), m_F71); RFX_Int(pFX, _T("[F72]"), m_F72); RFX_Int(pFX, _T("[F73]"), m_F73); RFX_Int(pFX, _T("[F74]"), m_F74); RFX_Int(pFX, _T("[F75]"), m_F75); RFX_Int(pFX, _T("[F76]"), m_F76); RFX_Int(pFX, _T("[F77]"), m_F77); RFX_Int(pFX, _T("[F78]"), m_F78); RFX_Int(pFX, _T("[F79]"), m_F79); RFX_Int(pFX, _T("[F80]"), m_F80); RFX_Int(pFX, _T("[F81]"), m_F81); RFX_Int(pFX, _T("[F82]"), m_F82); RFX_Int(pFX, _T("[F83]"), m_F83); RFX_Int(pFX, _T("[F84]"), m_F84); RFX_Int(pFX, _T("[F85]"), m_F85); RFX_Int(pFX, _T("[F86]"), m_F86); RFX_Int(pFX, _T("[F87]"), m_F87); RFX_Int(pFX, _T("[F88]"), m_F88); RFX_Int(pFX, _T("[F89]"), m_F89); RFX_Int(pFX, _T("[F90]"), m_F90); RFX_Int(pFX, _T("[F91]"), m_F91); RFX_Int(pFX, _T("[F92]"), m_F92); RFX_Int(pFX, _T("[F93]"), m_F93); RFX_Int(pFX, _T("[F94]"), m_F94); RFX_Int(pFX, _T("[F95]"), m_F95); RFX_Int(pFX, _T("[F96]"), m_F96); RFX_Int(pFX, _T("[F97]"), m_F97); RFX_Int(pFX, _T("[F98]"), m_F98); RFX_Int(pFX, _T("[F99]"), m_F99); RFX_Int(pFX, _T("[F100]"), m_F100); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CMonsterDamageRateSet diagnostics #ifdef _DEBUG void CMonsterDamageRateSet::AssertValid() const { CRecordset::AssertValid(); } void CMonsterDamageRateSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/MonsterDamageRateSet.h #if !defined(AFX_MONSTERDAMAGERATESET_H__3A0EF036_28A1_4BC8_A0F0_5F871FA1905F__INCLUDED_) #define AFX_MONSTERDAMAGERATESET_H__3A0EF036_28A1_4BC8_A0F0_5F871FA1905F__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // MonsterDamageRateSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CMonsterDamageRateSet recordset class CMonsterDamageRateSet : public CRecordset { public: CMonsterDamageRateSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CMonsterDamageRateSet) // Field/Param Data //{{AFX_FIELD(CMonsterDamageRateSet, CRecordset) int m_F1; int m_F2; int m_F3; int m_F4; int m_F5; int m_F6; int m_F7; int m_F8; int m_F9; int m_F10; int m_F11; int m_F12; int m_F13; int m_F14; int m_F15; int m_F16; int m_F17; int m_F18; int m_F19; int m_F20; int m_F21; int m_F22; int m_F23; int m_F24; int m_F25; int m_F26; int m_F27; int m_F28; int m_F29; int m_F30; int m_F31; int m_F32; int m_F33; int m_F34; int m_F35; int m_F36; int m_F37; int m_F38; int m_F39; int m_F40; int m_F41; int m_F42; int m_F43; int m_F44; int m_F45; int m_F46; int m_F47; int m_F48; int m_F49; int m_F50; int m_F51; int m_F52; int m_F53; int m_F54; int m_F55; int m_F56; int m_F57; int m_F58; int m_F59; int m_F60; int m_F61; int m_F62; int m_F63; int m_F64; int m_F65; int m_F66; int m_F67; int m_F68; int m_F69; int m_F70; int m_F71; int m_F72; int m_F73; int m_F74; int m_F75; int m_F76; int m_F77; int m_F78; int m_F79; int m_F80; int m_F81; int m_F82; int m_F83; int m_F84; int m_F85; int m_F86; int m_F87; int m_F88; int m_F89; int m_F90; int m_F91; int m_F92; int m_F93; int m_F94; int m_F95; int m_F96; int m_F97; int m_F98; int m_F99; int m_F100; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CMonsterDamageRateSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_MONSTERDAMAGERATESET_H__3A0EF036_28A1_4BC8_A0F0_5F871FA1905F__INCLUDED_) <file_sep>/LOGIC_ELSE.h // LOGIC_ELSE.h: interface for the LOGIC_ELSE class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_LOGIC_ELSE_H__B1062E53_5608_11D3_BE54_00105A6B97E2__INCLUDED_) #define AFX_LOGIC_ELSE_H__B1062E53_5608_11D3_BE54_00105A6B97E2__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class LOGIC_ELSE { public: void Parse_and(char* pBuf); void Parse_or(char* pBuf); void Init(); BYTE m_LogicElse; BOOL m_bAnd; char m_LogicElseChar[MAX_LOGIC_ELSE_CHAR_LEN]; int m_LogicElseInt[MAX_LOGIC_ELSE_INT]; LOGIC_ELSE(); virtual ~LOGIC_ELSE(); }; #endif // !defined(AFX_LOGIC_ELSE_H__B1062E53_5608_11D3_BE54_00105A6B97E2__INCLUDED_) <file_sep>/EventItemNew.cpp // EventItemNew.cpp: implementation of the CEventItemNew class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "EventItemNew.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CEventItemNew::CEventItemNew() { m_sIndex = -1; m_sSid = -1; m_tQuality = 0; m_sRemain = 0; m_tSerialExist = 0; m_iThrowTerm = 0; m_iThrowRandom = 0; m_sNextYear = 0; m_sNextMonth = 0; m_sNextDay = 0; m_sNextHour = 0; m_sNextMin = 0; m_lGive = 0; } CEventItemNew::~CEventItemNew() { } <file_sep>/AddEventItemTableSet.h #if !defined(AFX_ADDEVENTITEMTABLESET_H__483BAAED_0F5B_45BF_B92D_A12E855CE01A__INCLUDED_) #define AFX_ADDEVENTITEMTABLESET_H__483BAAED_0F5B_45BF_B92D_A12E855CE01A__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // AddEventItemTableSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CAddEventItemTableSet recordset class CAddEventItemTableSet : public CRecordset { public: CAddEventItemTableSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CAddEventItemTableSet) // Field/Param Data //{{AFX_FIELD(CAddEventItemTableSet, CRecordset) int m_sSid; BYTE m_tType; BYTE m_tGiveFlag; BYTE m_tEnd; BYTE m_tUsed; CString m_strSerialNum; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CAddEventItemTableSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_ADDEVENTITEMTABLESET_H__483BAAED_0F5B_45BF_B92D_A12E855CE01A__INCLUDED_) <file_sep>/ValItemTableSet.cpp // ValItemTableSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "ValItemTableSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CValItemTableSet IMPLEMENT_DYNAMIC(CValItemTableSet, CRecordset) CValItemTableSet::CValItemTableSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CValItemTableSet) m_sSid = 0; m_sValItem01 = 0; m_tPersentVal01 = 0; m_sValItem02 = 0; m_tPersentVal02 = 0; m_sValItem03 = 0; m_tPersentVal03 = 0; m_sValItem04 = 0; m_tPersentVal04 = 0; m_sValItem05 = 0; m_tPersentVal05 = 0; m_sValItem06 = 0; m_tPersentVal06 = 0; m_nFields = 13; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CValItemTableSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=drgame"); } CString CValItemTableSet::GetDefaultSQL() { return _T("[dbo].[VAL_ITEM]"); } void CValItemTableSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CValItemTableSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sSid]"), m_sSid); RFX_Int(pFX, _T("[sValItem01]"), m_sValItem01); RFX_Byte(pFX, _T("[tPersentVal01]"), m_tPersentVal01); RFX_Int(pFX, _T("[sValItem02]"), m_sValItem02); RFX_Byte(pFX, _T("[tPersentVal02]"), m_tPersentVal02); RFX_Int(pFX, _T("[sValItem03]"), m_sValItem03); RFX_Byte(pFX, _T("[tPersentVal03]"), m_tPersentVal03); RFX_Int(pFX, _T("[sValItem04]"), m_sValItem04); RFX_Byte(pFX, _T("[tPersentVal04]"), m_tPersentVal04); RFX_Int(pFX, _T("[sValItem05]"), m_sValItem05); RFX_Byte(pFX, _T("[tPersentVal05]"), m_tPersentVal05); RFX_Int(pFX, _T("[sValItem06]"), m_sValItem06); RFX_Byte(pFX, _T("[tPersentVal06]"), m_tPersentVal06); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CValItemTableSet diagnostics #ifdef _DEBUG void CValItemTableSet::AssertValid() const { CRecordset::AssertValid(); } void CValItemTableSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/EBodyTable.h // EBodyTable.h: interface for the CEBodyTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_EBODYTABLE_H__59A1C67E_8F9D_43A1_BC80_F9766EC5B86B__INCLUDED_) #define AFX_EBODYTABLE_H__59A1C67E_8F9D_43A1_BC80_F9766EC5B86B__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CEBodyTable { public: CEBodyTable(); virtual ~CEBodyTable(); public: short m_sChangeValue; short m_sRandom; short m_sSubType; BYTE m_tLevel; BYTE m_tNeedClass; BYTE m_tSid; BYTE m_tUpgrade; BYTE m_tWearInfo; }; #endif // !defined(AFX_EBODYTABLE_H__59A1C67E_8F9D_43A1_BC80_F9766EC5B86B__INCLUDED_) <file_sep>/SocketManager.h /////////////////////////////////////////////////////////////////////////////// // Socket Manager Header file define // #ifndef __SOCKETMANAGER_H #define __SOCKETMANAGER_H #include "Poolbase.h" class CSocketManager : public CPoolBaseManager { public: int sid; public: CSocketManager(); ~CSocketManager(); }; #endif <file_sep>/UNI_CHAR.h // UNI_CHAR.h: interface for the UNI_CHAR class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_UNI_CHAR_H__D72EBF2D_980C_4CC2_B41E_B0DE75BF52E9__INCLUDED_) #define AFX_UNI_CHAR_H__D72EBF2D_980C_4CC2_B41E_B0DE75BF52E9__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class UNI_CHAR { public: //typedef struct class CVJ { friend UNI_CHAR; protected: int C; int V; int J; }; //초성 중성 종성의 번호를 저장할 구조체 static BOOL CheckString(CString str); UNI_CHAR(); virtual ~UNI_CHAR(); protected: static BOOL CheckUNIChar(CVJ cvj); static CVJ UNIDivChar(char *pDivWord); protected: //유니코드의 자모 번호 //유니코드 한글 초성 순서표 static char *UniCodeC[19]; //유니코드 한글 중성 순서표 static char *UniCodeV[21]; //유니코드 한글 종성 순서표 static char *UniCodeJ[28]; }; #endif // !defined(AFX_UNI_CHAR_H__D72EBF2D_980C_4CC2_B41E_B0DE75BF52E9__INCLUDED_) <file_sep>/SCDefine.h /////////////////////////////////////////////////////////////////////////////// // Define.h // #ifndef _DEFINE_H #define _DEFINE_H #define WM_SOCKET_DATARECEIVE 7000 // // Defines About Communication #define USER_SOCKET_PORT 12000 #define OVL_RECEIVE 0X01 #define OVL_SEND 0X02 #define OVL_CLOSE 0X03 #define PACKET_START1 0XAA #define PACKET_START2 0X55 #define PACKET_END1 0X55 #define PACKET_END2 0XAA #define SOCKET_BUF_SIZE 10000 // PROTOCAL #define PROTOCOL_VER 1 #define PACKET_DATA_SIZE SOCKET_BUF_SIZE // // DEFINE MACRO PART... #define MAX_SEND_BUFF_SIZE (SOCKET_BUF_SIZE*10) // (1024*8) #define MAX_RECV_BUFF_SIZE SOCKET_BUF_SIZE // //CJSocket Protocol Parsing Data #define JS_SOCKET_BUF_SIZE SOCKET_BUF_SIZE #define JS_PACKET_DATA_SIZE SOCKET_BUF_SIZE #define JS_PACKET_START1 0XFE #define JS_PACKET_START2 0XFF #define JS_PACKET_END1 0XFF #define JS_PACKET_END2 0XFE #define JS_PROTOCOL_VER 0X01 #define J_PACKET_START1 0XFE #define J_PACKET_START2 0XFF #define J_PACKET_END1 0XFF #define J_PACKET_END2 0XFE #define BufInc(x) (x)++;(x) %= JS_SOCKET_BUF_SIZE; // State Value // #define STATE_ACCEPTED 0X01 #define STATE_CONNECTED 0X02 #define STATE_DISCONNECTED 0X03 #define STATE_CONNECT 0X04 #define receives 0 #define sends 1 #define both 2 #define UM_PROCESS_REMOVESOCKET 30000 #define UM_PROCESS_SOCKETACCEPT 30001 /////////////////////////////////////////////////////////////////////////////// // SESSION PROTOCOL DEFINE... // #define ERROR_INVALID 1 #define ERROR_CLIENTSEND 2 #define ERROR_CLIENTRECV 3 #define ERROR_SERVERSEND 4 #define ERROR_SERVERRECV 5 #define ERROR_RECVTIMEOUT 6 #define ERROR_SUSPEND 7 #define ERROR_SERVERFULL 8 #define ERROR_DENYOPEN 9 #define ERROR_UNLINK 10 #define ERROR_USER 100 /* typedef union { short int i; BYTE b[2]; } MYSHORT; */ typedef union { int i; BYTE b[4]; } MYINT; typedef union { DWORD w; BYTE b[4]; } MYDWORD; #endif /////////////////////////////////////////////////////////////////////////////// <file_sep>/YhuoIni.h // YhuoIni.h: interface for the YhuoIni class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_YHUOINI_H__91E00755_5610_4254_B6F1_F1A31A9E8E1F__INCLUDED_) #define AFX_YHUOINI_H__91E00755_5610_4254_B6F1_F1A31A9E8E1F__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class YhuoIni ///////ÅäÖÃini { public: YhuoIni(); virtual ~YhuoIni(); //[WEBURL] TCHAR WEB_URL[100]; //[·þÎñÆ÷] int EXP_50; int EXP_60; int EXP_70; int EXP_80; int EXP_90; int EXP_100; int EXP_110; int EXP_120; int EXP_130; int EXP_140; int EXP_150; int EXP_155; int EXP_160; int EXP_165; int EXP_170; int EXP_175; int EXP_180; int EXP_185; int EXP_190; int EXP_195; int EXP_200; ////[ÉèÖÃ] int bbzh; int zgdj; int mimabaohu; int djxz; int ysjg; int jtbh; int pjy; int zxsj; int gaishu; int chongdie; int xyl; int xyh; int lan; int huang; int chaoneng; int jiance; int jxrw; }; #endif // !defined(AFX_YHUOINI_H__91E00755_5610_4254_B6F1_F1A31A9E8E1F__INCLUDED_) <file_sep>/CircularBuffer.h // CircularBuffer.h: interface for the CCircularBuffer class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_CIRCULARBUFFER_H__F4D345A4_CE05_11D1_8BEE_0060979C5900__INCLUDED_) #define AFX_CIRCULARBUFFER_H__F4D345A4_CE05_11D1_8BEE_0060979C5900__INCLUDED_ #if _MSC_VER >= 1000 #pragma once #endif // _MSC_VER >= 1000 class CCircularBuffer { public: CCircularBuffer(int size=8192); virtual ~CCircularBuffer(); void PutData(char *pData, int len); void GetData(char *pData, int len); int GetOutData(char *pData); //HeadPos, 변화 void PutData(char& data); char& GetHeadData(){return m_pBuffer[m_iHeadPos];} //1 Byte Operation; //false : 모든데이터 다빠짐, TRUE: 정상적으로 진행중 BOOL HeadIncrease(int increasement=1); void SetEmpty() {m_iHeadPos=0; m_iTailPos=0;} int& GetBufferSize() {return m_iBufSize;} int& GetHeadPos() {return m_iHeadPos;} int& GetTailPos() {return m_iTailPos;} int GetValidCount(); protected: //over flow 먼저 점검한 후 IndexOverFlow 점검 BOOL IsOverFlowCondition(int &len) {return (len >= m_iBufSize-GetValidCount()) ? TRUE: FALSE;} BOOL IsIndexOverFlow(int &len) {return (len+m_iTailPos>=m_iBufSize) ? TRUE:FALSE;} void BufferResize(); //overflow condition 일때 size를 현재의 두배로 늘림 protected: int m_iBufSize; char *m_pBuffer; int m_iHeadPos; int m_iTailPos; }; inline int CCircularBuffer::GetValidCount() { int count = m_iTailPos-m_iHeadPos; if (count<0) count = m_iBufSize+count; return count; } inline void CCircularBuffer::BufferResize() { int prevBufSize = m_iBufSize; m_iBufSize <<= 1; char *pNewData = new char[m_iBufSize]; if(pNewData == NULL) return;//꿎桿錦릿딜똥 CopyMemory(pNewData, m_pBuffer, prevBufSize); if (m_iTailPos<m_iHeadPos) { CopyMemory(pNewData+prevBufSize, m_pBuffer, m_iTailPos); m_iTailPos += prevBufSize; } delete [] m_pBuffer; m_pBuffer = pNewData; } inline void CCircularBuffer::PutData(char &data) { int len = 1; while (IsOverFlowCondition(len)) BufferResize(); m_pBuffer[m_iTailPos++] = data; if (m_iTailPos==m_iBufSize) m_iTailPos = 0; } inline void CCircularBuffer::PutData(char *pData, int len) { if (len<=0) {TRACE("CCircularBuffer::PutData len is <=0\n"); return;} while (IsOverFlowCondition(len)) BufferResize(); if (IsIndexOverFlow(len)) { int FirstCopyLen = m_iBufSize-m_iTailPos; int SecondCopyLen = len - FirstCopyLen; ASSERT(FirstCopyLen); CopyMemory(m_pBuffer+m_iTailPos, pData, FirstCopyLen); if (SecondCopyLen) { CopyMemory(m_pBuffer, pData+FirstCopyLen, SecondCopyLen); m_iTailPos = SecondCopyLen; } else m_iTailPos = 0; } else { CopyMemory(m_pBuffer+m_iTailPos, pData, len); m_iTailPos += len; } } inline int CCircularBuffer::GetOutData(char *pData) { int len = GetValidCount(); int fc, sc; fc = m_iBufSize-m_iHeadPos; if (len>fc) { sc = len - fc; CopyMemory(pData, m_pBuffer+m_iHeadPos, fc); CopyMemory(pData+fc, m_pBuffer, sc); m_iHeadPos = sc; ASSERT(m_iHeadPos==m_iTailPos); } else { CopyMemory(pData, m_pBuffer+m_iHeadPos, len); m_iHeadPos += len; if (m_iHeadPos==m_iBufSize) m_iHeadPos = 0; } return len; } inline void CCircularBuffer::GetData(char *pData, int len) { ASSERT(len>0&&len<=GetValidCount()); if (len < m_iBufSize-m_iHeadPos) CopyMemory(pData, m_pBuffer+m_iHeadPos, len); else { int fc, sc; fc = m_iBufSize-m_iHeadPos; sc = len - fc; CopyMemory(pData, m_pBuffer+m_iHeadPos, fc); if (sc) CopyMemory(pData+fc, m_pBuffer, sc); } } inline BOOL CCircularBuffer::HeadIncrease(int increasement) { ASSERT(increasement<=GetValidCount()); m_iHeadPos += increasement; m_iHeadPos %= m_iBufSize; return m_iHeadPos!=m_iTailPos; } #endif // !defined(AFX_CIRCULARBUFFER_H__F4D345A4_CE05_11D1_8BEE_0060979C5900__INCLUDED_) <file_sep>/PsiTable.cpp // PsiTable.cpp: implementation of the CPsiTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "PsiTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CPsiTable::CPsiTable() { } CPsiTable::~CPsiTable() { } <file_sep>/PAMAExpSet.cpp // PAMAExpSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "PAMAExpSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CPAMAExpSet IMPLEMENT_DYNAMIC(CPAMAExpSet, CRecordset) CPAMAExpSet::CPAMAExpSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CPAMAExpSet) m_iPAMAExp = 0; m_nFields = 1; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CPAMAExpSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CPAMAExpSet::GetDefaultSQL() { return _T("[dbo].[PAMAExp]"); } void CPAMAExpSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CPAMAExpSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Long(pFX, _T("[iPAMAExp]"), m_iPAMAExp); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CPAMAExpSet diagnostics #ifdef _DEBUG void CPAMAExpSet::AssertValid() const { CRecordset::AssertValid(); } void CPAMAExpSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/Extern.h #ifndef _EXTERN_H_ #define _EXTERN_H_ #include "ErrorLog.h" #include "DGbanInfo.h"//µç¹â°å #include "YhuoIni.h" #include "SERVERINFO.h" #include "MAP.h" #include "CELL.h" #include "EVENT.h" #include "Npc.h" #include "NpcTable.h" #include "NpcItem.h" #include "NpcThread.h" #include "ItemTable.h" #include "SkillTable.h" #include "PsiTable.h" #include "LevelUpTable.h" #include "PAMAExp.h" #include "TableItem.h" #include "NpcChat.h" #include "Store.h" #include "InitItemTableSet.h" #include "MagicItemTable.h" #include "MyDB.h" #include "Recover.h" #include "RecoverRate.h" #include "DNTable.h" #include "Guild.h" #include "GuildUser.h" #include "GuildStoreTable.h" #include "EBodyTable.h" #include "SharedMemory.h" #include "GuildMapInfoTable.h" #include "GuildHouseWar.h" #include "VirtualRoom.h" #include "GuildHouse.h" #include "UserLevelUpTable.h" #include "EventItemTable.h" #include "EventItemNew.h" #include "NpcSayTable.h" #include "GuildFortress.h" #include "MonsterPsiSet.h" #include "DressingSet.h" #include "AccessoriUpTable.h" #include "EventBlockingTable.h" #include "RemodelingTable.h" #include "BoxEventTable.h" #include "SummonTable.h" #include "ServerDlg.h" #include "EBodyUpgradeTable.h" #include "EBodyIdentifyTable.h" #include "HuanshiTable.h" #include "MESSAGE.h" #include "Kaixiangzi.h" #include "OnlineShop.h" #include "SxOnlineShop.h" #include "RMBExchangeShop.h"//ÔÚÏßÉ̵ê typedef CTypedPtrArray <CPtrArray, YhuoIni*> cyehuoini; typedef CTypedPtrArray <CPtrArray, SERVERINFO*> ServerArray; typedef CTypedPtrArray <CPtrArray, ZONEINFO*> TownPotalArray; typedef CTypedPtrArray <CPtrArray, TOWNPOTAL*> RandomTownPotalArray; typedef CTypedPtrArray <CPtrArray, MAP*> ZoneArray; typedef CTypedPtrArray <CPtrArray, CELL*> CellArray; typedef CTypedPtrArray <CPtrArray, EVENT*> EventArray; typedef CTypedPtrArray <CPtrArray, CNpc*> NpcArray; typedef CTypedPtrArray <CPtrArray, CNpcTable*> NpcTableArray; typedef CTypedPtrArray <CPtrArray, CNpcThread*> NpcThreadArray; typedef CTypedPtrArray <CPtrArray, TableItem*> TableItemArray; typedef CTypedPtrArray <CPtrArray, CItemTable*> ItemTableArray; typedef CTypedPtrArray <CPtrArray, CMagicItemTable*>MagicItemTableArray; typedef CTypedPtrArray <CPtrArray, CAccessoriUpTable*> AccessoriUpTableArray; typedef CTypedPtrArray <CPtrArray, CRemodelingTable*> RemodelingTableArray; typedef CTypedPtrArray <CPtrArray, CSkillTable*> SkillTableArray; typedef CTypedPtrArray <CPtrArray, CPsiTable*> PsiTableArray; typedef CTypedPtrArray <CPtrArray, CLevelUpTable*> LevelUpTableArray; typedef CTypedPtrArray <CPtrArray, CPAMAExp*> PAMAExpArray; typedef CTypedPtrArray <CPtrArray, CNpcChat*> NpcChatArray; typedef CTypedPtrArray <CPtrArray, CStore*> StoreArray; typedef CTypedPtrArray <CPtrArray, WEATHER_INFO*> WeatherArray; typedef CTypedPtrArray <CPtrArray, InitItemTable*> InitItemTableArray; typedef CTypedPtrArray <CPtrArray, CRecover*> RecoverArray; typedef CTypedPtrArray <CPtrArray, CRecoverRate*> RecoverRateArray; typedef CTypedPtrArray <CPtrArray, CDNTable*> DNArray; typedef CTypedPtrArray <CPtrArray, CGuild*> GuildArray; typedef CTypedPtrArray <CPtrArray, CGuildUser*> GuildUserArray; typedef CTypedPtrArray <CPtrArray, CGuildStoreTable*>GuildStoreArray; typedef CTypedPtrArray <CPtrArray, CGuildHouseWar*> GuildHouseWarArray; typedef CTypedPtrArray <CPtrArray, CGuildMapInfoTable*> GuildMapArray; typedef CTypedPtrArray <CPtrArray, CSharedMemory*> SharedMemoryArray; typedef CTypedPtrArray <CPtrArray, CVirtualRoom*> VirtualRoomArray; typedef CTypedPtrArray <CPtrArray, CGuildHouse*> GuildHouseArray; typedef CTypedPtrArray <CPtrArray, CUserLevelUpTable*> UserLevelArray; typedef CTypedPtrArray <CPtrArray, CEventItemTable*> EventItemArray; typedef CTypedPtrArray <CPtrArray, CEventItemNew*> EventItemNewArray; typedef CTypedPtrArray <CPtrArray, CNpcSayTable*> NpcSayArray; typedef CTypedPtrArray <CPtrArray, CGuildFortress*> GuildFortressArray; typedef CTypedPtrArray <CPtrArray, DynamicMapList*> DynamicMapArray; typedef CTypedPtrArray <CPtrArray, CMonsterPsi*> MonsterPsiArray; typedef CTypedPtrArray <CPtrArray, DRESSING_DATA*> DressingDataArray; typedef CTypedPtrArray <CPtrArray, int*> MyServerArray; typedef CTypedPtrArray <CPtrArray, CBoxEventTable*> BoxEventTableArray; typedef CTypedPtrArray <CPtrArray, CSummonTable*> SummonTableArray; typedef CTypedPtrArray <CPtrArray, CEBodyTable*> EBodyArray; typedef CTypedPtrArray <CPtrArray, CEBodyUpgradeTable*> EBodyUpgradeTableArray; typedef CTypedPtrArray <CPtrArray, CEBodyIdentifyTable*> EBodyIdentifyTableArray; typedef CTypedPtrArray <CPtrArray, CHuanshiTable*> HuanshiArray; typedef CTypedPtrArray <CPtrArray, MESSAGE*> MessAgeArray; typedef CTypedPtrArray <CPtrArray, OnlineShop*> OnlineShopArray; typedef CTypedPtrArray <CPtrArray, SxOnlineShop*> SxOnlineShopArray;//ÊôÐÔÉ̵ê typedef CTypedPtrArray <CPtrArray, Kaixiangzi*> KaixiangziArray;//ÊôÐÔÉ̵ê typedef CTypedPtrArray <CPtrArray, CRMBExchangeShop*> OnlineRMBShopArray; //ÔÚÏßRMBÉ̵ê typedef CTypedPtrArray <CPtrArray, DGbanInfo*> DGArray;//µç¹â°å extern GuildArray g_arGuildData; extern GuildUserArray g_arGuildUser; extern GuildHouseWarArray g_arGuildHouseWar; extern GuildHouseArray g_arGuildHouse; extern GuildFortressArray g_arGuildFortress; //extern GuildStoreArray g_arGuildStore; extern InitItemTableArray g_arInitItem; extern TableItemArray g_DBItemArray; extern CMyDB g_DB[]; extern CMyDB g_DBSession[]; extern CMyDB g_DBNew[]; extern CErrorLog g_ErrorLog; extern HANDLE g_hIOCP; extern ZoneArray g_zone; // alisia extern ZoneArray g_zonesize; // extern CellArray g_cell; extern EventArray g_event; extern cyehuoini o_yehuoini; extern ServerArray g_server; extern NpcArray g_arNpc; extern NpcTableArray g_arNpcTable; extern NpcTableArray g_arCityNpcTable; extern NpcThreadArray g_arNpcThread; extern CNpcItem g_NpcItem; extern CNpcItem g_ValItem; extern BOOL g_bNpcExit; extern ItemTableArray g_arItemTable; extern MagicItemTableArray g_arMagicItemTable; extern AccessoriUpTableArray g_arAccessoriUpTable; // Accessori Upgrade Table extern CRipel g_Ripel; // Ripel's crest extern RemodelingTableArray g_arRemodelingTable1; // 1 ´Ü °³Á¶ Table extern RemodelingTableArray g_arRemodelingTable2; // 2 ´Ü °³Á¶ Table extern RemodelingTableArray g_arRemodelingTable3; // 3 ´Ü °³Á¶ Table extern CWordArray g_arRemodelingRandomIndex1; // 1 ´Ü °³Á¶ È®À² À妽º extern int g_iRemodelingArmorIndex1; // 1 ´Ü °³Á¶ ¹æ¾î±¸ ½ÃÀÛ À妽º extern CWordArray g_arRemodelingRandomIndex2; // 2 ´Ü °³Á¶ È®À² À妽º extern int g_iRemodelingArmorIndex2; // 2 ´Ü °³Á¶ ¹æ¾î±¸ ½ÃÀÛ À妽º extern CWordArray g_arRemodelingRandomIndex3; // 3 ´Ü °³Á¶ È®À² À妽º extern int g_iRemodelingArmorIndex3; // 3 ´Ü °³Á¶ ¹æ¾î±¸ ½ÃÀÛ À妽º extern BoxEventTableArray g_arBoxEventTable; // Box Event Table Array extern SummonTableArray g_arSummonTable; // Summon Monster Table extern CServerDlg* g_pMainDlg; extern SkillTableArray g_arSkillTable; extern PsiTableArray g_arPsiTable; extern LevelUpTableArray g_arLevelUpTable; extern PAMAExpArray g_arPAMAExp; extern StoreArray g_arStore; extern StoreArray g_arPsiStore; extern NpcChatArray g_arNpcChat; extern RecoverArray g_arRecoverTable; extern RecoverRateArray g_arRecoverRateTable; extern DNArray g_arDNTable; extern StoreArray g_arEBodyStore; extern EBodyArray g_arEBodyTable; extern EBodyUpgradeTableArray g_arEBodyUpgradeTable; extern EBodyIdentifyTableArray g_arEBodyIdentifyTable; extern HuanshiArray g_arHuanshiTable; extern MessAgeArray g_arMessAgeArray; extern OnlineShopArray g_arOnlineShopTable; extern SxOnlineShopArray g_arSxOnlineShopTable; extern KaixiangziArray g_arKaixiangziTable; extern OnlineRMBShopArray g_arOnlineRMBShopTable; //ÔÚÏßÉ̵ê extern DGArray g_arDGArray;//µç¹â°å extern GuildMapArray g_arMapTable; extern VirtualRoomArray g_arVirtualRoom; extern UserLevelArray g_arUserLevel; extern EventItemArray g_arEventItemTable; extern EventItemArray g_arAddEventItemTable; extern EventItemNewArray g_arEventItemNew; extern NpcSayArray g_arNpcSayEventTable; extern DynamicMapArray g_arDynamicMapTable; extern MonsterPsiArray g_arMonsterPsi; extern DressingDataArray g_arDressingData; extern MyServerArray g_arMyServer; extern BOOL g_bDebug; extern int g_ItemSerialIndex; extern int g_8x[]; extern int g_8y[]; extern int g_16x[]; extern int g_16y[]; extern POINT g_rPt[8][6]; extern int g_BrawlClass[]; // ÇØ´ç °è¿­¿¡¼­ »ç¿ëÇÒ¼öÀÖ´Â ¹«±â°è¿­ Á¾·ù(¾ÆÀÌÅÛ Å×À̺í Á¾·ù) extern int g_StaffClass[]; extern int g_EdgedClass[]; extern int g_FireArmsClass[]; extern BYTE g_DynamicSkillInfo[]; //extern _int64 g_OnlineStart; extern _int64 g_OnlineEnd; //extern _int64 g_Online_Update_ticks; extern _int64 g_OnlineMinStart; extern _int64 g_Online_Update_Min_ticks; extern long g_GameTime; extern long g_GameDay; extern long g_GameMinute; extern long g_TotalNPC; // DB¿¡ÀÖ´Â ÃÑ ¼ö extern long g_CurrentNPCError; // ¼¼ÆÃ¿¡¼­ ½ÇÆÐÇÑ ¼ö extern long g_CurrentNPC; // ÇöÀç °ÔÀÓ»ó¿¡¼­ ½ÇÁ¦·Î ¼ÂÆÃµÈ ¼ö extern long g_CurrentGuildCount;// ÇöÀç ±æµå¿¡ µî·ÏÇÑ ¸¶Áö¸· sid¹øÈ£ extern WeatherArray g_WeatherZone; extern TownPotalArray g_TownPotal; extern RandomTownPotalArray g_RandomTownPotal; extern int g_CityRank[5][3]; // ½Ã¹Îµî±Þ´ç °¨¼ÒÄ¡ extern int g_CityRankValue[]; // ½Ã¹Îµî±Þ ´©ÀûÄ¡ extern BOOL g_sanBaoLv ; //Èý±¶±¬ÂÊ extern BOOL g_sanJingYan;//Èý±¶¾­Ñé extern CFile g_fpSpeedHack; extern CFile g_fpSpeedHack0; extern CFile g_fpBackServer; extern CFile g_fpGuildHouseRank; extern CFile g_fpEventItem; extern NpcThreadArray g_arEventNpcThread; extern int g_ClassWeight[]; // °¢ °è¿­º° ±âº» ¹«°Ô extern SharedMemoryArray g_arSharedMemory; extern SharedMemoryArray g_arStoreSharedMemory; extern SharedMemoryArray g_arFortressSharedMemory; extern SharedMemoryArray g_arGuildHouseSharedMemory; extern bool isDoubleExp; extern bool isDoubleExpup4; extern int g_ItemHsUpgrade[]; extern int g_HuanShiJiLv[]; extern int g_ItemAttUpgrade[]; extern int g_ItemDefUpgrade[]; extern int g_ItemDefUpgrade_[]; extern int g_ItemAccessoriUpgrade[]; extern int g_ItemBlessingUpgrade[]; extern int g_ItemNormalDownUpgrade[]; extern volatile long g_bShutDown; extern volatile long g_dwItemSerial; // Item Logging extern DWORD g_dwFortressTime; extern CEventBlockingTable *g_pEventBlock; extern int g_iMoonEvent; extern int g_iAccessoriSlot[MAX_ACCESSORI]; extern int g_iGuardianiSlot[GUARDIAN_SLOT]; extern const short SID_RING_OF_LIFE; extern const short SID_NECKLACE_OF_SHIELD; extern const short SID_EARRING_OF_PROTECT; extern const short SID_EARRING_OF_FEIFEI; #endif<file_sep>/AccessoriUpTable.h // AccessoriUpTable.h: interface for the CAccessoriUpTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_ACCESSORIUPTABLE_H__70323D5B_2930_4873_BF22_52D1BA6EBE7E__INCLUDED_) #define AFX_ACCESSORIUPTABLE_H__70323D5B_2930_4873_BF22_52D1BA6EBE7E__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CRipel { public: CWordArray m_arRipelTop; CWordArray m_arRipelBottom; CWordArray m_arRipelLeft; CWordArray m_arRipelRight; CWordArray m_arRipelCrest; CRipel(); ~CRipel(); }; class CAccessoriUpTable { public: CAccessoriUpTable(); virtual ~CAccessoriUpTable(); int m_sAid; int m_sSid; BYTE m_tType; BYTE m_tGroup; }; #endif // !defined(AFX_ACCESSORIUPTABLE_H__70323D5B_2930_4873_BF22_52D1BA6EBE7E__INCLUDED_) <file_sep>/BoxEventTable.cpp // BoxEventTable.cpp: implementation of the CBoxEventTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "BoxEventTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CBoxEventTable::CBoxEventTable() { } CBoxEventTable::~CBoxEventTable() { } <file_sep>/NpcThread.h // NpcThread.h: interface for the CNpcThread class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_NPCTHRED_H__ACE39ADA_BAFB_4EA8_A953_AD9FD18C2918__INCLUDED_) #define AFX_NPCTHRED_H__ACE39ADA_BAFB_4EA8_A953_AD9FD18C2918__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 #include "Npc.h" #include "COM.h" UINT NpcThreadProc(LPVOID pParam /* NPC_THREAD_INFO ptr */); typedef struct _NPC_THREAD_INFO { COM* pCom; CNpc* pNpc[NPC_NUM]; long m_lNpcUsed[NPC_NUM]; //&&&&&&&&&&&& Test Code HWND hWndMsg; } NPC_THREAD_INFO; class CNpcThread { public: CNpcThread(); virtual ~CNpcThread(); public: void InitThreadInfo(HWND hwnd); // USER* m_pUser; COM* m_pCom; CNpc* m_pNpc[NPC_NUM]; NPC_THREAD_INFO m_ThreadInfo; CWinThread* m_pThread; }; #endif // !defined(AFX_NPCTHRED_H__ACE39ADA_BAFB_4EA8_A953_AD9FD18C2918__INCLUDED_) <file_sep>/GuildMapInfoTable.h // GuildMapInfoTable.h: interface for the CGuildMapInfoTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_GUILDMAPINFOTABLE_H__497347F6_4FFA_4CA4_8E49_7447C9977C10__INCLUDED_) #define AFX_GUILDMAPINFOTABLE_H__497347F6_4FFA_4CA4_8E49_7447C9977C10__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CGuildMapInfoTable { public: CGuildMapInfoTable(); virtual ~CGuildMapInfoTable(); public: short m_sMapIndex; BYTE m_tPkMode; short m_sStoreID; // 상점 번호 short m_sStoreIndex; // 실제 배열 참조 인덱스 short m_sStoreZone; // 상점 내 지역이다. }; #endif // !defined(AFX_GUILDMAPINFOTABLE_H__497347F6_4FFA_4CA4_8E49_7447C9977C10__INCLUDED_) <file_sep>/DBItemInfo.cpp // DBItemInfo.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "DBItemInfo.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CDBItemInfo IMPLEMENT_DYNAMIC(CDBItemInfo, CRecordset) CDBItemInfo::CDBItemInfo(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CDBItemInfo) m_sNum = 0; m_strName = _T(""); m_iCost = 0; m_tArm = 0; m_tUseLevel = 0; m_tUseType = 0; m_sPicNum = 0; m_sWg = 0; m_sDur = 0; m_sMaxAt = 0; m_sAtDelay = 0; m_tDmgX = 0; m_tDmgY = 0; m_tDmgZ = 0; m_tDf = 0; m_tAt = 0; m_tCrit = 0; m_tRange = 0; m_tOut = 0; m_tBullNum = 0; m_tBullType = 0; m_tStErr = 0; m_tStDf = 0; m_tStCure = 0; m_tHP = 0; m_tPP = 0; m_sSP = 0; m_nFields = 27; //}}AFX_FIELD_INIT m_nDefaultType = dynaset; } CString CDBItemInfo::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CDBItemInfo::GetDefaultSQL() { return _T("[dbo].[DBITEM]"); } void CDBItemInfo::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CDBItemInfo) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sNum]"), m_sNum); RFX_Text(pFX, _T("[strName]"), m_strName); RFX_Long(pFX, _T("[iCost]"), m_iCost); RFX_Byte(pFX, _T("[tArm]"), m_tArm); RFX_Byte(pFX, _T("[tUseLevel]"), m_tUseLevel); RFX_Byte(pFX, _T("[tUseType]"), m_tUseType); RFX_Int(pFX, _T("[sPicNum]"), m_sPicNum); RFX_Int(pFX, _T("[sWg]"), m_sWg); RFX_Int(pFX, _T("[sDur]"), m_sDur); RFX_Int(pFX, _T("[sMaxAt]"), m_sMaxAt); RFX_Int(pFX, _T("[sAtDelay]"), m_sAtDelay); RFX_Byte(pFX, _T("[tDmgX]"), m_tDmgX); RFX_Byte(pFX, _T("[tDmgY]"), m_tDmgY); RFX_Byte(pFX, _T("[tDmgZ]"), m_tDmgZ); RFX_Byte(pFX, _T("[tDf]"), m_tDf); RFX_Byte(pFX, _T("[tAt]"), m_tAt); RFX_Byte(pFX, _T("[tCrit]"), m_tCrit); RFX_Byte(pFX, _T("[tRange]"), m_tRange); RFX_Byte(pFX, _T("[tOut]"), m_tOut); RFX_Byte(pFX, _T("[tBullNum]"), m_tBullNum); RFX_Byte(pFX, _T("[tBullType]"), m_tBullType); RFX_Byte(pFX, _T("[tStErr]"), m_tStErr); RFX_Byte(pFX, _T("[tStDf]"), m_tStDf); RFX_Byte(pFX, _T("[tStCure]"), m_tStCure); RFX_Byte(pFX, _T("[tHP]"), m_tHP); RFX_Byte(pFX, _T("[tPP]"), m_tPP); RFX_Int(pFX, _T("[sSP]"), m_sSP); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CDBItemInfo diagnostics #ifdef _DEBUG void CDBItemInfo::AssertValid() const { CRecordset::AssertValid(); } void CDBItemInfo::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/EBodyTable.cpp // EBodyTable.cpp: implementation of the CEBodyTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "EBodyTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CEBodyTable::CEBodyTable() { m_sChangeValue = 0; m_sRandom = 0; m_sSubType = 0; m_tLevel = 0; m_tNeedClass = 0; m_tSid = 0; m_tUpgrade = 0; m_tWearInfo = 0; } CEBodyTable::~CEBodyTable() { } <file_sep>/BoxEventTable.h // BoxEventTable.h: interface for the CBoxEventTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_BOXEVENTTABLE_H__8754E2BF_A4CE_4A2B_8043_5EAB5566F9D2__INCLUDED_) #define AFX_BOXEVENTTABLE_H__8754E2BF_A4CE_4A2B_8043_5EAB5566F9D2__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CBoxEventTable { public: short m_sSid; BYTE m_tType; short m_sIid; short m_sCount; CString m_strText; CBoxEventTable(); virtual ~CBoxEventTable(); }; #endif // !defined(AFX_BOXEVENTTABLE_H__8754E2BF_A4CE_4A2B_8043_5EAB5566F9D2__INCLUDED_) <file_sep>/UserManager.h /////////////////////////////////////////////////////////////////////////////// // Socket Manager Header file define // #ifndef __USERMANAGER_H #define __USERMANAGER_H #include "Poolbase.h" class CUserManager : public CPoolBaseManager { public: int sid; public: CUserManager(); ~CUserManager(); void ResourceFree( int uid ); }; #endif <file_sep>/StdAfx.h // stdafx.h : include file for standard system include files, // or project specific include files that are used frequently, but // are changed infrequently // #if !defined(AFX_STDAFX_H__2D74873E_1BDB_470C_8FD4_4AD406DA5693__INCLUDED_) #define AFX_STDAFX_H__2D74873E_1BDB_470C_8FD4_4AD406DA5693__INCLUDED_ //#define _CRT_SECURE_NO_DEPRECATE #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 #include "MemDefine.h" //#define FD_SETSIZE MAX_USER #define VC_EXTRALEAN // Exclude rarely-used stuff from Windows headers #define _WIN32_WINNT 0x0500 #include <afxwin.h> // MFC core and standard components #include <afxext.h> // MFC extensions #include <afxdtctl.h> // MFC support for Internet Explorer 4 Common Controls #ifndef _AFX_NO_AFXCMN_SUPPORT #include <afxcmn.h> // MFC support for Windows Common Controls #endif // _AFX_NO_AFXCMN_SUPPORT #include <winsock2.h> // Winsock 2 #include <mswsock.h> // Winsock 2 #include <afxdb.h> // ODBC #include <afxtempl.h> // collection //#define _GETVARSTRING_DEBUG #include "Global.h" #include "define.h" #define SErr(c, f) g_ErrorLog.ShowError((c), f) #define WErr(c, f) g_ErrorLog.WriteError((c), f) #define WErr2(c, f, e) g_ErrorLog.WriteError2((c), f, e) #ifdef _DEBUG #define BREAKPOINT() __asm { int 3 } #else #define BREAKPOINT() ; #endif //#define BREAKPOINT() __asm { int 3 } //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_STDAFX_H__2D74873E_1BDB_470C_8FD4_4AD406DA5693__INCLUDED_) <file_sep>/Hyper.h // Hyper.h: interface for the CHyper class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_HYPER_H__25CE02AC_A0AB_4C48_BA34_86D3185407F8__INCLUDED_) #define AFX_HYPER_H__25CE02AC_A0AB_4C48_BA34_86D3185407F8__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CHyper { public: CHyper(); virtual ~CHyper(); struct HyperText{ int m_nCol; int m_nRow; int m_nWidth; int m_nEvent; COLORREF m_FontColor; int m_nZoneNum; int m_nTextlength; CString m_strText; }; struct HyperImg{ CRect rectImg; int m_Event; int m_ZoneNum; int m_nImgId; }; public: void InitBuf(); void GetHyperText(TCHAR *pBuf, int nSize); TCHAR m_Buf[10000]; CPoint m_StartPos; int m_nViewLineCount; int m_nCountLine; HyperText m_HyperText[1024]; int m_nImgCount; HyperImg m_HyperImg[100]; void load(); int save(TCHAR *pBuf); }; #endif // !defined(AFX_HYPER_H__25CE02AC_A0AB_4C48_BA34_86D3185407F8__INCLUDED_) <file_sep>/NpcSayTable.h // NpcSayTable.h: interface for the CNpcSayTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_NPCSAYTABLE_H__3894E9FC_C509_4B70_87F5_6122AA54A925__INCLUDED_) #define AFX_NPCSAYTABLE_H__3894E9FC_C509_4B70_87F5_6122AA54A925__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CNpcSayTable { public: CNpcSayTable(); virtual ~CNpcSayTable(); int iSid; CString Say; }; #endif // !defined(AFX_NPCSAYTABLE_H__3894E9FC_C509_4B70_87F5_6122AA54A925__INCLUDED_) <file_sep>/RecoverRateSet.cpp // RecoverRateSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "RecoverRateSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CRecoverRateSet IMPLEMENT_DYNAMIC(CRecoverRateSet, CRecordset) CRecoverRateSet::CRecoverRateSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CRecoverRateSet) m_sSid = 0; m_sHpSpeedupTime = 0; m_byHpSpeedupLevel = 0; m_sSpSpeedupTime = 0; m_bySpSpeedupLevel = 0; m_sPpSpeedupTime = 0; m_byPpSpeedupLevel = 0; m_nFields = 7; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CRecoverRateSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CRecoverRateSet::GetDefaultSQL() { return _T("[dbo].[USER_RECOVER_RATE]"); } void CRecoverRateSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CRecoverRateSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sSid]"), m_sSid); RFX_Int(pFX, _T("[sHpSpeedupTime]"), m_sHpSpeedupTime); RFX_Byte(pFX, _T("[byHpSpeedupLevel]"), m_byHpSpeedupLevel); RFX_Int(pFX, _T("[sSpSpeedupTime]"), m_sSpSpeedupTime); RFX_Byte(pFX, _T("[bySpSpeedupLevel]"), m_bySpSpeedupLevel); RFX_Int(pFX, _T("[sPpSpeedupTime]"), m_sPpSpeedupTime); RFX_Byte(pFX, _T("[byPpSpeedupLevel]"), m_byPpSpeedupLevel); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CRecoverRateSet diagnostics #ifdef _DEBUG void CRecoverRateSet::AssertValid() const { CRecordset::AssertValid(); } void CRecoverRateSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/EventItemNew.h // EventItemNew.h: interface for the CEventItemNew class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_EVENTITEMNEW_H__FEA39E3A_EC00_48E6_BCA2_17DA11B9AB37__INCLUDED_) #define AFX_EVENTITEMNEW_H__FEA39E3A_EC00_48E6_BCA2_17DA11B9AB37__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CEventItemNew { public: CEventItemNew(); virtual ~CEventItemNew(); public: short m_sIndex; short m_sSid; BYTE m_tQuality; short m_sRemain; BYTE m_tSerialExist; int m_iThrowTerm; int m_iThrowRandom; short m_sNextYear; short m_sNextMonth; short m_sNextDay; short m_sNextHour; short m_sNextMin; long m_lGive; }; #endif // !defined(AFX_EVENTITEMNEW_H__FEA39E3A_EC00_48E6_BCA2_17DA11B9AB37__INCLUDED_) <file_sep>/SResourceArray.h /////////////////////////////////////////////////////////////////////////////// // Socket Resource array header files #ifndef __SOCKETRESOURCEARRAY_H #define __SOCKETRESOURCEARRAY_H #include "Poolbase.h" class CSockResourceArray : public CResourceArray { public: CSockResourceArray() {}; CSockResourceArray(long dwSize); virtual ~CSockResourceArray(); long GetResourceHandle(); void ReleaseResourceHandle(long hResHandle); int GetUsedResourceCount(); void SetFree(const long position); int TimeOutSetFree(const long position); }; #endif<file_sep>/EventItemTable.h // EventItemTable.h: interface for the CEventItemTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_EVENTITEMTABLE_H__183DD812_8C74_489D_AA89_A74DCA3300C8__INCLUDED_) #define AFX_EVENTITEMTABLE_H__183DD812_8C74_489D_AA89_A74DCA3300C8__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CEventItemTable { public: CEventItemTable(); virtual ~CEventItemTable(); int m_sSid; BYTE m_tEnd; BYTE m_tType; BYTE m_tGiveFlag; TCHAR m_strSerialNum[EVENT_ITEM_LENGTH + 1]; BYTE m_tGiveItem; DWORD m_dwTick; long m_lUsed; }; #endif // !defined(AFX_EVENTITEMTABLE_H__183DD812_8C74_489D_AA89_A74DCA3300C8__INCLUDED_) <file_sep>/IOCPBASE.h /////////////////////////////////////////////////////////////////////////////// // I/O COMPLETION PORT BASE HEADER FILE // #ifndef _IOCOMPLETIONPORTBASE_H #define _IOCOMPLETIONPORTBASE_H #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 #define MAXWORKERTHREAD 1000 #define SOCKET_FOR_NONE 0 #define SOCKET_FOR_JANG 1 #define SOCKET_FOR_GAME 2 #define SOCKET_FOR_GIBO 3 #define MAX_WORKER_THREAD AUTOMATA_THREAD*2+2 #include "Cbsocket.h" #include "Mcommon.h" class CBSocket; class CIOCPBASE; struct _ThreadPacket { CIOCPBASE *pIocpbase; HANDLE *phListenSockEvent; SOCKET *pListenSock; int iSocketType; }; typedef _ThreadPacket THREADPACKET; struct _DataProcThreadPacket { CIOCPBASE *pIocpbase; DWORD (*fn)(LPVOID); }; typedef _DataProcThreadPacket DATAPROCTHREADPACKET; typedef CTypedPtrArray <CPtrArray, SOCKET*> CLSocketArray; typedef CTypedPtrArray <CPtrArray, HANDLE*> CLEventArray; typedef CTypedPtrArray <CPtrArray, THREADPACKET*> CThreadPacketArray; class CIOCPSocket; class CPoolBaseManager; class WAIT_RECV_DATA; //IKING 2002.7.3 class CIOCPBASE { public: // IKING 2002.7.3 CRITICAL_SECTION m_CS_ReceiveData[MAX_WORKER_THREAD+1]; int m_nHeadPtr[MAX_WORKER_THREAD+1]; int m_nTailPtr[MAX_WORKER_THREAD+1]; WAIT_RECV_DATA *m_pRecvData[MAX_WORKER_THREAD+1][WAIT_RECV_DATA_BUFFER+1]; DATAPROCTHREADPACKET m_Dptp; int m_CurDataProcThreadNo; CWinThread *m_acceptThread; CWinThread *m_hWorkerThread[MAX_WORKER_THREAD+1]; int m_bAcceptEnableFlag; int m_ThreadCount; int m_CurThreadNo; int m_CurRecvThreadNo; int m_nDataListLength[MAX_WORKER_THREAD+1]; CPtrList m_pSendDataList[MAX_WORKER_THREAD+1]; CPoolBaseManager *m_pPBM; CRITICAL_SECTION m_SendDataCriticalSection[MAX_WORKER_THREAD+1]; HANDLE m_CreateSignalEvent; HANDLE m_hIOCPort; CLSocketArray m_ListenSocketArray; CLEventArray m_hListenEventArray; CThreadPacketArray m_ThreadPacketArray; SOCKET *m_pSocketListen; HANDLE *m_phListenSocketEvent; THREADPACKET *m_pThreadPacket; BYTE *m_pIopendingData; int m_nIopendingDataCount; int m_bIOPendingStop; protected: DWORD m_dwNumberOfWorkers; DWORD m_dwConcurrency; int m_Type; public: CIOCPBASE(); ~CIOCPBASE(); void SetThreadCount(int tc = 1) { m_ThreadCount = tc; }; BOOL Listen(int port, char *svrAddress = NULL); void CreateAcceptThread(); void CreateWorkerThread(); BOOL Associate(CIOCPSocket *pUser); int GetSocketType() { return m_Type; }; void SetSocketType(int type) { m_Type = type; }; int Init( int type, CPoolBaseManager *pPBM, DWORD (*fn)(LPVOID lp) = NULL ); void SetAliveTimeUpdate(int uid, DWORD ctick ); }; #endif <file_sep>/EBodyStoreSet.cpp // EBodyStoreSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "EBodyStoreSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CEBodyStoreSet IMPLEMENT_DYNAMIC(CEBodyStoreSet, CRecordset) CEBodyStoreSet::CEBodyStoreSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CEBodyStoreSet) m_sStoreID = 0; m_sEbodyID = 0; m_nFields = 2; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CEBodyStoreSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CEBodyStoreSet::GetDefaultSQL() { return _T("[dbo].[EBODY_STORE]"); } void CEBodyStoreSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CEBodyStoreSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sStoreID]"), m_sStoreID); RFX_Int(pFX, _T("[sEbodyID]"), m_sEbodyID); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CEBodyStoreSet diagnostics #ifdef _DEBUG void CEBodyStoreSet::AssertValid() const { CRecordset::AssertValid(); } void CEBodyStoreSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/RecoverRateSet.h #if !defined(AFX_RECOVERRATESET_H__15CB7AEE_34F3_4D2C_88C8_FE46B4655AF0__INCLUDED_) #define AFX_RECOVERRATESET_H__15CB7AEE_34F3_4D2C_88C8_FE46B4655AF0__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // RecoverRateSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CRecoverRateSet recordset class CRecoverRateSet : public CRecordset { public: CRecoverRateSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CRecoverRateSet) // Field/Param Data //{{AFX_FIELD(CRecoverRateSet, CRecordset) int m_sSid; int m_sHpSpeedupTime; BYTE m_byHpSpeedupLevel; int m_sSpSpeedupTime; BYTE m_bySpSpeedupLevel; int m_sPpSpeedupTime; BYTE m_byPpSpeedupLevel; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CRecoverRateSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_RECOVERRATESET_H__15CB7AEE_34F3_4D2C_88C8_FE46B4655AF0__INCLUDED_) <file_sep>/DNTableSet.cpp // DNTableSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "DNTableSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CDNTableSet IMPLEMENT_DYNAMIC(CDNTableSet, CRecordset) CDNTableSet::CDNTableSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CDNTableSet) m_sIndex = 0; m_sMinDn = 0; m_sMaxDn = 0; m_nFields = 3; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CDNTableSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CDNTableSet::GetDefaultSQL() { return _T("[dbo].[DN]"); } void CDNTableSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CDNTableSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sIndex]"), m_sIndex); RFX_Int(pFX, _T("[sMinDn]"), m_sMinDn); RFX_Int(pFX, _T("[sMaxDn]"), m_sMaxDn); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CDNTableSet diagnostics #ifdef _DEBUG void CDNTableSet::AssertValid() const { CRecordset::AssertValid(); } void CDNTableSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/SocketManager.cpp /////////////////////////////////////////////////////////////////////////////// // Socket Manager Class #include "StdAfx.h" #include "Socketmanager.h" CSocketManager::CSocketManager() { } CSocketManager::~CSocketManager() { } <file_sep>/PoolBase.h /////////////////////////////////////////////////////////////////////////////// // Poolbase.h // #ifndef __POOLBASE_H #define __POOLBASE_H #define E_NORESOURCESAVAILABLE 0x80002838 const int SPINX = 5000; // Spin count for critical sections struct RHANDLE { long handle; long pos; }; struct RHANDLEHEADER { const RHANDLE* pRH; bool bFree; DWORD dwTime; }; struct DNODE { const RHANDLE* pRH; DNODE* prev; DNODE* next; }; class CPoolBaseManager; struct POOL_THREAD { int id; CPoolBaseManager *pBM; }; // List class is linked list of RHANDLEs class CFreeList { private: int m_nArraySize; DNODE **m_pDNodeArray; int m_iNodeHead; int m_iNodeTail; public: CFreeList(); CFreeList(int size); ~CFreeList(); const RHANDLE* Pop(); void AddTail(const RHANDLE* newHandle); void RemoveAll(); DWORD GetCount(); private: DWORD m_Count; DNODE* m_pHead; DNODE* m_pTail; CRITICAL_SECTION m_cs; }; // CResourceArray Class manages data structures for handle information class CResourceArray { public: CResourceArray() {}; CResourceArray(long dwSize); ~CResourceArray(); RHANDLEHEADER** m_pHandles; long m_size; const RHANDLE* Add(); const RHANDLE* AddNew(int hPos); void RemoveAll(); bool IsFree(const long position); long GetCount(); void SetTimeStamp( const long position, long nTimeStamp = 0 ); RHANDLE* GetHandle(const long position); long GetDataValue(const long position); virtual long GetResourceHandle(); virtual void ReleaseResourceHandle(long hResHandle); virtual void SetBusy(const long position); virtual void SetFree(const long position); virtual int TimeOutSetFree(const long position); private: CRITICAL_SECTION m_cs; long m_count; }; class CPoolBaseManager { public: int m_nResourceWaitingCount; DWORD m_dwNumCurrentResources; CFreeList* m_pFreeList; CResourceArray* m_pResources; HANDLE m_hStopEvent; DWORD m_dwAllocationPoll; DWORD m_dwMinPoolSize; bool m_bStartAllocator; DWORD m_dwResourceAllocSize; DWORD m_dwDeadResourcePoll; HANDLE m_hAllocThread; HANDLE m_hRefreshThread; protected: CRITICAL_SECTION m_cs; int dwMaxResources; //DWORD m_dwNumCurrentResources; DWORD m_dwMaxResources; DWORD m_dwHandleLifeTime; DWORD m_dwStartResources; DWORD m_dwWaitForHandleTime; public: DWORD FreeResourcesLeft(); DWORD GetNumResources(); void SetStop(); int AddResource(int hPos); DWORD AllocateResources(DWORD dwNumAdd); void DeallocateResources(); void ReleaseDeadResources(); DWORD GetFreeResource(const RHANDLE** poutHandle); void ReleaseResource(const RHANDLE* handle); public: CPoolBaseManager(); ~CPoolBaseManager(); void Init(DWORD dwMaxResources, DWORD dwStartResources, DWORD dwWaitForHandleTime, DWORD dwHandleLifeTime, DWORD dwAllocationPoll, DWORD dwMinPoolSize, DWORD dwResourceAllocSize, DWORD dwDeadResourcePoll, CResourceArray *pRA ); }; #endif <file_sep>/Kaixiangzi.cpp // OnlineShop.cpp: implementation of the OnlineShop class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "Kaixiangzi.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// Kaixiangzi::Kaixiangzi() { m_oSid = 0; m_iSid = 0; ::ZeroMemory(m_iSname, sizeof(m_iSname)); m_price = 0; m_iNum = 0; m_sx1 = 0; m_sx2 = 0; m_sx3 = 0; m_sx4 = 0; m_sx5 = 0; m_upgrade = 0; m_sx6 = 0; m_sx7 = 0; m_sx8 = 0; m_sx9 = 0; m_sx10 = 0; ::ZeroMemory(m_iText, sizeof(m_iText)); } Kaixiangzi::~Kaixiangzi() { } <file_sep>/GuildFortressTaxSet.h #if !defined(AFX_GUILDFORTRESSTAXSET_H__EEDD28F4_457F_4B8F_87CF_C3B8CB0E601A__INCLUDED_) #define AFX_GUILDFORTRESSTAXSET_H__EEDD28F4_457F_4B8F_87CF_C3B8CB0E601A__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // GuildFortressTaxSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CGuildFortressTaxSet recordset class CGuildFortressTaxSet : public CRecordset { public: CGuildFortressTaxSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CGuildFortressTaxSet) // Field/Param Data //{{AFX_FIELD(CGuildFortressTaxSet, CRecordset) int m_sFortressID; int m_sStoreID01; int m_sStoreID02; int m_sStoreID03; int m_sStoreID04; int m_sStoreID05; int m_sStoreID06; int m_sStoreID07; int m_sStoreID08; int m_sStoreID09; int m_sStoreID10; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CGuildFortressTaxSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_GUILDFORTRESSTAXSET_H__EEDD28F4_457F_4B8F_87CF_C3B8CB0E601A__INCLUDED_) <file_sep>/ValItemTable.h // ValItemTable.h: interface for the CValItemTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_VALITEMTABLE_H__599082F5_F9E2_43A9_952E_C1C419C4599B__INCLUDED_) #define AFX_VALITEMTABLE_H__599082F5_F9E2_43A9_952E_C1C419C4599B__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CValItemTable { public: CValItemTable(); virtual ~CValItemTable(); int m_sSid; int m_sValItem01; BYTE m_tPersentVal01; int m_sValItem02; BYTE m_tPersentVal02; int m_sValItem03; BYTE m_tPersentVal03; int m_sValItem04; BYTE m_tPersentVal04; int m_sValItem05; BYTE m_tPersentVal05; int m_sValItem06; BYTE m_tPersentVal06; }; #endif // !defined(AFX_VALITEMTABLE_H__599082F5_F9E2_43A9_952E_C1C419C4599B__INCLUDED_) <file_sep>/ExceptionZoneSet.cpp // ExceptionZoneSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "ExceptionZoneSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CExceptionZoneSet IMPLEMENT_DYNAMIC(CExceptionZoneSet, CRecordset) CExceptionZoneSet::CExceptionZoneSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CExceptionZoneSet) m_sZone = 0; m_tFighterType = 0; m_tEventMopType = 0; m_nFields = 3; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CExceptionZoneSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame"); } CString CExceptionZoneSet::GetDefaultSQL() { return _T("[dbo].[ZONE_EXCEPTION]"); } void CExceptionZoneSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CExceptionZoneSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sZone]"), m_sZone); RFX_Byte(pFX, _T("[tFighterType]"), m_tFighterType); RFX_Byte(pFX, _T("[tEventMopType]"), m_tEventMopType); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CExceptionZoneSet diagnostics #ifdef _DEBUG void CExceptionZoneSet::AssertValid() const { CRecordset::AssertValid(); } void CExceptionZoneSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/LevelUpTable.h // LevelUpTable.h: interface for the CLevelUpTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_LEVELUPTABLE_H__7DEEAB25_1E2A_4E7D_B5D1_C0E386F7A4B8__INCLUDED_) #define AFX_LEVELUPTABLE_H__7DEEAB25_1E2A_4E7D_B5D1_C0E386F7A4B8__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CLevelUpTable { public: long long m_dwExp; short m_sLevel; BYTE m_tBasicUp; CLevelUpTable(); ~CLevelUpTable(); }; #endif // !defined(AFX_LEVELUPTABLE_H__7DEEAB25_1E2A_4E7D_B5D1_C0E386F7A4B8__INCLUDED_) <file_sep>/NpcSayEventSet.cpp // NpcSayEventSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "NpcSayEventSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CNpcSayEventSet IMPLEMENT_DYNAMIC(CNpcSayEventSet, CRecordset) CNpcSayEventSet::CNpcSayEventSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CNpcSayEventSet) m_sSid = 0; m_strNpcSay = _T(""); m_nFields = 2; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CNpcSayEventSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=drgame"); } CString CNpcSayEventSet::GetDefaultSQL() { return _T("[dbo].[NPC_SAY_EVENT]"); } void CNpcSayEventSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CNpcSayEventSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sSid]"), m_sSid); RFX_Text(pFX, _T("[strNpcSay]"), m_strNpcSay); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CNpcSayEventSet diagnostics #ifdef _DEBUG void CNpcSayEventSet::AssertValid() const { CRecordset::AssertValid(); } void CNpcSayEventSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/NpcItemSet.h #if !defined(AFX_NPCITEMSET_H__CF367426_D68D_4381_9220_884D259AEB01__INCLUDED_) #define AFX_NPCITEMSET_H__CF367426_D68D_4381_9220_884D259AEB01__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // NpcItemSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CNpcItemSet recordset class CNpcItemSet : public CRecordset { public: CNpcItemSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CNpcItemSet) // Field/Param Data //{{AFX_FIELD(CNpcItemSet, CRecordset) int m_sIndex; int m_sItem01; int m_sPersent01; int m_sItem02; int m_sPersent02; int m_sItem03; int m_sPersent03; int m_sItem04; int m_sPersent04; int m_sItem05; int m_sPersent05; int m_sItem06; int m_sPersent06; int m_sItem07; int m_sPersent07; int m_sItem08; int m_sPersent08; int m_sItem09; int m_sPersent09; int m_sItem10; int m_sPersent10; int m_sItem11; int m_sPersent11; int m_sItem12; int m_sPersent12; int m_sItem13; int m_sPersent13; int m_sItem14; int m_sPersent14; int m_sItem15; int m_sPersent15; int m_sItem16; int m_sPersent16; int m_sItem17; int m_sPersent17; int m_sItem18; int m_sPersent18; int m_sItem19; int m_sPersent19; int m_sItem20; int m_sPersent20; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CNpcItemSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_NPCITEMSET_H__CF367426_D68D_4381_9220_884D259AEB01__INCLUDED_) <file_sep>/Server.cpp ///////////////////////////////////////////////////////////////////// // Server.cpp : Defines the class behaviors for the application. // Copyright (C) 2000, <NAME> // // If this code works, it was written by <NAME>. // If not, I don't know who wrote it. // #include "stdafx.h" #include "Server.h" #include "ServerDlg.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CServerApp BEGIN_MESSAGE_MAP(CServerApp, CWinApp) //{{AFX_MSG_MAP(CServerApp) // NOTE - the ClassWizard will add and remove mapping macros here. // DO NOT EDIT what you see in these blocks of generated code! //}}AFX_MSG ON_COMMAND(ID_HELP, CWinApp::OnHelp) END_MESSAGE_MAP() ///////////////////////////////////////////////////////////////////////////// // CServerApp construction CServerApp::CServerApp() { // TODO: add construction code here, // Place all significant initialization in InitInstance } CServerApp::~CServerApp() { VERIFY(SOCKET_ERROR != WSACleanup()); } ///////////////////////////////////////////////////////////////////////////// // The one and only CServerApp object CServerApp theApp; ///////////////////////////////////////////////////////////////////////////// // CServerApp initialization LONG WINAPI MyUnhandledExceptionFilter(struct _EXCEPTION_POINTERS *pExceptionPointers) ; //出错回调seh LONG WINAPI ExceptionFilter(PEXCEPTION_POINTERS pExceptionInfo) { static bool showex=false; if(!showex) showex=true; else return EXCEPTION_EXECUTE_HANDLER; TCHAR modulename[MAX_PATH],szError[1000],filename[MAX_PATH],*p; MEMORY_BASIC_INFORMATION mbinfo; VirtualQuery(pExceptionInfo->ExceptionRecord->ExceptionAddress,&mbinfo,sizeof(mbinfo)); GetModuleFileName((HMODULE)mbinfo.AllocationBase,filename,MAX_PATH); p=_tcsrchr(filename,'\\'); p++; lstrcpy(modulename,p); try { SYSTEMTIME st; GetLocalTime(&st); TCHAR FileName[MAX_PATH]; ::CreateDirectory(_T("Error"),NULL); wsprintf(FileName,_T("Error\\errorlog-%d-%d.txt"),st.wYear,st.wMonth); CFile syslog; syslog.Open(FileName,CFile::modeNoTruncate| CFile::modeCreate |CFile::modeReadWrite); syslog.SeekToEnd(); TCHAR szDate[200]; wsprintf(szDate,_T("%d-%d-%d %d:%d:%d\r\n"),st.wYear,st.wMonth,st.wDay,st.wHour,st.wMinute,st.wSecond); syslog.Write(szDate,_tcslen(szDate)); wsprintf(szError,_T("异常: 模块:%s, 模块基址:0x%x, 异常代码:0x%x, 指令地址:0x%x"), modulename,mbinfo.AllocationBase,pExceptionInfo->ExceptionRecord->ExceptionCode,pExceptionInfo->ExceptionRecord->ExceptionAddress); //USES_CONVERSION; //syslog.Write(T2A(szError),strlen(T2A(szError))); syslog.Write(szError,_tcslen(szError)); char nextLine[] = ("\r\n"); syslog.Write(nextLine,strlen(nextLine)); syslog.Close(); } catch(CException *e) { e->Delete(); } wsprintf(szError,_T("程序遇到错误需关闭,请检查错误模块,运行调试解决.\n模块:%s,模块基址:0x%x,异常代码:0x%x,指令地址:0x%x"), modulename,mbinfo.AllocationBase,pExceptionInfo->ExceptionRecord->ExceptionCode,pExceptionInfo->ExceptionRecord->ExceptionAddress); //MessageBox(NULL,szError,_T("系统"),MB_ICONERROR|MB_OK); //MyUnhandledExceptionFilter(pExceptionInfo); return EXCEPTION_EXECUTE_HANDLER; } BOOL CServerApp::InitInstance() { // Standard initialization // If you are not using these features and wish to reduce the size // of your final executable, you should remove from the following // the specific initialization routines you do not need. // Load Winsock 2.2 if(!LoadWinsock()) return FALSE; #ifdef _AFXDLL // Enable3dControls(); // Call this when using MFC in a shared DLL #else //Enable3dControlsStatic(); // Call this when linking to MFC statically #endif // 促惫绢 滚傈 父甸扁 HINSTANCE hInstanceLanguagePack = LoadLibrary("DRRCKorean.dll"); if (NULL != hInstanceLanguagePack) AfxSetResourceHandle(hInstanceLanguagePack); // CServerDlg dlg; m_pMainWnd = &dlg; dlg.m_InstanceChecker.ActivateChecker(); if (dlg.m_InstanceChecker.PreviousInstanceRunning()) { //AfxMessageBox(_T("Previous version detected, will now restore it"), MB_OK); dlg.m_InstanceChecker.ActivatePreviousInstance(); m_pMainWnd = NULL; return FALSE; } int nResponse = dlg.DoModal(); if (nResponse == IDOK) { // TODO: Place code here to handle when the dialog is // dismissed with OK } else if (nResponse == IDCANCEL) { // TODO: Place code here to handle when the dialog is // dismissed with Cancel } AfxSetResourceHandle(AfxGetInstanceHandle()); // Since the dialog has been closed, return FALSE so that we exit the // application, rather than start the application's message pump. return FALSE; } ////////////////////////////////////////////////////////////////////////////// // Load Winsock DLL ver 2.2 // BOOL CServerApp::LoadWinsock() { CString strError; // request minimum Winsock 2.2 WORD wVersionRequested = MAKEWORD( 2, 2 ); WSADATA wsaData; int err = WSAStartup( wVersionRequested, &wsaData ); if( err != 0 ) { strError.Format(_T("WSAStartup Error, Error# = %d"), ::GetLastError()); AfxMessageBox(strError); return FALSE; } if( LOBYTE( wsaData.wVersion ) != 2 || HIBYTE( wsaData.wVersion ) != 2 ) { strError.Format( _T("Unsupported WinSock version %d.%d" ), LOBYTE( wsaData.wVersion ), HIBYTE( wsaData.wVersion ) ); AfxMessageBox(strError); return FALSE; } return TRUE; } <file_sep>/LOGIC.cpp // LOGIC.cpp: implementation of the LOGIC class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "LOGIC.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// LOGIC::LOGIC() { } LOGIC::~LOGIC() { } void LOGIC::Parse(char *pBuf) { int index = 0, i = 0; char temp[1024]; index += ParseSpace( temp, pBuf+index ); if( !strcmp( temp, "POSITION" ) ) { m_Logic = LOGIC_POSITION; index += ParseSpace( temp, pBuf+index ); m_LogicInt[i] = atoi( temp ); i++; index += ParseSpace( temp, pBuf+index ); m_LogicInt[i] = atoi( temp ); i++; index += ParseSpace( temp, pBuf+index ); m_LogicInt[i] = atoi( temp ); i++; } // else if( !strcmp( temp, "" ) // { // } } void LOGIC::Init() { for( int i = 0; i < MAX_LOGIC_INT; i++) { m_LogicInt[i] = -1; } m_LogicChar[0] = 0; } <file_sep>/LOGIC.h // LOGIC.h: interface for the LOGIC class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_LOGIC_H__7514FC24_511B_11D3_BE41_00105A6B97E2__INCLUDED_) #define AFX_LOGIC_H__7514FC24_511B_11D3_BE41_00105A6B97E2__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class LOGIC { public: void Init(); BYTE m_Logic; int m_LogicInt[MAX_LOGIC_INT]; char m_LogicChar[MAX_LOGIC_CHAR_LEN+1]; void Parse(char* pBuf); LOGIC(); virtual ~LOGIC(); }; #endif // !defined(AFX_LOGIC_H__7514FC24_511B_11D3_BE41_00105A6B97E2__INCLUDED_) <file_sep>/RoyalRumbleUser.cpp // RoyalRumbleUser.cpp: implementation of the CRoyalRumbleUser class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "RoyalRumbleUser.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CRoyalRumbleUser::CRoyalRumbleUser() { m_iUID = -1; memset( m_strUserID, NULL, CHAR_NAME_LENGTH+1 ); m_bLive = TRUE; } CRoyalRumbleUser::~CRoyalRumbleUser() { } <file_sep>/StdAfx.cpp // stdafx.cpp : source file that includes just the standard includes // Server.pch will be the pre-compiled header // stdafx.obj will contain the pre-compiled type information #include "stdafx.h" //#ifdef _DEBUG //#pragma comment(lib," ./LibsIncs/JvCryption.lib") //#else //#pragma comment(lib,"./LibsIncs/JvCryption.lib") //#endif <file_sep>/Item.h // Item.h: interface for the Item class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_ITEM_H__59D3FA6E_13EE_49E1_9995_9D1878750A76__INCLUDED_) #define AFX_ITEM_H__59D3FA6E_13EE_49E1_9995_9D1878750A76__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class TableItem; class Item { public: Item(); virtual ~Item(); TableItem* GetTableItem(int num); BOOL ParseItemData(char* pBuf); void FillItemData(char* pBuf); public: short m_TNum; // 테이블 아이템 번호 BYTE m_ItemNum; // 해당 item의 중복수 BYTE m_Type; // 아이템 종류 BYTE m_Arm; // 장착위치 BYTE m_Uselevel; // 사용가능 레벨 BYTE m_UseType; // 사용가능 계열 short m_PicNum; // 그림 번호 short m_Wg; // SP감소량 short m_Dur; // 내구도 short m_MaxAt; // 최대 공격력 short m_AtDelay; // 공격 딜레이 BYTE m_DmgX; // 대미지 X BYTE m_DmgY; // 대미지Y BYTE m_DmgZ; // 대미지Z BYTE m_Df; // 방어력 BYTE m_At; // 공격 명중값 BYTE m_Crit; // 크리티컬 확률 BYTE m_Range; // 사정거리 BYTE m_Out; // 고장률 BYTE m_BullNum; // 탄창수 BYTE m_BullType; // 탄창종류 BYTE m_StErr; // 상태이상 BYTE m_StDf; // 상태이상 방어 BYTE m_StCure; // 상태이상 치료 BYTE m_Hp; // 회복 BYTE m_Pp; // 회복 short m_Sp;; // 회복 DWORD m_Cost; // 가격 char m_Name[ITEM_NAME_LENGTH]; }; #endif // !defined(AFX_ITEM_H__59D3FA6E_13EE_49E1_9995_9D1878750A76__INCLUDED_) <file_sep>/NpcItem.h // NpcItem.h: interface for the CNpcItem class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_NPCITEM_H__56D8779F_271E_4B39_92CF_3DA33366FBA8__INCLUDED_) #define AFX_NPCITEM_H__56D8779F_271E_4B39_92CF_3DA33366FBA8__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CNpcItem { public: int **m_ppItem; int m_nRow; int m_nField; CNpcItem(); ~CNpcItem(); }; struct drop_novelity { char name[64];//物品名称 byte code1;//物品代码 byte code2; int per; }; struct drop_info { char name[64]; //怪物名称 byte code1;//怪物种类 byte code2; int money;//掉钱数量 int DropLeechdom;//掉药品几率 int DropNovelity;//掉物品几率 int n; //物品种类数量 struct drop_novelity novelity[256]; }; struct convert_table{ //100级物品生成表 char src_name[64]; SHORT Ssid; char dst_name[64]; SHORT Dsid; BOOL have; }; #endif // !defined(AFX_NPCITEM_H__56D8779F_271E_4B39_92CF_3DA33366FBA8__INCLUDED_) <file_sep>/EventBlockingTable.h // EventBlockingTable.h: interface for the CEventBlockingTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_EVENTBLOCKINGTABLE_H__D3D9F2C1_E343_4F92_B8BB_43CD0B341E1B__INCLUDED_) #define AFX_EVENTBLOCKINGTABLE_H__D3D9F2C1_E343_4F92_B8BB_43CD0B341E1B__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 typedef struct _EVENT { short sBlockType; short sX; short sY; short sZone; } Event; typedef CTypedPtrArray <CPtrArray, Event*> _EventBlockArray; class CEventBlockingTable { public: BOOL CheckUserEvent(int z, int x, int y, int type); CEventBlockingTable(); virtual ~CEventBlockingTable(); public: _EventBlockArray m_arSellBlock; _EventBlockArray m_arBuyBlock; _EventBlockArray m_arRepaireBlock; _EventBlockArray m_arCureBlock; _EventBlockArray m_arTelBlock; }; #endif // !defined(AFX_EVENTBLOCKINGTABLE_H__D3D9F2C1_E343_4F92_B8BB_43CD0B341E1B__INCLUDED_) <file_sep>/SERVERINFO.h /////////////////////////////////////////////////////////////////// // ServerDlg.h : header file for the CServerDlg class // Copyright (C) 2000, <NAME> // // If this code works, it was written by <NAME>. // If not, I don't know who wrote it. // #if !defined(AFX_SERVERINFO_H__7A7FB74E_858C_4B3F_86FC_850CBC924BE2__INCLUDED_) #define AFX_SERVERINFO_H__7A7FB74E_858C_4B3F_86FC_850CBC924BE2__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 typedef CTypedPtrArray <CPtrArray, int*> ZoneNumArray; typedef struct _ZONEINFO { int iSid; int iZone; int iPkMode; int iPotalX; int iPotalY; int iPotalX1; int iPotalY1; int iPotalZone; } ZONEINFO; typedef struct _TOWNPOTAL { int iPotalZone; POINT iPotal[3]; } TOWNPOTAL; class SERVERINFO { public: SERVERINFO(); virtual ~SERVERINFO(); int m_sid; int m_sDBIndex; CString m_szAddr; int m_nPort; CString m_strGTime; ZoneNumArray m_zone; }; #endif // !defined(AFX_SERVERINFO_H__7A7FB74E_858C_4B3F_86FC_850CBC924BE2__INCLUDED_) <file_sep>/MESSAGE.cpp // MESSAGE.cpp: implementation of the MESSAGE class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "MESSAGE.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// MESSAGE::MESSAGE() { m_ID = 0; ::ZeroMemory(m_MESSAGE_BODY, sizeof(m_MESSAGE_BODY)); } MESSAGE::~MESSAGE() { } <file_sep>/Recover.h // Recover.h: interface for the CRecover class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_RECOVER_H__84E9F8A9_AA80_40E2_B2A1_9F400FE53F29__INCLUDED_) #define AFX_RECOVER_H__84E9F8A9_AA80_40E2_B2A1_9F400FE53F29__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CRecover { public: short m_sSid; BYTE m_byHPDivide; BYTE m_bySPDivide; BYTE m_byPPDivide; BYTE m_byTown; CRecover(); virtual ~CRecover(); }; #endif // !defined(AFX_RECOVER_H__84E9F8A9_AA80_40E2_B2A1_9F400FE53F29__INCLUDED_) <file_sep>/MagicItemTable.cpp // MagicItemTable.cpp: implementation of the CMagicItemTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "MagicItemTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CMagicItemTable::CMagicItemTable() { } CMagicItemTable::~CMagicItemTable() { } int CMagicItemTable::GetMagicItemValue() { return m_sChangeValue; } <file_sep>/DNTable.h // DNTable.h: interface for the CDNTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_DNTABLE_H__1FFF660F_50C7_40CD_8F95_AE8D128E2A64__INCLUDED_) #define AFX_DNTABLE_H__1FFF660F_50C7_40CD_8F95_AE8D128E2A64__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CDNTable { public: CDNTable(); virtual ~CDNTable(); int m_sIndex; int m_sMinDn; int m_sMaxDn; }; #endif // !defined(AFX_DNTABLE_H__1FFF660F_50C7_40CD_8F95_AE8D128E2A64__INCLUDED_) <file_sep>/HuanshiTableSet.cpp #include "stdafx.h" #include "server.h" #include "HuanshiTableSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// IMPLEMENT_DYNAMIC(CHuanshiTableSet, CRecordset) CHuanshiTableSet::CHuanshiTableSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CHuanshiTableSet) m_sChangeValue = 0; m_sRandom = 0; m_sSubType = 0; m_strText = _T(""); m_tLevel = 0; m_tNeedClass = 0; m_tSid = 0; m_tUpgrade = 0; m_tWearInfo = 0; m_nFields = 9; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CHuanshiTableSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CHuanshiTableSet::GetDefaultSQL() { return _T("[dbo].[HUAN_STNO]"); } void CHuanshiTableSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CEBodyTableSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sChangeValue]"), m_sChangeValue); RFX_Int(pFX, _T("[sRandom]"), m_sRandom); RFX_Int(pFX, _T("[sSubType]"), m_sSubType); RFX_Text(pFX, _T("[strText]"), m_strText); RFX_Byte(pFX, _T("[tLevel]"), m_tLevel); RFX_Byte(pFX, _T("[tNeedClass]"), m_tNeedClass); RFX_Byte(pFX, _T("[tSid]"), m_tSid); RFX_Byte(pFX, _T("[tUpgrade]"), m_tUpgrade); RFX_Byte(pFX, _T("[tWearInfo]"), m_tWearInfo); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CHuanshiTableSet diagnostics #ifdef _DEBUG void CHuanshiTableSet::AssertValid() const { CRecordset::AssertValid(); } void CHuanshiTableSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/ItemTable.h // ItemTable.h: interface for the CItemTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_ITEMTABLE_H__B000F7FB_9D51_4A8A_A5C5_2342982FB23D__INCLUDED_) #define AFX_ITEMTABLE_H__B000F7FB_9D51_4A8A_A5C5_2342982FB23D__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CItemTable { public: short m_sSid; short m_sPid; CString m_strName; CString m_strText; BYTE m_byWeight; DWORD m_iDN; BYTE m_byRLevel; BYTE m_byClass; BYTE m_byWear; short m_sDuration; short m_sDefense; BYTE m_byAX; BYTE m_byAY; BYTE m_byAZ; BYTE m_byRstr; BYTE m_byRdex; BYTE m_byRvol; short m_sAttackDelay; BYTE m_byRange; BYTE m_byErrorRate; short m_sBullNum; BYTE m_byBullType; BYTE m_bySubDefense; BYTE m_bySubCure; short m_sRepairHP; short m_sRepairPP; short m_sRepairSP; short m_sEvent; short m_sZone; BYTE m_byMPP; short m_sCTime; BYTE m_bySpecial; CItemTable(); ~CItemTable(); }; #endif // !defined(AFX_ITEMTABLE_H__B000F7FB_9D51_4A8A_A5C5_2342982FB23D__INCLUDED_) <file_sep>/NpcTable.h // NpcTable.h: interface for the CNpcTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_NPCTABLE_H__C9E9FA22_E50D_4EAD_980F_2A89E9EDC094__INCLUDED_) #define AFX_NPCTABLE_H__C9E9FA22_E50D_4EAD_980F_2A89E9EDC094__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CNpcTable { public: int m_sSid; // MONSTER(NPC) Serial ID int m_sPid; // MONSTER(NPC) Picture ID TCHAR m_strName[20]; // MONSTER(NPC) Name int m_sSTR; // 힘 int m_sDEX; // 민첩 int m_sVOL; // 의지 int m_sWIS; // 지혜 int m_sMaxHP; // 최대 HP int m_sMaxPP; // 최대 PP BYTE m_byClass; // 무기계열 BYTE m_byClassLevel; // 무기계열 레벨 int m_sExp; // 경험치 int m_byAX; // 공격값 X int m_byAY; // 공격값 Y int m_byAZ; // 공격값 Z int m_iDefense; // 방어값 BYTE m_byRange; // 사정거리 int m_sAI; // 인공지능 인덱스 int m_sAttackDelay; // 공격딜레이 BYTE m_byVitalC; // 신체데미지 크리티컬 BYTE m_byWildShot; // 난사 레벨 BYTE m_byExcitedRate; // 흥분 레벨 BYTE m_byIronSkin; // 방어 BYTE m_byReAttack; // 반격 BYTE m_bySubAttack; // 상태이상 발생(부가공격) BYTE m_byState; // 몬스터 (NPC) 상태이상 BYTE m_byPsi; // 사이오닉 적용 BYTE m_byPsiLevel; // 사이오닉레벨 BYTE m_bySearchRange; // 적 탐지 범위 int m_sSpeed; // 이동속도 int m_sInclination; // 성향 (유저에게 시민등급향상을 준다.) BYTE m_byColor; // 보스급과 일반급 int m_sStandTime; // 서있는 시간 BYTE m_tNpcType; // NPC Type // 0 : Monster // 1 : Normal NPC int m_sFamilyType; // 몹들사이에서 가족관계를 결정한다. BYTE m_tItemPer; // 아이템이 떨어질 확률 BYTE m_tDnPer; // 돈이 떨어질확률 /* // int m_sCON; // 건강 // int m_sINT; // 지능 BYTE m_byCritical; // 크리티컬발생 레벨 BYTE m_byDamageC; // 데미지크리티컬 레벨 BYTE m_byDelayC; // 딜레이크리티컬 레벨 BYTE m_byOutRange; // 범위밖 사격 레벨 BYTE m_byBlowRate; // 연타 레벨 BYTE m_byCGuard; // 크리티컬가드 레벨 int m_sEvent; // 이벤트 번호 int m_sHaveItem; // 몬스터가 줄 당시 줄 아이템 인덱스 */ CNpcTable(); virtual ~CNpcTable(); }; #endif // !defined(AFX_NPCTABLE_H__C9E9FA22_E50D_4EAD_980F_2A89E9EDC094__INCLUDED_) <file_sep>/ItemTableSet.h #if !defined(AFX_ITEMTABLESET_H__BA9C5D6C_79A7_464A_B4AE_4DA76B5F4A54__INCLUDED_) #define AFX_ITEMTABLESET_H__BA9C5D6C_79A7_464A_B4AE_4DA76B5F4A54__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // ItemTableSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CItemTableSet recordset class CItemTableSet : public CRecordset { public: CItemTableSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CItemTableSet) // Field/Param Data //{{AFX_FIELD(CItemTableSet, CRecordset) BYTE m_byAX; BYTE m_byAY; BYTE m_byAZ; BYTE m_byBullType; BYTE m_byClass; BYTE m_byErrorRate; BYTE m_byMPP; BYTE m_byRange; BYTE m_byRdex; BYTE m_byRLevel; BYTE m_byRstr; BYTE m_byRvol; BYTE m_bySpecial; BYTE m_bySubCure; BYTE m_bySubDefense; BYTE m_byWear; BYTE m_byWeight; long m_iDN; int m_sAttackDelay; int m_sBullNum; int m_sCTime; int m_sDefense; int m_sDuration; int m_sEvent; int m_sPid; int m_sRepairHP; int m_sRepairPP; int m_sRepairSP; int m_sSid; CString m_strName; CString m_strText; int m_sZone; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CItemTableSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_ITEMTABLESET_H__BA9C5D6C_79A7_464A_B4AE_4DA76B5F4A54__INCLUDED_) <file_sep>/StoreSet.cpp // StoreSet.cpp : implementation file // #include "stdafx.h" #include "server.h" #include "StoreSet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CStoreSet IMPLEMENT_DYNAMIC(CStoreSet, CRecordset) CStoreSet::CStoreSet(CDatabase* pdb) : CRecordset(pdb) { //{{AFX_FIELD_INIT(CStoreSet) m_sStoreID = 0; m_sItemID = 0; m_nFields = 2; //}}AFX_FIELD_INIT m_nDefaultType = snapshot; } CString CStoreSet::GetDefaultConnect() { return _T("ODBC;DSN=drgame;UID=drgame;PWD=<PASSWORD>"); } CString CStoreSet::GetDefaultSQL() { return _T("[dbo].[STORE]"); } void CStoreSet::DoFieldExchange(CFieldExchange* pFX) { //{{AFX_FIELD_MAP(CStoreSet) pFX->SetFieldType(CFieldExchange::outputColumn); RFX_Int(pFX, _T("[sStoreID]"), m_sStoreID); RFX_Int(pFX, _T("[sItemID]"), m_sItemID); //}}AFX_FIELD_MAP } ///////////////////////////////////////////////////////////////////////////// // CStoreSet diagnostics #ifdef _DEBUG void CStoreSet::AssertValid() const { CRecordset::AssertValid(); } void CStoreSet::Dump(CDumpContext& dc) const { CRecordset::Dump(dc); } #endif //_DEBUG <file_sep>/SkillTable.cpp // SkillTable.cpp: implementation of the CSkillTable class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "SkillTable.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CSkillTable::CSkillTable() { } CSkillTable::~CSkillTable() { m_arInc.RemoveAll(); m_arRepair.RemoveAll(); m_arSuccess.RemoveAll(); } <file_sep>/RMBExchangeShop.cpp // OnlineShop.cpp: implementation of the OnlineShop class. // ////////////////////////////////////////////////////////////////////// #include "stdafx.h" #include "server.h" #include "RMBExchangeShop.h" #ifdef _DEBUG #undef THIS_FILE static char THIS_FILE[]=__FILE__; #define new DEBUG_NEW #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// CRMBExchangeShop::CRMBExchangeShop() { } CRMBExchangeShop::~CRMBExchangeShop() { } <file_sep>/RemodelingTable.h // RemodelingTable.h: interface for the CRemodelingTable class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_REMODELINGTABLE_H__3C8B8550_81F4_46C3_87F6_1CA1ACB31020__INCLUDED_) #define AFX_REMODELINGTABLE_H__3C8B8550_81F4_46C3_87F6_1CA1ACB31020__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class CRemodelingTable { public: CRemodelingTable(); virtual ~CRemodelingTable(); int m_sRid; int m_sSid; BYTE m_tWearInfo; BYTE m_tNeedClass; int m_sPlan; BYTE m_tRandom1; BYTE m_tRandom2; BYTE m_tReplace; }; #endif // !defined(AFX_REMODELINGTABLE_H__3C8B8550_81F4_46C3_87F6_1CA1ACB31020__INCLUDED_) <file_sep>/SSocket.h /////////////////////////////////////////////////////////////////////////////// // SSocket.h // #ifndef __SSOCKET_H #define __SSOCKET_H #include "Cbsocket.h" #include "IOCPSocket.h" class CIOCPBASE; class COM; ///////////////////////////////////////////////////////////////////////////// // CJSocket command target class CSSocket : public CBSocket { // Attributes public: int m_SNumber; int m_ReConnectFlag; int m_GameType; int m_GamePort; int m_DisconnectedCount; char m_GameName[50]; char m_GameMsg[50]; // Operations public: CSSocket( CIOCPBASE *pIocpbase = NULL ); virtual ~CSSocket(); // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CSSocket) public: virtual void OnClose(int nErrorCode); virtual void OnSend(int nErrorCode); //}}AFX_VIRTUAL public: void SoftClose(); void AutomataServer(char *pSBuf, int nByte); void ReceiveData(char *pBuf, int nByte); int Send( int length, char* pBuf); int Send( char* pBuf, int length ); int Init( int bufCreateFlag = 0 ); int RecycleRead(); void StopAction(); int AcceptProcess(); int SockCloseProcess(int nError=0); int SocketDisConnect(); BOOL GetPeerName( CString& addr, unsigned int& port); void ProcessData(); // Implementation protected: }; #endif <file_sep>/SxOnlineShop.h // OnlineShop.h: interface for the OnlineShop class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_SXONLINESHOP_H__8FC7E003_C60F_4477_B80F_9956092FCA11__INCLUDED_) #define AFX_SXONLINESHOP_H__8FC7E003_C60F_4477_B80F_9956092FCA11__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class SxOnlineShop { public: SxOnlineShop(); virtual ~SxOnlineShop(); public: short m_oSid; short m_iSid; TCHAR m_iSname[20]; short m_sLevel; short m_sDuration; short m_sBullNum; short m_tIQ; short m_prices; short m_iNum; short m_sx1; short m_sx2; short m_sx3; short m_sx4; short m_sx5; short m_upgrade; short m_sx6; short m_sx7; short m_sx8; short m_sx9; short m_sx10; TCHAR m_iText[50]; }; #endif // !defined(AFX_ONLINESHOP_H__8FC7E003_C60F_4477_B80F_9956092FCA11__INCLUDED_) <file_sep>/Mcommon.h #ifndef __COMMON_H #define __COMMON_H #define WAIT_RECV_DATA_BUFFER 4096 // 파라미터... //#define PACKET_DATA_SIZE SOCKET_BUF_SIZE #define MAX_USER_IN_WAIT 100 //#define AUTOMATA_THREAD 8 #define AUTOMATA_THREAD 4 #define MAX_ID 12 #define RECEIVE_BUF_SIZE SOCKET_BUF_SIZE #define DATA_BUF_SIZE_FOR_THREAD 8192 // USER STATUS #define USER_EXIST_IN_WAIT 0X01 #define USER_EXIST_IN_ROOM 0X02 #define USER_JUST_LOG_IN 0X03 #define USER_JUST_CONNECTED 0x04 #define USER_EXIST_IN_NEWUSER 0X05 // 스레드 명령어 파라메터... #define CMD_SERVER_CLOSE 1000 #define CMD_USER_CLOSE 2000 #define CMD_NEWWAITSERVER_CONNECTED 3000 #define CMD_ROOM_CLOSE 4000 // SOCKET TYPE... #define SOCKET_FOR_NONESTATE 0 #define SOCKET_FOR_SERVER 1 #define SOCKET_FOR_USER 2 #define SOCKET_FOR_USERSERVER 3 #define SOCKET_FOR_BUDDYSERVER 4 #define SOCKET_FOR_DBSERVER 5 #define SOCKET_FOR_ZONESERVER 6 // Add by JJS 2002.02.28 #define SOCKET_FOR_DISCONNECT 7 #define SOCKET_FOR_APPLICATION 9 struct __LOGINOUTTHREAD { int UID; BYTE CODE; char ID[CHAR_NAME_LENGTH+sizeof(int)+1]; __LOGINOUTTHREAD() { UID = -1; CODE = -1; ID[0] = '\0'; }; }; typedef __LOGINOUTTHREAD LOGINOUTTHREADDATA; struct __SQLDATAPACKET { int UID; BYTE code; int dcount; BYTE *pData; __SQLDATAPACKET() { UID = -1; code = -1; dcount = 0; pData = NULL; }; ~__SQLDATAPACKET() { if ( pData ) delete[] pData; pData = NULL; }; }; typedef __SQLDATAPACKET SQLDATAPACKET; struct __CHATDATAPACKET { int UID; BYTE code; int dcount; BYTE *pData; __CHATDATAPACKET() { UID = -1; code = -1; dcount = 0; pData = NULL; }; ~__CHATDATAPACKET() { if ( pData ) delete[] pData; pData = NULL; }; }; typedef __CHATDATAPACKET CHATDATAPACKET; //----------------------------------------------------------------------------- // FORTRESS PROTOCOL PART struct __FORTRESSDATAPACKET { short sFortressIndex; TCHAR FORTRESS[CHAR_NAME_LENGTH+sizeof(int)+1]; // GuildName __FORTRESSDATAPACKET() { sFortressIndex = -1; FORTRESS[0] = '\0'; }; }; typedef __FORTRESSDATAPACKET FORTRESSDATAPACKET; //----------------------------------------------------------------------------- // DB SERVER PROTOCOL PART #define DB_SERVER_IN_REQ ((BYTE)0x10) #define DB_SERVER_IN_RES ((BYTE)0x11) #endif <file_sep>/Kaixiangzi.h // Kaixiangzi.h: interface for the Kaixiangzi class. // ////////////////////////////////////////////////////////////////////// #if !defined(AFX_KAIXIANGZI_H__8FC7E003_C60F_4477_B80F_9956092FCA11__INCLUDED_) #define AFX_KAIXIANGZI_H__8FC7E003_C60F_4477_B80F_9956092FCA11__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 class Kaixiangzi { public: Kaixiangzi(); virtual ~Kaixiangzi(); public: short m_oSid; short m_iSid; TCHAR m_iSname[20]; short m_price; short m_iNum; short m_sx1; short m_sx2; short m_sx3; short m_sx4; short m_sx5; short m_upgrade; short m_sx6; short m_sx7; short m_sx8; short m_sx9; short m_sx10; TCHAR m_iText[50]; }; #endif // !defined(AFX_ONLINESHOP_H__8FC7E003_C60F_4477_B80F_9956092FCA11__INCLUDED_) <file_sep>/memdefine.h #ifndef _MEMDEFINE_H_ #define _MEMDEFINE_H_ #define MAX_USER 2000//yskang test code //#define MAX_USER 300 #define MAX_STORE 200 #define MAX_FORTRESS 20 #define CHAR_NAME_LENGTH 30 //--yskang 0.1 수정 길드 호칭부여 #define LOVE_NAME_LENGTH 60 //호칭의 최대길이... 색지정때문에 버퍼의 크기가 크짐... #define _SKILL_DB 240 #define _ITEM_DB 1500 #define _PSI_DB 100 #define _EVENT_DB 800 #define _TEL_DB 850 #define _CHANGE_DB 4 #define _QUICKITEM_DB 10 //#define _ABNORMAL_DB 5 #define _BANK_DB 1600 #define _ACCOUNT_BANK_DB 4750 #endif<file_sep>/PsiTableSet.h #if !defined(AFX_PSITABLESET_H__E896DE57_FDB1_487D_84F8_759EEEA07F27__INCLUDED_) #define AFX_PSITABLESET_H__E896DE57_FDB1_487D_84F8_759EEEA07F27__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 // PsiTableSet.h : header file // ///////////////////////////////////////////////////////////////////////////// // CPsiTableSet recordset class CPsiTableSet : public CRecordset { public: CPsiTableSet(CDatabase* pDatabase = NULL); DECLARE_DYNAMIC(CPsiTableSet) // Field/Param Data //{{AFX_FIELD(CPsiTableSet, CRecordset) long m_iNeedDN; long m_iNeedXP; int m_sBasic; int m_sDelayTime; int m_sHoldTime; int m_sLevelUp; int m_sPid; int m_sSid; CString m_strName; CString m_strText; BYTE m_tClass; BYTE m_tNeedLevel; int m_tNeedPP; BYTE m_tRange; BYTE m_tRegi; BYTE m_tTarget; //}}AFX_FIELD // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CPsiTableSet) public: virtual CString GetDefaultConnect(); // Default connection string virtual CString GetDefaultSQL(); // Default SQL for Recordset virtual void DoFieldExchange(CFieldExchange* pFX); // RFX support //}}AFX_VIRTUAL // Implementation #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif }; //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_PSITABLESET_H__E896DE57_FDB1_487D_84F8_759EEEA07F27__INCLUDED_)
05b5fbf24c6cc8046b7e9807089aff58a344c4d5
[ "C", "C++" ]
149
C++
xiamingxing/maya
ffb2e6583a75c894884fea46b19bea8ed1a590a5
50687354706d3fd041fb362b06eb705e838eed04
refs/heads/master
<repo_name>tanjak2005/ls_js_16<file_sep>/script.js // //1 // const dataTypes = [ // true, // 's', // 0, // null, // undefined, // {}, // [], // 0n, // Symbol(), // () => {}, // ] // const map = new Map([ // [true, { // value: '', // type: '', // }], // 's', // 0, // null, // undefined, // {}, // [], // 0n, // Symbol(), // () => {}, // ]); // console.log(map); // //2 // let countWords = ` Вместо тепла - зелень стекла // Вместо огня - дым // Из сетки календаря выхвачен день // Красное солнце сгорает дотла // День догорает с ним // На пылающий город падает тень // Перемен! // Требуют наши сердца // Перемен! // Требуют наши глаза // В нашем смехе и в наших слезах и в пульсации вен // Перемен! // Мы ждём перемен! // Электрический свет продолжает наш день // И коробка от спичек пуста // Но на кухне синим цветком горит газ // Сигареты в руках, чай на столе // Эта схема проста // И больше нет ничего // Всё находится в нас // Перемен! // Требуют наши сердца // Перемен! // Требуют наши глаза // В нашем смехе и в наших слезах и в пульсации вен // Перемен! // Мы ждём перемен! // Мы не можем похвастаться мудростью глаз // И умелыми жестами рук // Нам не нужно всё это, чтобы друг друга понять // Сигареты в руках, чай на столе // Так замыкается круг // И вдруг нам становится страшно что-то менять // Перемен! - требуют наши сердца // Перемен! - требуют наши глаза // В нашем смехе и в наших слезах и в пульсации вен // Перемен! // Мы ждём перемен! // Перемен! // Требуют наши сердца // Перемен! // Требуют наши глаза // В нашем смехе и в наших слезах и в пульсации вен // Перемен! // Мы ждём перемен!` // function countRepeatedWords(song) { // let countWords = song.split(" "); // let wordMap = {}; // console.log(countWords); // for (let i = 0; i < countWords.length; i++) { // let currentWordCount = wordMap[countWords[i]]; // let count = currentWordCount ? currentWordCount : 0; // wordMap[countWords[i]] = count + 1; // } // return wordMap; // }; // countRepeatedWords(countWords); // console.log(countRepeatedWords(countWords)); //2 const button = document.querySelector('#button'); const textarea = document.querySelector("#textarea"); const table = document.querySelector("table"); button.addEventListener('click', () => { const text = textarea.value .replace(/[.,\/#!$%\^&\*;:{}=\-_`()]/g, "") .replace(/\n/g, "") .toLowerCase(); let words = text.split(" "); words = words.filter((word) => word !== ""); const unicValues = new Set(words); unicValues.forEach((word) => { const row = ` <tr> <td>${word}</td> <td>${words.filter((w) => w === word).length}</td> </tr> `; table.innerHTML += row; }); console.log(unicValues); }); // console.log(button, textarea, table);
f4837bb0e38b3ed473553dcbed12c929aecdb9da
[ "JavaScript" ]
1
JavaScript
tanjak2005/ls_js_16
0aa0e1e52894e1cc462f90934c75367b06d49a22
571b40f460db3bd3e1ddbb092a4ef62534e24b2f
refs/heads/master
<repo_name>lucarosellini/kafka-kryo-codec<file_sep>/src/main/java/kafka/kryo/bson/DBObjectKryoDecoder.java package kafka.kryo.bson; import com.mongodb.DBObject; import kafka.kryo.KryoDecoder; import kafka.utils.VerifiableProperties; /** * Created by luca on 6/11/14. */ public class DBObjectKryoDecoder extends KryoDecoder<DBObject>{ @Override protected Class<DBObject> getRegisteredClass() { return DBObject.class; } public DBObjectKryoDecoder(VerifiableProperties props) { super(props); } public DBObjectKryoDecoder() { } @Override public DBObject fromBytes(byte[] serializedDoc) { return super.fromBytes(serializedDoc); } } <file_sep>/pom.xml <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <groupId>com.github.lucarosellini</groupId> <artifactId>kafka.kryo.codec</artifactId> <version>1.0.7</version> <name>kafka.kryo.codec</name> <description>kafka.kryo.codec</description> <url>https://github.com/lucarosellini/kafka-kryo-codec</url> <properties> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> </properties> <developers> <developer> <id>lucarosellini</id> <name><NAME></name> <email><EMAIL></email> <roles> <role>architect</role> <role>developer</role> <role>maintainer</role> </roles> </developer> </developers> <licenses> <license> <name>The Apache Software License, Version 2.0</name> <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url> <distribution>repo</distribution> </license> </licenses> <scm> <connection>scm:git:<EMAIL>:lucarosellini/kafka-kryo-codec.git</connection> <developerConnection>scm:git:<EMAIL>:lucarosellini/kafka-kryo-codec.git</developerConnection> <url>https://github.com/lucarosellini/kafka-kryo-codec</url> </scm> <repositories> <repository> <id>Typesafe repository</id> <snapshots> <enabled>true</enabled> </snapshots> <url>http://repo.typesafe.com/typesafe/releases/</url> </repository> </repositories> <dependencies> <dependency> <groupId>com.esotericsoftware.kryo</groupId> <artifactId>kryo</artifactId> <version>2.24.0</version> </dependency> <dependency> <groupId>log4j</groupId> <artifactId>log4j</artifactId> <version>1.2.17</version> </dependency> <dependency> <groupId>org.apache.kafka</groupId> <artifactId>kafka_2.10</artifactId> <version>0.8.1.1</version> </dependency> <dependency> <groupId>org.mongodb</groupId> <artifactId>mongo-java-driver</artifactId> <version>2.12.4</version> </dependency> </dependencies> <build> <plugins> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <version>2.5.1</version> <configuration> <source>1.6</source> <target>1.6</target> <encoding>UTF-8</encoding> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-source-plugin</artifactId> <executions> <execution> <phase>package</phase> <goals> <goal>jar</goal> </goals> </execution> </executions> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-javadoc-plugin</artifactId> <version>2.9</version> <configuration> <minmemory>64m</minmemory> <maxmemory>2g</maxmemory> <outputDirectory>${project.build.directory}</outputDirectory> <detectLinks>true</detectLinks> </configuration> <executions> <execution> <phase>package</phase> <goals> <goal>jar</goal> </goals> </execution> </executions> </plugin> </plugins> </build> </project>
f7dfd35cdbf630183d12d26b671aae90f44f462a
[ "Java", "Maven POM" ]
2
Java
lucarosellini/kafka-kryo-codec
b864ea01d684f92b4895519b1ae3b73c1fb19cc4
625bf755c3da7913fad7ff0717aa895e1dc4e89a
refs/heads/master
<file_sep>using System; using System.Collections.Generic; using System.Linq; using System.Reflection; using System.Text; using System.Threading.Tasks; namespace ExtensionSugar { public static class GenericExtSugars { public static bool IsNull<T>(this T obj) { if (obj is string) { return (obj as string).IsNullOrEmpty(); } return obj == null; } public static bool IsNotNull<T>(this T obj) { if (obj is string) { return !(obj as string).IsNullOrEmpty(); } return obj != null; } public static T IfNull<T>(this T obj, object failure) { return obj.IsNotNull() ? obj : failure.To<T>(); } public static T With<T>(this T item, Action<T> action) { if (item.IsNotNull()) { action(item); } return item; } public static TInput Do<TInput>(this TInput o, Action<TInput> action) { if (o.IsNotNull()) { action(o); } return o; } public static bool In<T>(this T obj, params T[] args) { return args.Contains(obj); } public static bool In<T>(this T obj, String[] args) { return Array.IndexOf(args, obj) > -1; } public static bool In<T>(this T obj, IEnumerable<T> args) { return args.Contains(obj); } public static bool In<T>(this T obj, List<T> args) { return args.Contains(obj); } public static T ReturnSelf<T>(this T Input, Func<T, bool> check, T failureValue) where T : class { if (Input.IsNull()) return failureValue; try { return check(Input) ? Input : failureValue; } catch (Exception e) { return failureValue; } } public static T To<T>(this object o) { try { return (T)o; } catch (Exception) { try { return (T)Convert.ChangeType(o, typeof(T)); } catch (Exception) { return default(T); } } } public static IEnumerable<T> ForEach<T>(this IEnumerable<T> enumeration, Action<T> action) { foreach (T item in enumeration) { action(item); } return enumeration; } public static object GetPropertyValue<T>(this T classInstance, string propertyName) { PropertyInfo property = classInstance.GetType().GetProperty(propertyName); if (property != null) return property.GetValue(classInstance, null); return null; } } } <file_sep>using System; namespace ExtensionSugar { public static class Path { public static string Desktop() { return Environment.GetFolderPath(Environment.SpecialFolder.Desktop); } public static string Programs() { return Environment.GetFolderPath(Environment.SpecialFolder.Programs); } public static string Personal() { return Environment.GetFolderPath(Environment.SpecialFolder.Personal); } public static string MyDocuments() { return Environment.GetFolderPath(Environment.SpecialFolder.MyDocuments); } public static string Favorites() { return Environment.GetFolderPath(Environment.SpecialFolder.Favorites); } public static string Startup() { return Environment.GetFolderPath(Environment.SpecialFolder.Startup); } public static string Recent() { return Environment.GetFolderPath(Environment.SpecialFolder.Recent); } public static string SendTo() { return Environment.GetFolderPath(Environment.SpecialFolder.SendTo); } public static string StartMenu() { return Environment.GetFolderPath(Environment.SpecialFolder.StartMenu); } public static string MyMusic() { return Environment.GetFolderPath(Environment.SpecialFolder.MyMusic); } public static string MyVideos() { return Environment.GetFolderPath(Environment.SpecialFolder.MyVideos); } public static string DesktopDirectory() { return Environment.GetFolderPath(Environment.SpecialFolder.DesktopDirectory); } public static string MyComputer() { return Environment.GetFolderPath(Environment.SpecialFolder.MyComputer); } public static string NetworkShortcuts() { return Environment.GetFolderPath(Environment.SpecialFolder.NetworkShortcuts); } public static string Fonts() { return Environment.GetFolderPath(Environment.SpecialFolder.Fonts); } public static string Templates() { return Environment.GetFolderPath(Environment.SpecialFolder.Templates); } public static string CommonStartMenu() { return Environment.GetFolderPath(Environment.SpecialFolder.CommonStartMenu); } public static string CommonPrograms() { return Environment.GetFolderPath(Environment.SpecialFolder.CommonPrograms); } public static string CommonStartup() { return Environment.GetFolderPath(Environment.SpecialFolder.CommonStartup); } public static string CommonDesktopDirectory() { return Environment.GetFolderPath(Environment.SpecialFolder.CommonDesktopDirectory); } public static string ApplicationData() { return Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData); } public static string PrinterShortcuts() { return Environment.GetFolderPath(Environment.SpecialFolder.PrinterShortcuts); } public static string LocalApplicationData() { return Environment.GetFolderPath(Environment.SpecialFolder.LocalApplicationData); } public static string InternetCache() { return Environment.GetFolderPath(Environment.SpecialFolder.InternetCache); } public static string Cookies() { return Environment.GetFolderPath(Environment.SpecialFolder.Cookies); } public static string History() { return Environment.GetFolderPath(Environment.SpecialFolder.History); } public static string CommonApplicationData() { return Environment.GetFolderPath(Environment.SpecialFolder.CommonApplicationData); } public static string Windows() { return Environment.GetFolderPath(Environment.SpecialFolder.Windows); } public static string System() { return Environment.GetFolderPath(Environment.SpecialFolder.System); } public static string ProgramFiles() { return Environment.GetFolderPath(Environment.SpecialFolder.ProgramFiles); } public static string MyPictures() { return Environment.GetFolderPath(Environment.SpecialFolder.MyPictures); } public static string UserProfile() { return Environment.GetFolderPath(Environment.SpecialFolder.UserProfile); } public static string SystemX86() { return Environment.GetFolderPath(Environment.SpecialFolder.SystemX86); } public static string ProgramFilesX86() { return Environment.GetFolderPath(Environment.SpecialFolder.ProgramFilesX86); } public static string CommonProgramFiles() { return Environment.GetFolderPath(Environment.SpecialFolder.CommonProgramFiles); } public static string CommonProgramFilesX86() { return Environment.GetFolderPath(Environment.SpecialFolder.CommonProgramFilesX86); } public static string CommonTemplates() { return Environment.GetFolderPath(Environment.SpecialFolder.CommonTemplates); } public static string CommonDocuments() { return Environment.GetFolderPath(Environment.SpecialFolder.CommonDocuments); } public static string CommonAdminTools() { return Environment.GetFolderPath(Environment.SpecialFolder.CommonAdminTools); } public static string AdminTools() { return Environment.GetFolderPath(Environment.SpecialFolder.AdminTools); } public static string CommonMusic() { return Environment.GetFolderPath(Environment.SpecialFolder.CommonMusic); } public static string CommonPictures() { return Environment.GetFolderPath(Environment.SpecialFolder.CommonPictures); } public static string CommonVideos() { return Environment.GetFolderPath(Environment.SpecialFolder.CommonVideos); } public static string Resources() { return Environment.GetFolderPath(Environment.SpecialFolder.Resources); } public static string LocalizedResources() { return Environment.GetFolderPath(Environment.SpecialFolder.LocalizedResources); } public static string CommonOemLinks() { return Environment.GetFolderPath(Environment.SpecialFolder.CommonOemLinks); } public static string CDBurning() { return Environment.GetFolderPath(Environment.SpecialFolder.CDBurning); } } } <file_sep>using System; using System.Collections.Generic; using System.Globalization; using System.Linq; namespace ExtensionSugar { public static class TypeExtSugars { public static Boolean IsDouble(this Type aType) { switch (Type.GetTypeCode(aType)) { case TypeCode.Decimal: case TypeCode.Double: return true; default: return false; } } public static Boolean IsDateTime(this Type aType) { switch (Type.GetTypeCode(aType)) { case TypeCode.DateTime: return true; default: return false; } } public static Boolean IsInt(this Type aType) { switch (Type.GetTypeCode(aType)) { case TypeCode.Int16: case TypeCode.Int32: case TypeCode.Int64: case TypeCode.Single: case TypeCode.UInt16: case TypeCode.UInt32: case TypeCode.UInt64: return true; default: return false; } } public static object GetDefaultValue(this Type t) { if (t.IsValueType && Nullable.GetUnderlyingType(t) == null) return Activator.CreateInstance(t); else return null; } public static object ToDefault(this Type targetType) { if (targetType == null) throw new NullReferenceException(); var mi = typeof(TypeExtSugars) .GetMethod("_ToDefaultHelper", System.Reflection.BindingFlags.Static | System.Reflection.BindingFlags.NonPublic); var generic = mi.MakeGenericMethod(targetType); var returnValue = generic.Invoke(null, new object[0]); return returnValue; } static T _ToDefaultHelper<T>() { return default(T); } public static bool IsCollection(this Type type) { return (type.IsGenericType && type.GetGenericTypeDefinition() == typeof(ICollection<>)); } public static bool IsHasCollectionInterface(this Type type) { return type.GetInterfaces().Any(typ => typ.IsGenericType && typ.GetGenericTypeDefinition() == typeof(ICollection<>) ); } } } <file_sep># ExtensionSugar Yardımcı Küçük Kod Parçaları ## Kullanışlı Örnekler ### In , SplitTo: ```Csharp using System; using ExtensionSugar; namespace BosKonsolUygulamasi { class Program { static void Main(string[] args) { SplitToExample(); Console.ReadLine(); } static void SplitToExample() { string ListB = "3,5,7"; for (int i = 1; i < 10; i++) { if (i.ToString().In(ListB.SplitTo<string>(','))) { Console.WriteLine("Değer Bulundu. Değer : " + i.ToString()); } } } } } ``` ### With : ```Csharp using System; using ExtensionSugar; namespace BosKonsolUygulamasi { class Program { static void Main(string[] args) { WithExample(); Console.ReadLine(); } static void WithExample() { new TestClass() .With(tsc => { tsc.Prop1 = "Hakan UÇAR"; tsc.Prop2 = DateTime.Now; tsc.Prop3 = 12; tsc.Prop4 = true; Console.WriteLine(tsc); Console.WriteLine(tsc.Prop1); Console.WriteLine(tsc.Prop2); Console.WriteLine(tsc.Prop3); Console.WriteLine(tsc.Prop4); }); } } class TestClass { public string Prop1 { get; set; } public DateTime Prop2 { get; set; } public double Prop3 { get; set; } public bool Prop4 { get; set; } } } ``` ### ReturnSelf: ```CSharp using System; using ExtensionSugar; namespace BosKonsolUygulamasi { class Program { static void Main(string[] args) { ReturnSelfExample(); Console.ReadLine(); } static void ReturnSelfExample() { var A = "hakan uçar"; Console.WriteLine( A.ReturnSelf(o => !o.IsNullOrEmpty(), "Öle bişi yok") ); } } } ``` ## Generic Extensions public static bool In<T>(this T obj, params T[] args); public static bool In<T>(this T obj, string[] args); public static bool In<T>(this T obj, IEnumerable<T> args); public static bool In<T>(this T obj, List<T> args); public static bool IsNotNull<T>(this T obj); public static bool IsNull<T>(this T obj); public static T ReturnSelf<T>(this T Input, Func<T, bool> check, T failureValue) where T : class; public static T With<T>(this T item, Action<T> action); ## String Extensions public static string AppendPrefixIfMissing(this string val, string prefix, bool ignoreCase = true); public static string AppendSuffixIfMissing(this string val, string suffix, bool ignoreCase = true); public static string Capitalize(this string s); public static int CountOccurrences(this string val, string stringToMatch); public static string CreateHashSha256(string val); public static string CreateHashSha512(string val); public static string Decrypt(this string stringToDecrypt, string key); public static bool DoesNotEndWith(this string val, string suffix); public static bool DoesNotStartWith(this string val, string prefix); public static string DoubleQuotes(this string text); public static string Encrypt(this string stringToEncrypt, string key); public static bool EndsWithIgnoreCase(this string val, string suffix); public static string FirstCharacter(this string val); public static string Format(this string value, object arg0); public static string Format(this string value, params object[] args); public static int GetByteSize(this string val, Encoding encoding); public static string GetDefaultIfEmpty(this string myValue, string defaultValue); public static string GetEmptyStringIfNull(this string val); public static int? GetLength(string val); public static string GetNullIfEmptyString(this string myValue); public static T IfDefault<T>(this string s, T Result); public static bool IsAlpha(this string val); public static bool IsAlphaNumeric(this string val); public static bool IsDateTime(this string data, string dateFormat); public static bool IsEmailAddress(this string email); public static bool IsInteger(this string val); public static bool IsLength(this string val, int minCharLength, int maxCharLength); public static bool IsMaxLength(this string val, int maxCharLength); public static bool IsMinLength(this string val, int minCharLength); public static bool IsNull(this string val); public static bool IsNullOrEmpty(this string val); public static bool IsNumeric(this string val); public static bool IsValidIPv4(this string val); public static string LastCharacter(this string val); public static string Left(this string val, int length); public static string ParseStringToCsv(this string val); public static IDictionary<string, string> QueryStringToDictionary(this string queryString); public static string Quotes(this string text); public static string RemoveChars(this string s, params char[] chars); public static string RemovePrefix(this string val, string prefix, bool ignoreCase = true); public static string RemoveSuffix(this string val, string suffix, bool ignoreCase = true); public static string Replace(this string s, params char[] chars); public static string ReplaceLineFeeds(this string val); public static string Reverse(this string val); public static string ReverseSlash(this string val, int direction); public static string Right(this string val, int length); public static IEnumerable<T> SplitTo<T>(this string str, string[] separator) where T : IConvertible; public static IEnumerable<T> SplitTo<T>(this string str, params char[] separator) where T : IConvertible; public static IEnumerable<T> SplitTo<T>(this string str, StringSplitOptions options, params char[] separator) where T : IConvertible; public static bool StartsWithIgnoreCase(this string val, string prefix); public static string SurroundWith(this string text, string ends); public static bool ToBoolean(this string value); public static byte[] ToBytes(this string val); public static DateTime ToDateTime(this string s, string format = "ddMMyyyy", string cultureString = "tr-TR"); public static DateTime ToDateTime(this string s, string format, CultureInfo culture); public static decimal ToDecimal(this string value); public static T ToEnum<T>(this string value, T defaultValue = default(T)) where T : struct; public static short ToInt16(this string value); public static int ToInt32(this string value); public static long ToInt64(this string value); [IteratorStateMachine(typeof(<ToTextElements>d__46))] public static IEnumerable<string> ToTextElements(this string val); public static string Truncate(this string s, int maxLength); ## Attribute Extensions public static TValue GetAttributeValue<TAttribute, TValue>(this Type type, Func<TAttribute, TValue> valueSelector) where TAttribute : Attribute; public static TValue GetPropAttributeValue<TAttribute, TValue>(this Type type, string MemberName, Func<TAttribute, TValue> valueSelector, bool inherit = false) where TAttribute : Attribute; ## Type Extensions public static object ChangeStrType(this string aText, Type aType); public static object GetDefaultValue(this Type t); public static bool IsDateTime(this Type aType); public static bool IsDouble(this Type aType); public static bool IsInt(this Type aType); public static object ToDefault(this Type targetType); <file_sep>using System; using System.Collections.Generic; using System.Linq; using System.Reflection; using System.Text; using System.Threading.Tasks; namespace ExtensionSugar { public static class ReflectionExtSugars { public static PropertyInfo GetProperty<T>(this T ClassObject, string PropName) where T : class { return ClassObject.GetType().GetProperty(PropName); } public static object GetPropertyValue<T>(this T ClassObject, string PropName) where T : class { return ClassObject.GetProperty(PropName).GetValue(ClassObject); } } } <file_sep>using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace ExtensionSugar { public enum enTimeZone { DatelineStandardTime = 0, UTC11 = 1, AleutianStandardTime = 2, HawaiianStandardTime = 3, MarquesasStandardTime = 4, AlaskanStandardTime = 5, UTC09 = 6, PacificStandardTimeMexico = 7, UTC08 = 8, PacificStandard_Time = 9, USMountainStandardTime =10, MountainStandardTimeMexico =11, MountainStandardTime = 12, CentralStandardTimeMexico = 13, CentralStandardTime = 14, CentralAmericaStandardTime = 15, EasterIslandStandardTime = 16, CanadaCentralStandardTime = 17, SAPacificStandardTime = 18, EasternStandardTimeMexico = 19, EasternStandardTime = 20, HaitiStandardTime = 21, CubaStandardTime = 22, USEasternStandardTime = 23, TurksAndCaicosStandardTime = 24, ParaguayStandardTime = 25, AtlanticStandardTime = 26, CentralBrazilianStandardTime = 27, SAWesternStandardTime = 28, VenezuelaStandardTime = 29, PacificSAStandardTime = 30, NewfoundlandStandardTime = 31, TocantinsStandardTime = 32, ESouthAmericaStandardTime = 33, ArgentinaStandardTime = 34, SAEasternStandardTime = 35, GreenlandStandardTime = 36, MontevideoStandardTime = 37, MagallanesStandardTime = 38, SaintPierreStandardTime = 39, BahiaStandardTime = 40, UTC02 = 41, MidAtlanticStandardTime = 42, AzoresStandardTime = 43, CapeVerdeStandardTime = 44, UTC = 45, GMTStandardTime = 46, GreenwichStandardTime = 47, SaoTomeStandardTime = 48, MoroccoStandardTime = 49, WEuropeStandardTime = 50, CentralEuropeStandardTime = 51, RomanceStandardTime = 52, WCentralAfricaStandardTime = 53, CentralEuropeanStandardTime = 54, JordanStandardTime = 55, GTBStandardTime = 56, MiddleEastStandardTime = 57, EEuropeStandardTime = 58, WestBankStandardTime = 59, SouthAfricaStandardTime = 60, SudanStandardTime = 61, FLEStandardTime = 62, EgyptStandardTime = 63, KaliningradStandardTime = 64, IsraelStandardTime = 65, LibyaStandardTime = 66, NamibiaStandardTime = 67, SyriaStandardTime = 68, ArabicStandardTime = 69, ArabStandardTime = 70, BelarusStandardTime = 71, RussianStandardTime = 72, EAfricaStandardTime = 73, TurkeyStandardTime = 74, IranStandardTime = 75, ArabianStandardTime = 76, AstrakhanStandardTime = 77, AzerbaijanStandardTime = 78, CaucasusStandardTime = 79, MauritiusStandardTime = 80, SaratovStandardTime = 81, GeorgianStandardTime = 82, VolgogradStandardTime = 83, RussiaTimeZone3 = 84, AfghanistanStandardTime = 85, WestAsiaStandardTime = 86, QyzylordaStandardTime = 87, EkaterinburgStandardTime = 88, PakistanStandardTime = 89, IndiaStandardTime = 90, SriLankaStandardTime = 91, NepalStandardTime = 92, CentralAsiaStandardTime = 93, BangladeshStandardTime = 94, OmskStandardTime = 95, MyanmarStandardTime = 96, SEAsiaStandardTime = 97, AltaiStandardTime = 98, WMongoliaStandardTime = 99, NorthAsiaStandardTime = 100, NCentralAsiaStandardTime = 101, TomskStandardTime = 102, SingaporeStandardTime = 103, ChinaStandardTime = 104, WAustraliaStandardTime = 105, TaipeiStandardTime = 106, UlaanbaatarStandardTime = 107, NorthAsiaEastStandardTime = 108, AusCentralWStandardTime = 109, TransbaikalStandardTime = 110, TokyoStandardTime = 111, NorthKoreaStandardTime = 112, KoreaStandardTime = 113, YakutskStandardTime = 114, CenAustraliaStandardTime = 115, AUSCentralStandardTime = 116, EAustraliaStandardTime = 117, WestPacificStandardTime = 118, TasmaniaStandardTime = 119, AUSEasternStandardTime = 120, VladivostokStandardTime = 121, LordHoweStandardTime = 122, BougainvilleStandardTime = 123, MagadanStandardTime = 124, NorfolkStandardTime = 125, SakhalinStandardTime = 126, CentralPacificStandardTime = 127, RussiaTimeZone10 = 128, RussiaTimeZone11 = 129, NewZealandStandardTime = 130, UTC12 = 131, FijiStandardTime = 132, KamchatkaStandardTime = 133, ChathamIslandsStandardTime = 134, UTC13 = 135, TongaStandardTime = 136, SamoaStandardTime = 137, LineIslandsStandardTime = 138, } public static class DateTimeExtSugars { private static Dictionary<enTimeZone, string> ZoneIdName = new Dictionary<enTimeZone, string>() { {enTimeZone.DatelineStandardTime ,"Dateline Standard Time"}, {enTimeZone.UTC11 ,"UTC-11"}, {enTimeZone.AleutianStandardTime ,"Aleutian Standard Time"}, {enTimeZone.HawaiianStandardTime ,"Hawaiian Standard Time"}, {enTimeZone.MarquesasStandardTime ,"Marquesas Standard Time"}, {enTimeZone.AlaskanStandardTime ,"Alaskan Standard Time"}, {enTimeZone.UTC09 ,"UTC-09"}, {enTimeZone.PacificStandardTimeMexico ,"Pacific Standard Time (Mexico)"}, {enTimeZone.UTC08 ,"UTC-08"}, {enTimeZone.PacificStandard_Time ,"Pacific Standard Time"}, {enTimeZone.USMountainStandardTime ,"US Mountain Standard Time"}, {enTimeZone.MountainStandardTimeMexico ,"Mountain Standard Time (Mexico)"}, {enTimeZone.MountainStandardTime ,"Mountain Standard Time"}, {enTimeZone.CentralStandardTimeMexico ,"Central Standard Time (Mexico)"}, {enTimeZone.CentralStandardTime ,"Central Standard Time"}, {enTimeZone.CentralAmericaStandardTime ,"Central America Standard Time"}, {enTimeZone.EasterIslandStandardTime ,"Easter Island Standard Time"}, {enTimeZone.CanadaCentralStandardTime ,"Canada Central Standard Time"}, {enTimeZone.SAPacificStandardTime ,"SA Pacific Standard Time"}, {enTimeZone.EasternStandardTimeMexico ,"Eastern Standard Time (Mexico)"}, {enTimeZone.EasternStandardTime ,"Eastern Standard Time"}, {enTimeZone.HaitiStandardTime ,"Haiti Standard Time"}, {enTimeZone.CubaStandardTime ,"Cuba Standard Time"}, {enTimeZone.USEasternStandardTime,"US Eastern Standard Time"}, {enTimeZone.TurksAndCaicosStandardTime ,"Turks And Caicos Standard Time"}, {enTimeZone.ParaguayStandardTime ,"Paraguay Standard Time"}, {enTimeZone.AtlanticStandardTime ,"Atlantic Standard Time"}, {enTimeZone.CentralBrazilianStandardTime ,"Central Brazilian Standard Time"}, {enTimeZone.SAWesternStandardTime ,"SA Western Standard Time"}, {enTimeZone.VenezuelaStandardTime ,"Venezuela Standard Time"}, {enTimeZone.PacificSAStandardTime ,"Pacific SA Standard Time"}, {enTimeZone.NewfoundlandStandardTime,"Newfoundland Standard Time"}, {enTimeZone.TocantinsStandardTime ,"Tocantins Standard Time"}, {enTimeZone.ESouthAmericaStandardTime ,"E. South America Standard Time"}, {enTimeZone.ArgentinaStandardTime ,"Argentina Standard Time"}, {enTimeZone.SAEasternStandardTime ,"SA Eastern Standard Time"}, {enTimeZone.GreenlandStandardTime ,"Greenland Standard Time"}, {enTimeZone.MontevideoStandardTime ,"Montevideo Standard Time"}, {enTimeZone.MagallanesStandardTime ,"Magallanes Standard Time"}, {enTimeZone.SaintPierreStandardTime,"Saint Pierre Standard Time"}, {enTimeZone.BahiaStandardTime ,"Bahia Standard Time"}, {enTimeZone.UTC02 ,"UTC-02"}, {enTimeZone.MidAtlanticStandardTime ,"Mid-Atlantic Standard Time"}, {enTimeZone.AzoresStandardTime,"Azores Standard Time"}, {enTimeZone.CapeVerdeStandardTime ,"Cape Verde Standard Time"}, {enTimeZone.UTC ,"UTC"}, {enTimeZone.GMTStandardTime ,"GMT Standard Time"}, {enTimeZone.GreenwichStandardTime ,"Greenwich Standard Time"}, {enTimeZone.SaoTomeStandardTime ,"Sao Tome Standard Time"}, {enTimeZone.MoroccoStandardTime ,"Morocco Standard Time"}, {enTimeZone.WEuropeStandardTime ,"W. Europe Standard Time"}, {enTimeZone.CentralEuropeStandardTime ,"Central Europe Standard Time"}, {enTimeZone.RomanceStandardTime ,"Romance Standard Time"}, {enTimeZone.WCentralAfricaStandardTime ,"W. Central Africa Standard Time"}, {enTimeZone.CentralEuropeanStandardTime ,"Central European Standard Time"}, {enTimeZone.JordanStandardTime ,"Jordan Standard Time"}, {enTimeZone.GTBStandardTime ,"GTB Standard Time"}, {enTimeZone.MiddleEastStandardTime ,"Middle East Standard Time"}, {enTimeZone.EEuropeStandardTime ,"E. Europe Standard Time"}, {enTimeZone.WestBankStandardTime ,"West Bank Standard Time"}, {enTimeZone.SouthAfricaStandardTime ,"South Africa Standard Time"}, {enTimeZone.SudanStandardTime ,"Sudan Standard Time"}, {enTimeZone.FLEStandardTime ,"FLE Standard Time"}, {enTimeZone.EgyptStandardTime ,"Egypt Standard Time"}, {enTimeZone.KaliningradStandardTime,"Kaliningrad Standard Time"}, {enTimeZone.IsraelStandardTime ,"Israel Standard Time"}, {enTimeZone.LibyaStandardTime ,"Libya Standard Time"}, {enTimeZone.NamibiaStandardTime ,"Namibia Standard Time"}, {enTimeZone.SyriaStandardTime ,"Syria Standard Time"}, {enTimeZone.ArabicStandardTime ,"Arabic Standard Time"}, {enTimeZone.ArabStandardTime ,"Arab Standard Time"}, {enTimeZone.BelarusStandardTime ,"Belarus Standard Time"}, {enTimeZone.RussianStandardTime ,"Russian Standard Time"}, {enTimeZone.EAfricaStandardTime ,"E. Africa Standard Time"}, {enTimeZone.TurkeyStandardTime ,"Turkey Standard Time"}, {enTimeZone.IranStandardTime ,"Iran Standard Time"}, {enTimeZone.ArabianStandardTime ,"Arabian Standard Time"}, {enTimeZone.AstrakhanStandardTime ,"Astrakhan Standard Time"}, {enTimeZone.AzerbaijanStandardTime ,"Azerbaijan Standard Time"}, {enTimeZone.CaucasusStandardTime ,"Caucasus Standard Time"}, {enTimeZone.MauritiusStandardTime ,"Mauritius Standard Time"}, {enTimeZone.SaratovStandardTime ,"Saratov Standard Time"}, {enTimeZone.GeorgianStandardTime,"Georgian Standard Time"}, {enTimeZone.VolgogradStandardTime ,"Volgograd Standard Time"}, {enTimeZone.RussiaTimeZone3 ,"Russia Time Zone 3"}, {enTimeZone.AfghanistanStandardTime,"Afghanistan Standard Time"}, {enTimeZone.WestAsiaStandardTime,"West Asia Standard Time"}, {enTimeZone.QyzylordaStandardTime ,"Qyzylorda Standard Time"}, {enTimeZone.EkaterinburgStandardTime ,"Ekaterinburg Standard Time"}, {enTimeZone.PakistanStandardTime,"Pakistan Standard Time"}, {enTimeZone.IndiaStandardTime,"India Standard Time"}, {enTimeZone.SriLankaStandardTime,"Sri Lanka Standard Time"}, {enTimeZone.NepalStandardTime,"Nepal Standard Time"}, {enTimeZone.CentralAsiaStandardTime ,"Central Asia Standard Time"}, {enTimeZone.BangladeshStandardTime ,"Bangladesh Standard Time"}, {enTimeZone.OmskStandardTime ,"Omsk Standard Time"}, {enTimeZone.MyanmarStandardTime ,"Myanmar Standard Time"}, {enTimeZone.SEAsiaStandardTime ,"SE Asia Standard Time"}, {enTimeZone.AltaiStandardTime ,"Altai Standard Time"}, {enTimeZone.WMongoliaStandardTime ,"W. Mongolia Standard Time"}, {enTimeZone.NorthAsiaStandardTime ,"North Asia Standard Time"}, {enTimeZone.NCentralAsiaStandardTime ,"N. Central Asia Standard Time"}, {enTimeZone.TomskStandardTime ,"Tomsk Standard Time"}, {enTimeZone.SingaporeStandardTime ,"Singapore Standard Time"}, {enTimeZone.ChinaStandardTime ,"China Standard Time"}, {enTimeZone.WAustraliaStandardTime ,"W. Australia Standard Time"}, {enTimeZone.TaipeiStandardTime ,"Taipei Standard Time"}, {enTimeZone.UlaanbaatarStandardTime ,"Ulaanbaatar Standard Time"}, {enTimeZone.NorthAsiaEastStandardTime ,"North Asia East Standard Time"}, {enTimeZone.AusCentralWStandardTime ,"Aus Central W. Standard Time"}, {enTimeZone.TransbaikalStandardTime ,"Transbaikal Standard Time"}, {enTimeZone.TokyoStandardTime ,"Tokyo Standard Time"}, {enTimeZone.NorthKoreaStandardTime ,"North Korea Standard Time"}, {enTimeZone.KoreaStandardTime ,"Korea Standard Time"}, {enTimeZone.YakutskStandardTime ,"Yakutsk Standard Time"}, {enTimeZone.CenAustraliaStandardTime ,"Cen. Australia Standard Time"}, {enTimeZone.AUSCentralStandardTime ,"AUS Central Standard Time"}, {enTimeZone.EAustraliaStandardTime,"E. Australia Standard Time"}, {enTimeZone.WestPacificStandardTime ,"West Pacific Standard Time"}, {enTimeZone.TasmaniaStandardTime ,"Tasmania Standard Time"}, {enTimeZone.AUSEasternStandardTime ,"AUS Eastern Standard Time"}, {enTimeZone.VladivostokStandardTime ,"Vladivostok Standard Time"}, {enTimeZone.LordHoweStandardTime ,"Lord Howe Standard Time"}, {enTimeZone.BougainvilleStandardTime ,"Bougainville Standard Time"}, {enTimeZone.MagadanStandardTime ,"Magadan Standard Time"}, {enTimeZone.NorfolkStandardTime ,"Norfolk Standard Time"}, {enTimeZone.SakhalinStandardTime ,"Sakhalin Standard Time"}, {enTimeZone.CentralPacificStandardTime ,"Central Pacific Standard Time"}, {enTimeZone.RussiaTimeZone10 ,"Russia Time Zone 10"}, {enTimeZone.RussiaTimeZone11 ,"Russia Time Zone 11"}, {enTimeZone.NewZealandStandardTime ,"New Zealand Standard Time"}, {enTimeZone.UTC12 ,"UTC+12"}, {enTimeZone.FijiStandardTime ,"Fiji Standard Time"}, {enTimeZone.KamchatkaStandardTime,"Kamchatka Standard Time"}, {enTimeZone.ChathamIslandsStandardTime ,"Chatham Islands Standard Time"}, {enTimeZone.UTC13 ,"UTC+13"}, {enTimeZone.TongaStandardTime ,"Tonga Standard Time"}, {enTimeZone.SamoaStandardTime ,"Samoa Standard Time"}, {enTimeZone.LineIslandsStandardTime ,"Line Islands Standard Time"} }; public static DateTime ToZoneLocalDate(this DateTime LocalDate,enTimeZone Zone) { TimeZoneInfo cstZone = TimeZoneInfo.FindSystemTimeZoneById(ZoneIdName[Zone]); DateTime utcTime = LocalDate.ToUniversalTime(); return TimeZoneInfo.ConvertTimeFromUtc(utcTime, cstZone); } } } <file_sep>using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace ExtensionSugar { public static class AttributeExtSugar { public static TValue GetAttributeValue<TAttribute, TValue>(this Type type,Func<TAttribute, TValue> valueSelector) where TAttribute : Attribute { var att = type.GetCustomAttributes( typeof(TAttribute), true ).FirstOrDefault() as TAttribute; if (att != null) { return valueSelector(att); } return default(TValue); } public static TValue GetPropAttributeValue<TAttribute, TValue>( this Type type, string MemberName, Func<TAttribute, TValue> valueSelector, bool inherit = false) where TAttribute : Attribute { var att = type .GetMember(MemberName) .FirstOrDefault() .GetCustomAttributes( typeof(TAttribute), inherit ) .FirstOrDefault() as TAttribute; if (att != null) { return valueSelector(att); } return default(TValue); } } } <file_sep>using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Reflection; using System.Text; using System.Threading.Tasks; namespace ExtensionSugar { public static class ResourceExtSugars { public static string Read(ReadType rdType, string resourceName) { Assembly assembly = null; switch (rdType) { case ReadType.Calling: assembly = Assembly.GetCallingAssembly(); break; case ReadType.Entry: assembly = Assembly.GetEntryAssembly(); break; case ReadType.Executing: assembly = Assembly.GetExecutingAssembly(); break; default: break; } using (Stream stream = assembly.GetManifestResourceStream(resourceName)) using (StreamReader reader = new StreamReader(stream)) { return reader.ReadToEnd(); } } public static string Read(Assembly oAsmp, string resourceName) { using (Stream stream = oAsmp.GetManifestResourceStream(resourceName)) using (StreamReader reader = new StreamReader(stream)) { return reader.ReadToEnd(); } } } public enum ReadType { Calling = 0, Entry = 1, Executing = 2 } } <file_sep>using System; using System.Collections.Generic; using System.Dynamic; using System.Globalization; using System.Linq; using System.Security.Cryptography; using System.Text; using System.Text.RegularExpressions; namespace ExtensionSugar { public static class StringExtSugars { public static bool IsDateTime(this string data, string dateFormat) { // ReSharper disable once RedundantAssignment DateTime dateVal = default(DateTime); return DateTime.TryParseExact(data, dateFormat, CultureInfo.InvariantCulture, DateTimeStyles.None, out dateVal); } public static int ToInt32(this string value) { int number; Int32.TryParse(value, out number); return number; } public static long ToInt64(this string value) { long number; Int64.TryParse(value, out number); return number; } public static short ToInt16(this string value) { short number; Int16.TryParse(value, out number); return number; } public static Decimal ToDecimal(this string value) { Decimal number; Decimal.TryParse(value, NumberStyles.Any, new NumberFormatInfo() { NumberDecimalSeparator = "," }, out number); return number; } public static bool ToBoolean(this string value) { if (string.IsNullOrEmpty(value) || string.IsNullOrWhiteSpace(value)) { throw new ArgumentException("value"); } string val = value.ToLower().Trim(); switch (val) { case "false": return false; case "f": return false; case "true": return true; case "t": return true; case "yes": return true; case "no": return false; case "y": return true; case "n": return false; default: throw new ArgumentException("Invalid boolean"); } } public static IEnumerable<T> SplitTo<T>(this string str, params char[] separator) where T : IConvertible { return str.Split(separator, StringSplitOptions.None).Select(s => (T)Convert.ChangeType(s, typeof(T))); } public static IEnumerable<T> SplitTo<T>(this string str, StringSplitOptions options, params char[] separator) where T : IConvertible { return str.Split(separator, options).Select(s => (T)Convert.ChangeType(s, typeof(T))); } public static T ToEnum<T>(this string value, T defaultValue = default(T)) where T : struct { if (!typeof(T).IsEnum) { throw new ArgumentException("Type T Must of type System.Enum"); } T result; bool isParsed = Enum.TryParse(value, true, out result); return isParsed ? result : defaultValue; } public static string Format(this string value, object arg0) { return string.Format(value, arg0); } public static string Format(this string value, params object[] args) { return string.Format(value, args); } public static string GetEmptyStringIfNull(this string val) { return (val != null ? val.Trim() : ""); } public static string GetNullIfEmptyString(this string myValue) { if (myValue == null || myValue.Length <= 0) { return null; } myValue = myValue.Trim(); if (myValue.Length > 0) { return myValue; } return null; } public static bool IsInteger(this string val) { // Variable to collect the Return value of the TryParse method. // Define variable to collect out parameter of the TryParse method. If the conversion fails, the out parameter is zero. int retNum; // The TryParse method converts a string in a specified style and culture-specific format to its double-precision floating point number equivalent. // The TryParse method does not generate an exception if the conversion fails. If the conversion passes, True is returned. If it does not, False is returned. bool isNum = Int32.TryParse(val, NumberStyles.Any, NumberFormatInfo.InvariantInfo, out retNum); return isNum; } public static string Capitalize(this string s) { if (s.Length == 0) { return s; } return s.Substring(0, 1).ToUpper() + s.Substring(1).ToLower(); } public static string FirstCharacter(this string val) { return (!string.IsNullOrEmpty(val)) ? (val.Length >= 1) ? val.Substring(0, 1) : val : null; } public static string LastCharacter(this string val) { return (!string.IsNullOrEmpty(val)) ? (val.Length >= 1) ? val.Substring(val.Length - 1, 1) : val : null; } public static bool EndsWithIgnoreCase(this string val, string suffix) { if (val == null) { throw new ArgumentNullException("val", "val parameter is null"); } if (suffix == null) { throw new ArgumentNullException("suffix", "suffix parameter is null"); } if (val.Length < suffix.Length) { return false; } return val.EndsWith(suffix, StringComparison.InvariantCultureIgnoreCase); } public static bool StartsWithIgnoreCase(this string val, string prefix) { if (val == null) { throw new ArgumentNullException("val", "val parameter is null"); } if (prefix == null) { throw new ArgumentNullException("prefix", "prefix parameter is null"); } if (val.Length < prefix.Length) { return false; } return val.StartsWith(prefix, StringComparison.InvariantCultureIgnoreCase); } public static string Replace(this string s, params char[] chars) { return chars.Aggregate(s, (current, c) => current.Replace(c.ToString(CultureInfo.InvariantCulture), "")); } public static string RemoveChars(this string s, params char[] chars) { var sb = new StringBuilder(s.Length); foreach (char c in s.Where(c => !chars.Contains(c))) { sb.Append(c); } return sb.ToString(); } public static bool IsEmailAddress(this string email) { string pattern = "^[a-zA-Z][\\w\\.-]*[a-zA-Z0-9]@[a-zA-Z0-9][\\w\\.-]*[a-zA-Z0-9]\\.[a-zA-Z][a-zA-Z\\.]*[a-zA-Z]$"; return Regex.Match(email, pattern).Success; } public static bool IsNumeric(this string val) { // Variable to collect the Return value of the TryParse method. // Define variable to collect out parameter of the TryParse method. If the conversion fails, the out parameter is zero. double retNum; // The TryParse method converts a string in a specified style and culture-specific format to its double-precision floating point number equivalent. // The TryParse method does not generate an exception if the conversion fails. If the conversion passes, True is returned. If it does not, False is returned. bool isNum = Double.TryParse(val, NumberStyles.Any, NumberFormatInfo.InvariantInfo, out retNum); return isNum; } public static string Truncate(this string s, int maxLength) { if (String.IsNullOrEmpty(s) || maxLength <= 0) { return String.Empty; } if (s.Length > maxLength) { return s.Substring(0, maxLength) + "..."; } return s; } public static string GetDefaultIfEmpty(this string myValue, string defaultValue) { if (!String.IsNullOrEmpty(myValue)) { myValue = myValue.Trim(); return myValue.Length > 0 ? myValue : defaultValue; } return defaultValue; } public static byte[] ToBytes(this string val) { var bytes = new byte[val.Length * sizeof(char)]; Buffer.BlockCopy(val.ToCharArray(), 0, bytes, 0, bytes.Length); return bytes; } public static string Reverse(this string val) { var chars = new char[val.Length]; for (int i = val.Length - 1, j = 0; i >= 0; --i, ++j) { chars[j] = val[i]; } val = new String(chars); return val; } public static string ParseStringToCsv(this string val) { return '"' + GetEmptyStringIfNull(val).Replace("\"", "\"\"") + '"'; } public static string Encrypt(this string stringToEncrypt, string key) { var cspParameter = new CspParameters { KeyContainerName = key }; var rsaServiceProvider = new RSACryptoServiceProvider(cspParameter) { PersistKeyInCsp = true }; byte[] bytes = rsaServiceProvider.Encrypt(Encoding.UTF8.GetBytes(stringToEncrypt), true); return BitConverter.ToString(bytes); } public static string Decrypt(this string stringToDecrypt, string key) { var cspParamters = new CspParameters { KeyContainerName = key }; var rsaServiceProvider = new RSACryptoServiceProvider(cspParamters) { PersistKeyInCsp = true }; string[] decryptArray = stringToDecrypt.Split(new[] { "-" }, StringSplitOptions.None); byte[] decryptByteArray = Array.ConvertAll(decryptArray, (s => Convert.ToByte(byte.Parse(s, NumberStyles.HexNumber)))); byte[] bytes = rsaServiceProvider.Decrypt(decryptByteArray, true); string result = Encoding.UTF8.GetString(bytes); return result; } public static int CountOccurrences(this string val, string stringToMatch) { return Regex.Matches(val, stringToMatch, RegexOptions.IgnoreCase).Count; } public static string RemovePrefix(this string val, string prefix, bool ignoreCase = true) { if (!string.IsNullOrEmpty(val) && (ignoreCase ? val.StartsWithIgnoreCase(prefix) : val.StartsWith(prefix))) { return val.Substring(prefix.Length, val.Length - prefix.Length); } return val; } public static string RemoveSuffix(this string val, string suffix, bool ignoreCase = true) { if (!string.IsNullOrEmpty(val) && (ignoreCase ? val.EndsWithIgnoreCase(suffix) : val.EndsWith(suffix))) { return val.Substring(0, val.Length - suffix.Length); } return null; } public static string AppendSuffixIfMissing(this string val, string suffix, bool ignoreCase = true) { if (string.IsNullOrEmpty(val) || (ignoreCase ? val.EndsWithIgnoreCase(suffix) : val.EndsWith(suffix))) { return val; } return val + suffix; } public static string AppendPrefixIfMissing(this string val, string prefix, bool ignoreCase = true) { if (string.IsNullOrEmpty(val) || (ignoreCase ? val.StartsWithIgnoreCase(prefix) : val.StartsWith(prefix))) { return val; } return prefix + val; } public static bool IsAlpha(this string val) { if (string.IsNullOrEmpty(val)) { return false; } return val.Trim().Replace(" ", "").All(Char.IsLetter); } public static bool IsAlphaNumeric(this string val) { if (string.IsNullOrEmpty(val)) { return false; } return val.Trim().Replace(" ", "").All(Char.IsLetterOrDigit); } public static string CreateHashSha512(string val) { if (string.IsNullOrEmpty(val)) { throw new ArgumentException("val"); } var sb = new StringBuilder(); using (SHA512 hash = SHA512.Create()) { byte[] data = hash.ComputeHash(val.ToBytes()); foreach (byte b in data) { sb.Append(b.ToString("x2")); } } return sb.ToString(); } public static string CreateHashSha256(string val) { if (string.IsNullOrEmpty(val)) { throw new ArgumentException("val"); } var sb = new StringBuilder(); using (SHA256 hash = SHA256.Create()) { byte[] data = hash.ComputeHash(val.ToBytes()); foreach (byte b in data) { sb.Append(b.ToString("x2")); } } return sb.ToString(); } public static IDictionary<string, string> QueryStringToDictionary(this string queryString) { if (string.IsNullOrWhiteSpace(queryString)) { return null; } if (!queryString.Contains("?")) { return null; } string query = queryString.Replace("?", ""); if (!query.Contains("=")) { return null; } return query.Split('&').Select(p => p.Split('=')).ToDictionary( key => key[0].ToLower().Trim(), value => value[1]); } public static string ReverseSlash(this string val, int direction) { switch (direction) { case 0: return val.Replace(@"/", @"\"); case 1: return val.Replace(@"\", @"/"); default: return val; } } public static string ReplaceLineFeeds(this string val) { return Regex.Replace(val, @"^[\r\n]+|\.|[\r\n]+$", ""); } public static bool IsValidIPv4(this string val) { if (string.IsNullOrEmpty(val)) { return false; } return Regex.Match(val, @"(?:^|\s)([a-z]{3,6}(?=://))?(://)?((?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?))(?::(\d{2,5}))?(?:\s|$)") .Success; } public static int GetByteSize(this string val, Encoding encoding) { if (val == null) { throw new ArgumentNullException("val"); } if (encoding == null) { throw new ArgumentNullException("encoding"); } return encoding.GetByteCount(val); } public static string Left(this string val, int length) { if (string.IsNullOrEmpty(val)) { throw new ArgumentNullException("val"); } if (length < 0 || length > val.Length) { throw new ArgumentOutOfRangeException("length", "length cannot be higher than total string length or less than 0"); } return val.Substring(0, length); } public static string Right(this string val, int length) { if (string.IsNullOrEmpty(val)) { throw new ArgumentNullException("val"); } if (length < 0 || length > val.Length) { throw new ArgumentOutOfRangeException("length", "length cannot be higher than total string length or less than 0"); } return val.Substring(val.Length - length); } public static IEnumerable<string> ToTextElements(this string val) { if (val == null) { throw new ArgumentNullException("val"); } TextElementEnumerator elementEnumerator = StringInfo.GetTextElementEnumerator(val); while (elementEnumerator.MoveNext()) { string textElement = elementEnumerator.GetTextElement(); yield return textElement; } } public static bool DoesNotStartWith(this string val, string prefix) { return val == null || prefix == null || !val.StartsWith(prefix, StringComparison.InvariantCulture); } public static bool DoesNotEndWith(this string val, string suffix) { return val == null || suffix == null || !val.EndsWith(suffix, StringComparison.InvariantCulture); } public static bool IsNull(this string val) { return val == null; } public static bool IsNullOrEmpty(this string val) { return String.IsNullOrEmpty(val); } public static bool IsMinLength(this string val, int minCharLength) { return val != null && val.Length >= minCharLength; } public static bool IsMaxLength(this string val, int maxCharLength) { return val != null && val.Length <= maxCharLength; } public static bool IsLength(this string val, int minCharLength, int maxCharLength) { return val != null && val.Length >= minCharLength && val.Length <= minCharLength; } public static int? GetLength(string val) { return val == null ? (int?)null : val.Length; } public static string Quotes(this string text) { return SurroundWith(text, "'"); } public static string DoubleQuotes(this string text) { return SurroundWith(text, "\""); } public static string SurroundWith(this string text, string ends) { return ends + text + ends; } public static DateTime ToDateTime(this string s, string format = "ddMMyyyy", string cultureString = "tr-TR") { try { var r = DateTime.ParseExact( s: s, format: format, provider: CultureInfo.GetCultureInfo(cultureString)); return r; } catch (FormatException) { throw; } catch (CultureNotFoundException) { throw; // Given Culture is not supported culture } } public static DateTime ToDateTime(this string s, string format, CultureInfo culture) { try { var r = DateTime.ParseExact(s: s, format: format, provider: culture); return r; } catch (FormatException) { throw; } catch (CultureNotFoundException) { throw; // Given Culture is not supported culture } } public static T IfDefault<T>(this string s, T Result) { if (s.IsNullOrEmpty()) return Result; else return (T)s.GetType().ToDefault(); } public static IEnumerable<T> SplitTo<T>(this string str, String[] separator) where T : IConvertible { return str.Split(separator, StringSplitOptions.None).Select(s => (T)Convert.ChangeType(s, typeof(T))); } public static string Base64Encode(this string plainText) { var plainTextBytes = System.Text.Encoding.UTF8.GetBytes(plainText); return System.Convert.ToBase64String(plainTextBytes); } public static string Base64Decode(this string base64EncodedData) { var base64EncodedBytes = System.Convert.FromBase64String(base64EncodedData); return System.Text.Encoding.UTF8.GetString(base64EncodedBytes); } public static object ChangeStrType(this string aText, Type aType) { NumberFormatInfo nfi = new NumberFormatInfo(); nfi.CurrencyDecimalSeparator = ","; nfi.CurrencyGroupSeparator = ""; nfi.PercentDecimalSeparator = ","; nfi.PercentGroupSeparator = ""; nfi.NumberDecimalSeparator = ","; nfi.NumberGroupSeparator = ""; switch (Type.GetTypeCode(aType)) { case TypeCode.Boolean: return Convert.ChangeType(aText, aType); case TypeCode.Byte: return Convert.ChangeType(aText, aType); case TypeCode.Char: return Convert.ChangeType(aText, aType); case TypeCode.DBNull: return Convert.ChangeType(aText, aType); case TypeCode.DateTime: return Convert.ChangeType(aText, aType); case TypeCode.Decimal: return aText.IsNullOrEmpty() ? 0 : Convert.ChangeType(aText.Replace(".", ","), aType); case TypeCode.Double: return aText.IsNullOrEmpty() ? 0 : Convert.ChangeType(Convert.ChangeType(aText, aType).ToString().Replace(".", ","), aType); case TypeCode.Empty: return Convert.ChangeType(aText, aType); case TypeCode.Int16: return aText.IsNullOrEmpty() ? 0 : Convert.ChangeType(aText, aType); case TypeCode.Int32: return aText.IsNullOrEmpty() ? 0 : Convert.ChangeType(aText, aType); case TypeCode.Int64: return Convert.ChangeType(aText, aType); case TypeCode.Object: return Convert.ChangeType(aText, aType); case TypeCode.SByte: return Convert.ChangeType(aText, aType); case TypeCode.Single: return Convert.ChangeType(aText, aType); case TypeCode.String: return Convert.ChangeType(aText, aType); case TypeCode.UInt16: return Convert.ChangeType(aText, aType); case TypeCode.UInt32: return Convert.ChangeType(aText, aType); case TypeCode.UInt64: return Convert.ChangeType(aText, aType); default: return Convert.ChangeType(aText, aType); } } public static Guid ToGuid(this string Str) { return Guid.Parse(Str); } } }
c7718d05c2ac7c5b5fcd77eeb627a3d84048c117
[ "Markdown", "C#" ]
9
C#
HakanUcaar/ExtensionSugar
8a285b7023ef413bd0ed86de490e5f72482433a4
a14c0094b3a5d7d25c9be39411d35e1a37790a9d
refs/heads/master
<repo_name>iregon/Mercalinks<file_sep>/www/templates/home/controller.js angular.module('starter') .controller('HomeCtrl', function($scope, $http, $stateParams, $rootScope){ // $scope.chiamataHttp(function(){ var page = 1; var fine = 0; var action = ""; $scope.loadAnnunci = function() { var link = "http://mercalinks.altervista.org/select3.php"; $http.get(link, { params: { tabella: "annunci", page: page } }).then(function(response) { annunci1 = response.data.annunci; if($rootScope.cat!=null){ for(var i=0;i<annunci1.length;i++){ if(annunci[i].id_categoria == $rootScope.cat){ $scope.annunci.push(annunci1[i]); } } }else{ $scope.annunci=annunci1; } if($rootScope.prezzo!=null){ if($rootScope.prezzo==1){ $scope.annunci=annunci1.sort(); } if($rootScope.prezzo==2){ $scope.annunci=annunci1.reverse(); } }else{ $scope.annunci=annunci1; } fine = response.data.fine; if (action === "refresh") $scope.$broadcast('scroll.refreshComplete'); if (action === "scroll") $scope.$broadcast('scroll.infiniteScrollComplete'); }).catch(function(error) { console.log(error); }); } $scope.moreData = function() { if (fine == 0) return true; else return false; } $scope.doRefresh = function(){ page = 1; fine = 0; action = "refresh"; $scope.loadAnnunci(); } $scope.loadMore = function() { if (fine == 0) { page++; action = "scroll"; $scope.loadAnnunci(); } } }); <file_sep>/www/templates/login/controller.js angular.module('starter') .controller('LoginCtrl', function($scope, $http, $timeout, $localStorage, $ionicPopup) { $scope.closeLogin = function() { // $scope.modal.hide(); }; function showPopup(text) { var alertPopUp = $ionicPopup.show({ title:"<img src='/img/error.png'>", subTitle: "<h4>" + text + "</h4>", buttons: [{ text: "OK", type: "button-default" }] }).then(function(res) { }); } // Perform the login action when the user submits the login form $scope.doLogin = function() { var link = "http://mercalinks.altervista.org/login.php"; var cryptedPass = CryptoJS.SHA1($scope.loginData.password.trim()); // console.log(cryptedPass.toString(CryptoJS.enc.Base64)); var param = $scope.loginData.email.trim() + "###" + cryptedPass + "###"; var iv = CryptoJS.enc.Hex.parse("abcdef9876543210abcdef9876543210"); var key = CryptoJS.enc.Hex.parse("<KEY>"); var hashDigest = CryptoJS.AES.encrypt(param, key, {iv:iv}); $http.get(link, { params: { str: hashDigest.ciphertext.toString(CryptoJS.enc.Base64) } }).then(function(response) { var res = response.data; console.log(res["status"]); if(res["status"] === "OK") { // console.log($scope.res); $localStorage.id_utente = res; var date = new Date(); $localStorage.last_login = date.getFullYear() + "/" + (date.getMonth() + 1) + "/" + date.getDate(); console.log($localStorage.last_login); window.location.href = "#/app/annunciUtente"; } else { showPopup("E-mail e/o password errati"); } }).catch(function(error) { console.log(error); }); }; }); <file_sep>/www/templates/annuncio/controller.js angular.module('starter') .controller('AnnuncioCtrl', function($scope, $stateParams, $http, $ionicPopup, $window, $localStorage) { var link = "http://mercalinks.altervista.org/select2.php"; $scope.id=$stateParams.annuncioId; $scope.addPref = function(id) { var pref = $localStorage.preferiti; pref.push("" + id + ""); $localStorage.preferiti = pref; document.getElementById("btnRemPref").classList.add("annuncio_add_pref"); document.getElementById("btnAddPref").classList.remove("annuncio_rem_pref"); }; $scope.isPref = function() { var pref = $localStorage.preferiti; if(pref.indexOf($scope.id) > -1) return true; else return false; } $scope.remPref = function(id) { var pref = $localStorage.preferiti; var index = pref.indexOf("" + id + ""); if (index > -1) { pref.splice(index, 1); } $localStorage.preferiti = pref; document.getElementById("btnRemPref").classList.remove("annuncio_add_pref"); document.getElementById("btnAddPref").classList.add("annuncio_rem_pref"); }; $scope.contatta = function(){ $scope.alertPopup = $ionicPopup.show({ scope: $scope, title: 'Contatta', buttons: [ { text: 'Cancel' } ], templateUrl: 'templates/annuncio/contattaTemplate.html' }); console.log($scope.buttons); }; $scope.mailUser = function(utente,annuncio){ cordova.plugins.email.open({ to: utente.email, subject: 'Mercalinks ad: '+ annuncio.titolo, body: '' }); };//end mailUser() $scope.close=function(){ $scope.alertPopup.close(); } $http.get(link,{ params:{ id: $scope.id, tabella: "annunci" } }).then( function(response){ $scope.annuncio=response.data.annunci; $scope.annuncio.data_in = $scope.annuncio.data_in.split(" ")[0]; console.log(response.data.annuncio); $http.get(link,{ params:{ id: $scope.annuncio.id_utente, tabella: "utenti" } }).then( function(response){ $scope.utente=response.data.utenti; console.log(response.data.utente); } ).catch(function(error){ console.log(error); }); $http.get(link,{ params:{ id: $scope.annuncio.id_comune, tabella: "comuni" } }).then( function(response){ $scope.comune=response.data.comuni; console.log(response.data.comune); } ).catch(function(error){ console.log(error); }); } ).catch(function(error){ console.log(error); }); }); <file_sep>/www/templates/edit/controller.js angular.module('starter') .controller('EditCtrl',function($scope,$http,$ionicPopup,$localStorage){ //$scope.testFile={}; var link = "http://mercalinks.altervista.org/select1.php"; $http.get(link, { params: { tabella: "categorie" } }).then(function(response) { $scope.categorie = response.data.categorie; console.log($scope.categorie); }).catch(function(error) { console.log(error); }); $scope.controllo=function(){ if(insert.titolo.value===""){ var alertPopup=$ionicPopup.show({ title:'Inserisci il titolo', buttons:[{ text:'OK', type: 'button-default' }] }); alertPopup.then(function(res){ console.log(res); }); return false; }else{ if(insert.descrizione.value===""){ var alertPopup=$ionicPopup.show({ title:'Inserisci la descrizione', buttons:[{ text:'OK', type: 'button-default' }] }); alertPopup.then(function(res){ console.log(res); }); return false; } else{ if(insert.prezzo.value===""){ var alertPopup=$ionicPopup.show({ title:'Inserisci il prezzo', buttons:[{ text:'OK', type: 'button-default' }] }); alertPopup.then(function(res){ console.log(res); }); return false; } else{ $scope.messaggio(); } } } return true; } $scope.messaggio=function(){ var alertPopup=$ionicPopup.show({ title:'<img src="/img/tick.png">', subTitle:'<h4>Annuncio inserito</h4>', buttons:[{ text:'OK', type: 'button-default' }] }); alertPopup.then(function(res){ console.log(res); }); }; $scope.inserisci=function(){ $scope.testFile = {}; if($scope.controllo()){ var link= 'http://mercalinks.altervista.org/edit1.php'; $http.get(link,{ params:{ tabella:'annunci' } }).then(function (res){ $scope.response = res.data; }); }; } }); <file_sep>/www/templates/preferiti/controller.js angular.module('starter') .controller('PreferitiCtrl', function($scope, $http, $localStorage){ var link = "http://mercalinks.altervista.org/select1.php"; $http.get(link, { params: { tabella: "annunci" } }).then(function(response) { var annunci = response.data.annunci; var annunci2 = []; var pref = $localStorage.preferiti; annunci.forEach(function(element) { pref.forEach(function(preferito) { if(element["id_annuncio"] == preferito) { annunci2.push(element); } }); }); $scope.annunci = annunci2; $scope.$broadcast('scroll.refreshComplete'); }).catch(function(error) { console.log(error); }); $scope.doRefresh= function(){ $http.get(link, { params: { tabella: "annunci" } }).then(function(response) { var annunci = response.data.annunci; var annunci2 = []; var pref = $localStorage.preferiti; annunci.forEach(function(element) { pref.forEach(function(preferito) { if(element["id_annuncio"] == preferito) { annunci2.push(element); } }); }); $scope.annunci = annunci2; $scope.$broadcast('scroll.refreshComplete'); }).catch(function(error) { console.log(error); }); } }); <file_sep>/www/js/controllers.js angular.module('starter.controllers', ['ngCordova']) .factory('Annunci', function(){ var annunci = {}; return{ getAnnunci: function(){ return annunci; }, setAnnunci: function(param){ annunci = param; } } }) .controller('AppCtrl', function($scope, $ionicModal, $timeout, $localStorage, $http, $rootScope) { // With the new view caching in Ionic, Controllers are only called // when they are recreated or on app start, instead of every page change. // To listen for when this page is active (for example, to refresh data), // listen for the $ionicView.enter event: //$scope.$on('$ionicView.enter', function(e) { //}); // Form data for the login modal $scope.loginData = {}; // Create the login modal that we will use later $ionicModal.fromTemplateUrl('templates/login/login.html', { scope: $scope }).then(function(modal) { $scope.modal = modal; }); // Open the login modal $scope.login = function() { $scope.modal.show(); }; $scope.logout = function() { $localStorage.id_utente = ""; window.location.href = "#/app/home"; }; $scope.is_user_logged = function() { // console.log($localStorage.id_utente.utente); var user_info = $localStorage.id_utente; if($localStorage.id_utente.utente != undefined) { //console.log("Logged"); $scope.user_data = $localStorage.id_utente; return true; } else { //console.log("Not logged"); return false; } }; /*Categorie*/ var link = "http://mercalinks.altervista.org/select1.php"; $http.get(link, { params: { tabella: "categorie" } }).then(function(response) { $scope.categorie = response.data.categorie; }).catch(function(error) { console.log(error); }); var link = "http://mercalinks.altervista.org/getAllProvince.php"; $http.get(link, { }).then(function(response) { $scope.province=response.data.province; }).catch(function(error) { console.log(error); }); Array.prototype.indexOf || (Array.prototype.indexOf = function(d, e) { var a; if (null == this) throw new TypeError('"this" is null or not defined'); var c = Object(this), b = c.length >>> 0; if (0 === b) return -1; a = +e || 0; Infinity === Math.abs(a) && (a = 0); if (a >= b) return -1; for (a = Math.max(0 <= a ? a : b - Math.abs(a), 0); a < b;) { if (a in c && c[a] === d) return a; a++ } return -1 }); }) <file_sep>/www/templates/annunciUtente/controller.js angular.module('starter') .controller('AnnunciUtenteCtrl', function($scope, $state, $http, $timeout, $localStorage, $stateParams, $ionicPopup) { var link = "http://mercalinks.altervista.org/select1.php"; var idU=$localStorage.id_utente["utente"]["id_utente"]; $scope.nomeU=$localStorage.id_utente["utente"]["nome"]; $scope.annunciU = []; //filtro gli annunci tenendomi quelli dell'utente $http.get(link, { params: { tabella: "annunci" } }).then(function(response) { var annunci = response.data.annunci; for(var i=0;i<annunci.length;i++){ if(annunci[i].id_utente == idU){ $scope.annunciU.push(annunci[i]); } } }).catch(function(error) { console.log(error); }); $scope.gotoEdit = function(){ $state.go('app.edit'); } $scope.doRefresh= function(){ $scope.annunciU=[]; $scope.annunciU.length=0; console.log($scope.annunciU); $http.get(link, { params: { tabella: "annunci" } }).then(function(response) { var annunci = response.data.annunci; for(var i=0;i<annunci.length;i++){ if(annunci[i].id_utente == idU){ $scope.annunciU.push(annunci[i]); } } }).catch(function(error) { console.log(error); }).finally(function() { // Stop the ion-refresher from spinning $scope.$broadcast('scroll.refreshComplete'); }); } // $scope.toDoOnLongPress = function(){ // console.log("ok1"); // } // // $scope.toDoOnTouchEnd = function(){ // console.log("ok2") // } // $scope.deleteItem = function(){ // console.log("ok3"); // } $scope.showConfirm = function(id) { var link = "http://mercalinks.altervista.org/delete1.php"; var confirmPopup = $ionicPopup.confirm({ title: 'Elimina', template: 'Vuoi davvero eliminare questo annuncio?', buttons: [{ // Array[Object] (optional). Buttons to place in the popup footer. text: 'Annulla', type: 'button-default', }, { text: 'OK', type: 'button-positive', onTap: function(e) { $http.get(link, { params: { // tabella: 'annunci', id: id } }).then(function(){ // console.log("suca"); }) $scope.doRefresh(); } }] }); // confirmPopup.then(function(res) { // if(res) { // // } // }); }; }) // .directive('onLongPress', function($timeout){ // return{ // restrict: 'A', // link: function($scope, $elm, $attrs) { // $elm.bind('touchstart', function(evt) { // // Locally scoped variable that will keep track of the long press // $scope.longPress = true; // // // We'll set a timeout for 600 ms for a long press // $timeout(function() { // if ($scope.longPress) { // // If the touchend event hasn't fired, // // apply the function given in on the element's on-long-press attribute // $scope.$apply(function() { // $scope.$eval($attrs.onLongPress) // }); // } // }, 600); // }); // // $elm.bind('touchend', function(evt) { // // Prevent the onLongPress event from firing // $scope.longPress = false; // // if ($attrs.onTouchEnd) { // $scope.$apply(function() { // $scope.$eval($attrs.onTouchEnd) // }); // } // }); // } // }; // })
ab799127271f3c1777d5f308888ae1fc669db965
[ "JavaScript" ]
7
JavaScript
iregon/Mercalinks
a4961a404ef7512f6802d4953e874b58e01780f7
853ca8ea934ffd983063e6df7b1f03619cddfc41
refs/heads/master
<repo_name>AJMartel/arduino_sd_recovery<file_sep>/sd_recovery_files.ino #include <SD.h> File root; void setup() { Serial.begin(115200); pinMode(10, OUTPUT); SD.begin(10); root = SD.open("/"); readDirectory(root, ""); Serial.println("done!"); } void loop() { // nothing happens after setup finishes. } void readDirectory(File dir, String folder) { boolean files = true; while(files) { File entry = dir.openNextFile(); if (! entry) { files = false; } else { if (entry.isDirectory()) { String folder_new = folder; folder_new += entry.name(); folder_new += "/"; readDirectory(entry, folder_new); } else { outputFile(entry, folder); } } } } void outputFile(File entry, String folder) { Serial.print("--- "); Serial.print(folder); Serial.print(entry.name()); Serial.print(";"); Serial.println(entry.size(), DEC); byte r; while (entry.available()) { r = entry.read(); if (r < 0x10) { Serial.print("0"); } Serial.print(r, HEX); } Serial.println(); } <file_sep>/README.md # SD Card Recovery using an Arduino One of my SD card "died". More specifically, my laptop wouldn't read it anymore, and neither would any other machines or cameras I tried. It did react (looking at the dmesg output), but would only though errors. So I decided to try and read it using an Arduino. ## Background __Attention__ _Dangerous half knowledge ahead_ SD cards have two modes in which they can be accessed. One is the SDIO mode, which is used by your average card reader, your camera, etc. It's fast, and somewhat complicated. The other mode is SPI mode. It's much slower, but it's fairly simple. That's what you use if you connect your SD card to a microcontroller such as an arduino. ## Hardware I used a Playduino One Arduino clone, that I had lying around, and a SD card shield I got from ElectroDragon. Just plugged them together, and done is the card reader hardware. ## Software I took 3 steps in the recovery process. ### CardInfo The SD card library in the Arduino IDE contains an example called `CardInfo`. It connects to the card, and gets information such as size, filesystem type, and a list of files on the card. That worked, so I kept going. ### sd_recovery_files The sketch `sd_recovery_files.ino` tries to iterate over the filesystem, and outputs all files in a HEX encoded format over the serial port. If you save the output to a file on your machine, you can then parse the file using the supplied python script `parse_files.py`. This surprisingly worked for a number of files, but the `openNextFile()` doesn't seem to work all that well, so not all files were copied. ### sd_recovery_raw The sketch `sd_recovery_raw.ino` reads the SD card block by block, and outputs it in a HEX encoded format over the serial port. You can parse the output back into a binary file using the supplied `parse_raw.py` python script. This may just take forever, and I have not been successful in using that file, but I have yet to actually copy the entire SD card that way (might take a few days for the 4GB card I have). <file_sep>/parse_files.py #!/bin/python import re import fileinput # Header for each file, with filename and size prog = re.compile('^--- (.*);(\d*)') raw = fileinput.input() while (True): first = raw.readline() if len(first) == 0: break second = raw.readline() header = prog.match(first) name = header.group(1) size = int(header.group(2)) print "Name: " + name print "Size: " + str(size) print "Length: " + str(len(second)) if len(second) - 2 <> size * 2: print "Size mismatch. Skipping." else: print "Size matches, writing file..." output = open("out/" + name, "wb") for i in range(0,size*2,2): output.write( chr( int (second[i:i+2], 16 ) ) ) output.close() print "---" <file_sep>/parse_raw.py #!/bin/python import fileinput import sys import os def zeroLine(): for i in range(0,512): output.write(chr(0x00)) output = open("sdcard01.img", "wb") normal_lines = 0 zero_lines = 0 error_lines = 0 for line in fileinput.input(): line = line.rstrip() if line == "Y": zero_lines += 1 zeroLine() elif line == "Z": error_lines += 1 zeroLine() else: if len(line) <> 1025: print >> sys.stderr, "short line (" + str(len(line)) + ")" error_lines += 1 zeroLine() else: for i in range(0,1024,2): output.write( chr( int (line[i:i+2], 16 ) ) ) normal_lines += 1 print >> sys.stderr, "normal: ", normal_lines print >> sys.stderr, "zero: ", zero_lines print >> sys.stderr, "error: ", error_lines <file_sep>/sd_recovery_raw.ino #include <SD.h> File root; Sd2Card card; void setup() { Serial.begin(115200); pinMode(10, OUTPUT); card.init(SPI_HALF_SPEED, 10); // SD.begin(10); long blocks = card.cardSize(); //Serial.println(blocks); uint8_t data[512]; int i = 0; boolean notZero = false; for (long blockNumber = 0; blockNumber < blocks; blockNumber++) { //card.chipSelectLow(); if (!card.readBlock(blockNumber, data)) { Serial.println("Z"); break; } notZero = false; for (i = 0; i<512; i++) { if (data[i] > 0x00) { notZero = true; break; } } if (notZero) { for (i = 0; i<512; i++) { if (data[i] < 0x10) { Serial.print("0"); } Serial.print(data[i], HEX); } Serial.println("X"); } else { Serial.println("Y"); } //Serial.println("done!"); } } void loop() { // nothing happens after setup finishes. } void printDirectory(File dir, String folder) { boolean files = true; while(files) { File entry = dir.openNextFile(); if (! entry) { files = false; } else { if (entry.isDirectory()) { String folder_new = folder; folder_new += entry.name(); folder_new += "/"; printDirectory(entry, folder_new); } else { Serial.print("--- "); Serial.print(folder); Serial.print(entry.name()); Serial.print(";"); Serial.println(entry.size(), DEC); byte r; while (entry.available()) { r = entry.read(); if (r < 0x10) { Serial.print("0"); } Serial.print(r, HEX); } Serial.println(); } } } }
a84f3f3d4e1811c5d67a497d8a0d29822ff35e04
[ "Markdown", "Python", "C++" ]
5
C++
AJMartel/arduino_sd_recovery
e93a535b3ab8fd567a3f427e64471619f34a3be9
da9c69d3a8fb602210d2153552849197e77796cd
refs/heads/master
<file_sep>var size = 8; for(var i = 1;i<=size ; i++){ var st = " "; if(i%2!=0){ st = "#" for(var j=1;i<8;j++) if(j%2!=0) st+=" "; else st+="#"; } else{ for(var k=1;i<8;k++) if(j%2!=0) st+="#"; else st+=" "; } console.log(st); } <file_sep>for(var i=0 ; i < 7;i++){ var string = ""; for(var j=0 ; j <=i ;j++){ string += "#"; } console.log(string); }
e80b4310c49dc13cde7daceebce16ef6ab294ec2
[ "JavaScript" ]
2
JavaScript
sinhvienbkdn17/PracJavascript
a6e05b821afc72ae37dedd737137e65fe6d22ad6
bd2871ec9cc5fcfd62086f0d20cfd2fe43162bc0
refs/heads/master
<repo_name>pundrikmishra/FlaskTestFrappe<file_sep>/README.md # FlaskTestFrappe I have used mongodb for database and flask_restful api for this project. I have also used Flask table for showing report. and then I created 3 tables 1) Product(_id, ProductName) ![Database_Product](https://github.com/pundrikmishra/FlaskTestFrappe/blob/master/Images/Database_product.png) 2) Location(_id, LocationName) ![Database_Location](https://github.com/pundrikmishra/FlaskTestFrappe/blob/master/Images/Database_location.png) 3) ProductMovement(_id, ProductName, FromLocation, ToLocation, qty, timestamp) ![Database_ProductMovement](https://github.com/pundrikmishra/FlaskTestFrappe/blob/master/Images/Database_ProductMovement.png) I used Postman software for accesing Api. Before inserting Data in ProductMovement ![add_product_movement](https://github.com/pundrikmishra/FlaskTestFrappe/blob/master/Images/add_product_movement.png) After inserting data in ProdutMovement. ![ProductMovementAdded](https://github.com/pundrikmishra/FlaskTestFrappe/blob/master/Images/ProductMovementAdded.png) # Report ![ViewButton](https://github.com/pundrikmishra/FlaskTestFrappe/blob/master/Images/ViewButton.png) ![ViewProductMovement](https://github.com/pundrikmishra/FlaskTestFrappe/blob/master/Images/ViewProductMovement.png) <file_sep>/tables.py from flask_table import Table, Col class Results(Table): id = Col('_id', show=False) ProductName = Col('ProductName') FromLocation = Col("FromLocation") ToLocation = Col("ToLocation") qty = Col("qty") timestamp = Col("timestamp") <file_sep>/main.py from flask import Flask, request, jsonify, render_template from flask_restful import Resource, Api from flask_pymongo import PyMongo import datetime # from flask_table import Table, Col from tables import Results app = Flask(__name__) app.config["MONGO_URI"] = "mongodb://localhost:27017/frappeTest" mongo = PyMongo(app) api = Api(app) ############################### Product ######################################### class AddProduct(Resource): def post(self): product = mongo.db.product ProductName = request.json['ProductName'] if product.find_one({"ProductName": ProductName}): return jsonify({"Message": "Product Already Existed"}) else: NewProductId = product.insert({"ProductName": ProductName}) NewProduct = product.find_one({"_id": NewProductId}) output = {"ProductName": NewProduct['ProductName']} return jsonify({"New Product Added": output}) # product = mongo.db.product # location = mongo.db.location # ProductName = request.json['ProductName'] # qty = request.json['qty'] # # qty = int(qty) # LocationName = request.json['LocationName'] # if location.find_one({"LocationName": LocationName}): # if product.find_one({"ProductName": ProductName, "LocationName": LocationName}): # return jsonify({"Message": "Product Already Existed"}) # else: # NewProductId = product.insert({"ProductName": ProductName, "qty": qty, "LocationName": LocationName}) # NewProduct = product.find_one({"_id": NewProductId}) # output = {"ProductName": NewProduct['ProductName'], "qty": NewProduct['qty'], # "LocationName": NewProduct['LocationName']} # return jsonify({"New Product Added": output}) # else: # return jsonify({"Message": "Location Not Existed"}) class ViewProduct(Resource): def get(self): product = mongo.db.product output = [] for all_product in product.find(): output.append({'ProductName': all_product['ProductName']}) return jsonify({'All Product': output}) # product = mongo.db.product # output = [] # for all_product in product.find(): # output.append({'ProductName': all_product['ProductName'], "qty": all_product['qty'], # "LocationName": all_product['LocationName']}) # return jsonify({'All Product': output}) class EditProduct(Resource): def post(self): product = mongo.db.product OldProductName = request.json['OldProductName'] NewProductName = request.json['NewProductName'] if product.find_one({"ProductName": OldProductName}): EditProductId = product.update({"ProductName": OldProductName}, {"$set": {"ProductName": NewProductName}}) if EditProductId: return jsonify({"Message": "Product updated"}) else: return jsonify({"Message": "Product Not Updated"}) else: return jsonify({"Message": "Product is Not in Database"}) # product = mongo.db.product # ProductName = request.json['ProductName'] # LocationName = request.json['LocationName'] # # OldQty = request.json['OldQty'] # NewQty = request.json['NewQty'] # if product.find_one({"ProductName": ProductName, "LocationName": LocationName}): # EditProductQtyId = product.update({"ProductName": ProductName, "LocationName": LocationName}, # {"$set": {"qty": NewQty}}) # # NewProduct = product.find_one({"_id": EditProductQtyId}) # if EditProductQtyId: # return jsonify({"Message": "Product Qty updated"}) # else: # return jsonify({"Message": "Product Qty Not Updated"}) # else: # return jsonify({"Message": "Product is Not at this Location"}) # class DeleteProduct(Resource): # def post(self): # product = mongo.db.product # ProductName = request.json['ProductName'] # if product.find_one({"ProductName": ProductName}): # DeleteProductId = product.delete_one({"ProductName": ProductName}) # if DeleteProductId: # return jsonify({"Message": "Product Deleted"}) # else: # return jsonify({"Message": "Product Not Deleted"}) # else: # return jsonify({"Message": "Product is Not in Database"}) ############################### Location ######################################### class AddLocation(Resource): def post(self): location = mongo.db.location LocationName = request.json['LocationName'] if location.find_one({"LocationName": LocationName}): return jsonify({"Message": "Location Already Existed"}) else: NewLocationId = location.insert({"LocationName": LocationName}) NewLocation = location.find_one({"_id": NewLocationId}) output = {"LocationName": NewLocation['LocationName']} return jsonify({"New Location Added": output}) class ViewLocation(Resource): def get(self): location = mongo.db.location output = [] for all_location in location.find(): output.append({'LocationName': all_location['LocationName']}) return jsonify({'All Location': output}) class EditLocation(Resource): def post(self): location = mongo.db.location product = mongo.db.product OldLocationName = request.json['OldLocationName'] NewLocationName = request.json['NewLocationName'] if location.find_one({"LocationName": OldLocationName}): EditLocationId = location.update({"LocationName": OldLocationName}, {"$set": {"LocationName": NewLocationName}}) # EditProductLocationId = product.find_one({"LocationName": OldLocationName}) # return jsonify({"Message": EditProductLocationId}) # for i in EditProductLocationId: # # product.update({"LocationName": OldLocationName}, {"$set": {"LocationName": NewLocationName}}) if EditLocationId: return jsonify({"Message": "Location Updated"}) else: return jsonify({"Message": "Location Not Updated"}) else: return jsonify({"Message": "Old Location is Not in Database"}) # class DeleteLocation(Resource): # def post(self): # location = mongo.db.location # LocationName = request.json['LocationName'] # if location.find_one({"LocationName": LocationName}): # DeleteLocationId = location.delete_one({"LocationName": LocationName}) # if DeleteLocationId: # return jsonify({"Message": "Location Deleted"}) # else: # return jsonify({"Message": "Location Not Deleted"}) # else: # return jsonify({"Message": "Location is Not in Database"}) ############################### Product Movement ################################# class AddProductMovement(Resource): def post(self): product = mongo.db.product location = mongo.db.location productMovement = mongo.db.productMovement ProductName = request.json['ProductName'] FromLocation = request.json['FromLocation'] ToLocation = request.json['ToLocation'] qty = request.json['qty'] qty = int(qty) timestamp = str(datetime.datetime.now()) ProductInDatabase = product.find_one({"ProductName": ProductName}) LocationInDatabase = location.find_one({"LocationName": FromLocation}) # if product.find_one({"ProductName": ProductName}) & location.find_one({"LocationName": FromLocation} # , {"LocationName": ToLocation}): if ProductInDatabase and LocationInDatabase: if productMovement.find_one({"ProductName": ProductName, "FromLocation": FromLocation}): # If product and and location already in same row then update only qty and time QtyInDatabase = productMovement.find_one({"ProductName": ProductName, "FromLocation": FromLocation}) qty = QtyInDatabase['qty'] + qty NewProductMovementId = productMovement.update({"ProductName": ProductName, "FromLocation": FromLocation}, # "ToLocation": ToLocation}, {"$set": {"qty": qty, "timestamp": timestamp}}) # NewProductMovement = productMovement.find_one({"_id": NewProductMovementId}) # output = {"ProductName": NewProductMovement['ProductName'], # "FromLocation": NewProductMovement['FromLocation'], # # "ToLocation": NewProductMovement['ToLocation'], # "qty": NewProductMovement['qty'], # "timestamp": NewProductMovement['timestamp']} if NewProductMovementId: return jsonify({"Message": "ProductMovement Qty Added"}) else: return jsonify({"Message": "ProductMovement Qty Not Added"}) else: NewProductMovementId = productMovement.insert({"ProductName": ProductName, "FromLocation": FromLocation, "ToLocation": ToLocation, "qty": qty, "timestamp": timestamp}) NewProductMovement = productMovement.find_one({"_id": NewProductMovementId}) output = {"ProductName": NewProductMovement['ProductName'], "FromLocation": NewProductMovement['FromLocation'], "ToLocation": NewProductMovement['ToLocation'], "qty": NewProductMovement['qty'], "timestamp": NewProductMovement['timestamp']} return jsonify({"New ProductMovement Added": output}) else: return jsonify({"Message": "Product or Location is Not in Database"}) @app.route('/view_product_movement') def ViewProductMovement(): productMovement = mongo.db.productMovement output = [] for all_product in productMovement.find(): output.append({"ProductName": all_product['ProductName'], "FromLocation": all_product['FromLocation'], "ToLocation": all_product['ToLocation'], "qty": all_product['qty'], "timestamp": all_product['timestamp']}) table = Results(output) table.border = True return render_template('result.html', table=table) # class ViewProductMovement(Resource, Table): # id = Col('_id', show=False) # ProductName = Col('ProductName') # FromLocation = Col("FromLocation") # ToLocation = Col("ToLocation") # qty = Col("qty") # timestamp = Col("timestamp") # def get(self): # productMovement = mongo.db.productMovement # output = [] # for all_product in productMovement.find(): # output.append({"ProductName": all_product['ProductName'], # "FromLocation": all_product['FromLocation'], # "ToLocation": all_product['ToLocation'], # "qty": all_product['qty'], # "timestamp": all_product['timestamp']}) # table = ViewProductMovement(output) # return jsonify(table) # # return jsonify({'All Product Movement': output}) class EditProductMovement(Resource): def post(self): # product = mongo.db.product location = mongo.db.location productMovement = mongo.db.productMovement ProductName = request.json['ProductName'] FromLocation = request.json['FromLocation'] ToLocation = request.json['ToLocation'] qty = request.json['qty'] qty = int(qty) timestamp = str(datetime.datetime.now()) if location.find_one({"LocationName": ToLocation}): # ProductInDatabase = product.find_one({"ProductName": ProductName}) # LocationInDatabase = location.find_one({"LocationName": FromLocation}) # ProductLocationQty = productMovement.find({"ProductName": ProductName, "$or": # {"FromLocation": FromLocation, # "ToLocation": ToLocation }}) ProductLocationQtyId = productMovement.find_one({"ProductName": ProductName, "FromLocation": FromLocation}) QtyInDatabase = ProductLocationQtyId['qty'] if productMovement.find_one({"ProductName": ProductName, "FromLocation": FromLocation}): if productMovement.find_one({"ProductName": ProductName, "FromLocation": FromLocation, "qty": {"$gte": qty}}): updatedQty = QtyInDatabase - qty NewProductMovementId = productMovement.update({"ProductName": ProductName, "FromLocation": FromLocation}, {"$set": {"qty": updatedQty, "ToLocation": ToLocation, "timestamp": timestamp}}) ProductQtyAtLocationId = 0 ProductAtNewLocationId = 0 if productMovement.find_one({"ProductName": ProductName, "FromLocation": ToLocation}): ProductLocationQtyId = productMovement.find_one({"ProductName": ProductName, "FromLocation": ToLocation}) LocationQtyInDatabase = ProductLocationQtyId['qty'] LocationQtyUpdated = LocationQtyInDatabase + qty ProductQtyAtLocationId = productMovement.update({"ProductName": ProductName, "FromLocation": ToLocation}, {"$set": {"qty": LocationQtyUpdated, # "ToLocation": ToLocation, "timestamp": timestamp}} ) else: ProductAtNewLocationId = productMovement.insert({"ProductName": ProductName, "FromLocation": ToLocation, "ToLocation": "", "qty": qty, "timestamp": timestamp}) if NewProductMovementId and (ProductQtyAtLocationId or ProductAtNewLocationId): return jsonify({"Message": "Product Movement is done"}) else: return jsonify({"Message": "ProductMovement not done "}) else: return jsonify({"Message": "Only " + str(QtyInDatabase) + " Product quantity is available in database"}) else: return jsonify({"Message": "Product is not available at this location"}) else: return jsonify({"Message": "To Location is not in database"}) api.add_resource(AddProduct, '/add_product') api.add_resource(ViewProduct, '/view_product') api.add_resource(EditProduct, '/edit_product') # api.add_resource(DeleteProduct, '/delete_product') api.add_resource(AddLocation, '/add_location') api.add_resource(ViewLocation, '/view_location') api.add_resource(EditLocation, '/edit_location') # api.add_resource(DeleteLocation, '/delete_location') api.add_resource(AddProductMovement, '/add_product_movement') # api.add_resource(ViewProductMovement, '/view_product_movement') api.add_resource(EditProductMovement, '/edit_product_movement') # api.add_resource(MoveProductMovement, '/move_product_movement') # api.add_resource(DeleteProductMovement, '/delete_product_movement') if __name__ == '__main__': app.run(debug=True, port=8080)
5ad1217b39c22a4b71fad10ad202437e9b88c319
[ "Markdown", "Python" ]
3
Markdown
pundrikmishra/FlaskTestFrappe
c2bef32fa79184f4d83358d011a71ead3a2701c7
c22e1f9bb003a163f052872e54405f74713bc970
refs/heads/master
<repo_name>sandeep-ops/sandy<file_sep>/readme.md updated docs updated docs sprint-1 <file_sep>/test/main.py sprint-1 sprint-2
4a2d6cf95f82b4905f5aca5eacb13366b27019b7
[ "Markdown", "Python" ]
2
Markdown
sandeep-ops/sandy
80ef2984976ce7169029d0423d4af2de05d1bd1b
ddff75cce71c803715faee8e4e56af21822907f9
refs/heads/master
<file_sep>import Layout from 'lib/components/layout' import { Button,Modal ,useModal,Image ,Display,Spacer} from '@geist-ui/react' export const meta = { title: '关于我', date: '2021-02-25', description: 'About Me.', } export const Contact = () => { const { setVisible, bindings } = useModal() return ( <> <a href="#" onClick={() => setVisible(true)}> 你可以通过这些地址联系我♥ </a> <Modal {...bindings}> <Modal.Title>联系我♥</Modal.Title> <Modal.Subtitle>这是安德鲁的联系地址</Modal.Subtitle> <Modal.Content> <Image width={150} height={150} src="https://cdn.jsdelivr.net/gh/w7xg/Blog/public/assets/2code.png" /> <Spacer y={2}/> <Text>E-mail:<EMAIL></Text> <Text>Github:@w7xg</Text> <Text>Twitter:@w7xg233</Text> </Modal.Content> <Modal.Action onClick={({ close }) => close()}>Got it!</Modal.Action> </Modal> </> ) } <Display shadow caption="今天又是洛圣都核平的一天"> <Image width="100%" src="https://cdn.jsdelivr.net/gh/w7xg/Blog/public/images/la.jpg" /> </Display> 你好,我叫**安德鲁**,一个~~无政府主义~~者,~~奇怪?~~的人,~~人民公敌~~,~~反动派~~。对`React`和`Airplane`?非常感兴趣。目前正在学习`Next.js`。总共花费了$0在网站搭建上,只是花费了大量时间。主要贡献是没有贡献。<Contact /> export default ({ children }) => <Layout meta={meta}>{children}</Layout> <file_sep>import Layout from 'lib/components/layout' import { Button,Image ,Display ,Snippet,Tabs,Tree,Spacer} from '@geist-ui/react' export const meta = { title: 'United States of America Introduction', date: '2021-02-25', description: '', } <Display shadow caption="签署《独立宣言》时的场景"> <Image src="https://cdn.jsdelivr.net/gh/w7xg/Blog/public/images/Declaration_of_Independence_(1819),_by_John_Trumbull.jpg" width="100%" /> </Display> 这是维基百科<a>https://zh.wikipedia.org/wiki/%E7%BE%8E%E5%9B%BD</a>上关于美利坚合众国的介绍: >美利坚合众国(英语:United States of America,缩写为USA,一般称为United States(U.S.或US),或America),中文通称美国。是由其下辖50个州、华盛顿哥伦比亚特区、五个自治领土及外岛共同组成的联邦共和国[注 1]。美国本土48州和联邦特区位于北美洲中部,东临大西洋,北面是加拿大,南部和墨西哥及墨西哥湾接壤[5],本土位于温带、副热带地区。阿拉斯加州位于北美大陆西北方,东部为加拿大,西隔白令海峡和俄罗斯相望;夏威夷州则位于大洋洲太平洋中部的群岛。美国在加勒比海和太平洋还拥有多处境外领土和岛屿地区。此外,美国还在全球很多个国家和地区拥有着众多海外军事基地[15]。美国的面积超过983万平方公里,正式为9,834,000万平方公里[5],位居世界第三或第四[16];同时拥有接近超过3.3亿人口[5],为世界第三人口大国[17]。是世界上人口最多的发达国家。美国有着来自世界各地的大量移民,是世界上民族和文化最多元的国家之一[18]。美国地形与气候复杂多样,是多种野生动物的家园[19]。 >一万五千多年前,古印第安人自亚洲迁徙至北美大陆[20]。16世纪欧洲开始殖民北美。现今的合众国起始于东海岸的13个英属美洲殖民地[21]。欧洲七年战争后,大不列颠王国与其殖民地之间的争议愈发剧烈,最终导致在1775年爆发美国革命。1776年7月4日,正与大不列颠进行独立战争的各殖民地派出代表,协同一致发表《独立宣言》。战争终止于1783年,大不列颠王国承认这13个北美殖民地脱离管辖而独立,与其签订《巴黎条约》[21]。这场战争也是第一场成功脱离欧洲殖民帝国的独立战争[22]。1781年,《邦联条例》在邦联13个构成州获得通过,共同组成了邦联议会。1787年《美利坚合众国宪法》完稿,将“美利坚合众国”改制为联邦体制,联邦政府随之成立。1791年,合称为权利法案的十条宪法修正案获得批准,担保了基本民权[23]。 >自19世纪起,美国政府通过强行移置原住民,征服及购买等方法大力扩张领土,随着逐渐不断地承认扩张领地为新州份,至1848年时美国疆域已横跨整个北美大陆[24]。19世纪下半叶爆发的内战,使曾经合法的奴隶制度得以终结[25][26]。在19世纪末,美国已将其领土延伸到太平洋的夏威夷。美国经济在工业革命的推动下,自那时起也开始蓬勃发展[27]。随后美西战争的胜利,使美国势力进入加勒比海地区及太平洋西部;而参与第一次世界大战则奠定其作为一个全球性军事力量的基础。尽管在1930年代经历经济大萧条,美国在第二次世界大战获得胜利之后崛起成为超级大国。作为联合国安全理事会常任理事国,美国是世界上第一个研发出核武器,也是唯一一个曾将其投入实战的国家[28]。战后美国与苏联进行了数十年冷战,顶峰时期的太空竞赛促使了人类第一次登月计划的成功。在1991年苏联解体后,至今三十年来,美国成为世上唯一的超级强国及经济强国[29]。 >美国作为一个高度发达国家,是世界上最大的进口国及第二大的商品出口国[30][31],国内生产总值按国际汇率排名世界第一、而依购买力平均则位列第二。在国民平均薪资[32]、人类发展指数、人均国内生产总值以及人均生产力[33]等社会经济学表现指标上,美国均处于世界领先地位。美国经济已步入后工业时代,服务行业占据经济主导地位,位列世界第一。同时,其生产制造业规模也极为庞大,位居于世界第二[34]。仅占据全世界4.4%人口的美国[35]贡献了世界四分之一的国内生产总值和三分之一的全球军事开支[36],这使其在经济和军事上均处于全世界最重要的地位。美国在政治和文化上是一支世界显著并影响深远的力量,也在科学研究和技术创新上占据世界领导地位[37]。美国也是联合国、世界银行、国际货币基金组织、美洲国家组织等组织的创始成员国以及总部所在地,在国际事务中占据重要话语权。 美利坚合众国是世界上的独立国家,中华人民共和国也是世界上的独立国家,至于台湾...。我们不能一味的“诋毁”其它国家,也不能一味的贬低自己国家,因为,这是我们共同的家园--The Earth. 还有最重要的一个原因,处理国与国之间的关系并不像使用Npm或Yarn包那么简单,要不然您试试: <Tabs initialValue="1"> <Tabs.Item label="Yarn" value="1"><Snippet text="yarn add america" width="200px" /></Tabs.Item> <Tabs.Item label="Npm" value="2"><Snippet text="npm i america --save" width="200px" /></Tabs.Item> </Tabs> 看吧,您得到了如下目录结构的文件,这很神奇,不是吗?: <Tree> <Tree.File name="package.json" /> <Tree.Folder name="node_modules"> <Tree.File name=".yarn-integrity" /> <Tree.Folder name="america"> <Tree.File name="index.js" /> <Tree.File name="package.json" /> </Tree.Folder> </Tree.Folder> <Tree.File name="yarn.lock" /> </Tree> <Spacer y={2}/> 美利坚合众国国歌: <audio width="100%" controls="controls" src="https://cdn.jsdelivr.net/gh/w7xg/Blog/public/music/Star_Spangled_Banner_instrumental.ogg.mp3"> </audio> >格言: *In God We Trust* export default ({ children }) => <Layout meta={meta}>{children}</Layout> <file_sep>import React, { useState, useEffect } from 'react' import { Row, useTheme, User, Link } from '@geist-ui/react' import NextLink from 'next/link' import ProfileLinks from './profile-links' import { Configs } from '../utils' const Profile = React.memo(({}) => { const theme = useTheme() const [showText, setShowText] = useState(theme.type === 'dark') useEffect(() => { const show = theme.type === 'dark' if (showText !== show) { setShowText(show) } }, [theme.type]) return ( <div className="profile"> <Row align="bottom" className="user"> <NextLink href="/" passHref> <Link> <User src="/assets/avatar.png" name={Configs.author} altText="avatar"> {Configs.summary} </User> </Link> </NextLink> </Row> <ProfileLinks /> <style jsx>{` .profile { padding: ${theme.layout.gap} 0; } .profile :global(.user) { padding-left: 0; margin-bottom: ${theme.layout.gapQuarter}; max-width: 100%; overflow: hidden; } @media only screen and (max-width: ${theme.layout.breakpointMobile}) { .profile { width: 100%; display: flex; flex-direction: column; align-items: center; padding-top: 5rem; } } `}</style> </div> ) }) export default Profile
6f8f7642c5febce1a3c45431d2e02398b2509101
[ "Markdown", "JavaScript" ]
3
Markdown
w7xg/Blog
c2e6d28c136de213137549822048b330a106fc75
8ff1fde9e617c5d0afe616051c94c3478da9f090
refs/heads/main
<file_sep>include ':facedetect' include ':app' rootProject.name = "FaceDetectAPI"<file_sep># FaceDetection This library uses the latest [Ver-ID SDK](https://github.com/AppliedRecognition/Ver-ID-UI-Android) to detect a face in a supplied image and compare it to a face detected in a Ver-ID liveness detection session. The library communicates the similarity scores between the compared faces. <file_sep>package com.courage.facedetectapi; import androidx.appcompat.app.AppCompatActivity; import android.graphics.Bitmap; import android.graphics.BitmapFactory; import android.graphics.drawable.BitmapDrawable; import android.net.Uri; import android.os.Bundle; import android.util.Log; import android.view.View; import android.widget.Button; import android.widget.ImageView; import android.widget.TextView; //import com.appliedrec.verid.core2.Face; import com.appliedrec.verid.core2.VerID; import com.appliedrec.verid.core2.VerIDFactory; import com.appliedrec.verid.core2.VerIDFactoryDelegate; import com.appliedrec.verid.core2.session.LivenessDetectionSessionSettings; import com.appliedrec.verid.core2.session.VerIDSessionResult; import com.appliedrec.verid.identity.VerIDIdentity; import com.appliedrec.verid.identity.VerIDSDKIdentity; import com.appliedrec.verid.ui2.IVerIDSession; import com.appliedrec.verid.ui2.VerIDSession; import com.appliedrec.verid.ui2.VerIDSessionDelegate; //import com.courage.facedetect.FaceCheck; import com.google.android.gms.vision.Frame; import com.google.android.gms.vision.face.Face; import com.google.android.gms.vision.face.FaceDetector; import com.google.android.gms.vision.face.Landmark; //import com.courage.facedetect.FaceCheck; import java.util.concurrent.CompletableFuture; import static android.graphics.Bitmap.createBitmap; import static androidx.test.InstrumentationRegistry.getContext; import static java.util.concurrent.CompletableFuture.completedFuture; //import com.appliedrec.verid.ui2. public class MainActivity extends AppCompatActivity implements VerIDFactoryDelegate, VerIDSessionDelegate { ImageView imageView; Button btnTakePicture; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main); imageView = (ImageView) findViewById(R.id.imageView); btnTakePicture = (Button) findViewById(R.id.btnTakePicture); // FaceCheck.getImage(null,this); // startLivenessDetectionSession(); } @Override public void onVerIDCreated(VerIDFactory verIDFactory, VerID verID) { // You can now start a Ver-ID session Log.i("TAG", "onVerIDCreated: "); LivenessDetectionSessionSettings settings = new LivenessDetectionSessionSettings(); VerIDSession session = new VerIDSession(verID, settings); session.setDelegate(this); session.start(); } @Override public void onVerIDCreationFailed(VerIDFactory verIDFactory, Exception e) { // Failed to create an instance of Ver-ID Log.i("TAG", "onVerIDCreationFailed: " + e.getCause()); e.printStackTrace(); } @Override public void onSessionFinished(IVerIDSession<?> session, VerIDSessionResult result) { if (!result.getError().isPresent()) { // Session succeeded Log.i("TAG", "onSessionFinished: "); } else { Log.i("TAG", "onSessionFinished: unsuccessful"); // Session failed } } @Override public void onSessionCanceled(IVerIDSession<?> session) { session.getSessionIdentifier(); Log.i("TAG", "onSessionCanceled: "); } }
c919ea121946246772f0161a3393cba7deb5deea
[ "Markdown", "Java", "Gradle" ]
3
Gradle
couragepaul/FaceDetection
ea0adaa3184b4986171fa066f4d17a20e262e206
9cfe626239c95943abf9fd2b5be5a720ebce0650
refs/heads/master
<repo_name>vantam1999/160store<file_sep>/index.js const carouselBanner = function() { var carInner = document.querySelector('.carousel__banner .carousel__inner'); var carItem = document.querySelectorAll('.carousel__banner .carousel__banner--item'); var prev = document.querySelector('.controls__prev'); var next = document.querySelector('.controls__next'); var wWindow = window.innerWidth; var wCarInner = carItem.length * wWindow; var wItem = wWindow; // === set width === // carInner.style.width = wCarInner + "px"; carItem.forEach( item => { item.style.width = wWindow + "px"; }); function moveSlide(w) { for(let i = 0; i < carItem.length; i++) { carItem[i].style.transform += "translateX("+ w +"px)"; carItem[i].style.transition = "all .4s"; } } next.addEventListener('click', function() { if(wItem < wCarInner) { moveSlide(-wWindow); wItem += wWindow; } else { wItem = wWindow; for(let i = 0; i < carItem.length; i++) { carItem[i].style.transform = "translateX(0px)"; carItem[i].style.transition = "all .4s"; } } console.log(wItem); }); prev.addEventListener('click', function() { if(wItem > wWindow) { moveSlide(wWindow); wItem -= wWindow; } else { wItem = wCarInner; let wTemp = wCarInner - wWindow; for(let i = 0; i < carItem.length; i++) { carItem[i].style.transform += "translateX("+ -wTemp +"px)"; carItem[i].style.transition = "all .4s"; } } console.log(wItem); }); }; const buttonMore = function() { }; const sliderProduct = function() { $('.owl-carousel').owlCarousel({ loop: true, nav: true, margin: 10, responsive:{ 0:{ items:1, }, 600:{ items:3, }, 1100:{ items:5, } } }); }; document.addEventListener('DOMContentLoaded', function() { carouselBanner(); buttonMore(); sliderProduct(); }); <file_sep>/js/productDetail.js const previewImage = function() { var imageSmall = document.querySelectorAll('.type__imgSmall div'); var imageLarge = document.querySelector('.preview__imgLarge'); imageSmall.forEach((item, index)=> { item.addEventListener('click', function(e){ let srcImg = e.target.getAttribute('src'); imageLarge.setAttribute('src', srcImg); }); }); }; const activeSize = function() { var listSize = document.querySelectorAll('.list__size div'); listSize.forEach(item => { item.addEventListener('click', function(e) { for(let i = 0; i < listSize.length; i++) { listSize[i].classList.remove('active__size'); } e.target.classList.add('active__size'); }); }); }; const chooseQuantity = function() { var quantity = document.querySelector('.quantity'); var minus = document.querySelector('.minus'); var plus = document.querySelector('.plus'); var valQuantity = parseInt(quantity.value); function decreaseQuantity() { valQuantity--; if(valQuantity >= 1) { quantity.setAttribute('value', valQuantity); } else { valQuantity = 1; return; } } function increaseQuantity() { valQuantity++; if(valQuantity <= 20) { quantity.setAttribute('value', valQuantity); } else { valQuantity = 20; return; } } minus.addEventListener('click', function() { decreaseQuantity(); }); plus.addEventListener('click', function() { increaseQuantity(); }); }; document.addEventListener('DOMContentLoaded', function() { previewImage(); activeSize(); chooseQuantity(); });<file_sep>/share/share.js const navHeader = function() { var navItem = document.querySelectorAll('.nav .has-dropdown'); var menu = document.querySelector('.menu'); var menuHome = document.querySelector('.menu__home'); window.addEventListener('scroll', function() { if(document.documentElement.scrollTop >= 250) { menu.style.position = "fixed"; menu.style.top = "0"; menu.style.zIndex = "1000"; menu.style.boxShadow = "rgba(0, 0, 0, 0.5) 0px -2px 6px 0px"; menuHome.style.padding = "0"; } else { menu.style.position = "relative"; menu.style.boxShadow = "none"; menuHome.style.padding = "10px 0"; } }); function activeHover(e) { var dropdown = e.target.nextElementSibling; var dropList = dropdown.children[0]; var dropListHeight = dropList.offsetHeight; dropdown.classList.add('dropdown__active'); dropdown.style.height = dropListHeight + "px"; } function removeHover(e) { var dropdown = e.target.nextElementSibling; dropdown.classList.remove('dropdown__active'); dropdown.style.height = "0"; } navItem.forEach( item => { item.addEventListener('click', function(e) { if(!e.target.nextElementSibling.classList.contains('dropdown__active')) { activeHover(e); } else { removeHover(e); } }); }); }; const effectSocial = function() { var btnOpen = document.querySelector('.contact__social--open'); var itemSocial = document.querySelectorAll('.contact__social li'); var isClick = false; function onOpen() { for(let i = 0; i < itemSocial.length; i++) { itemSocial[i].style.transition = "all .4s"; itemSocial[i].style.transitionDelay = i*0.08+'s'; } itemSocial[0].style.transform = "translate(0, -58px) rotate(720deg)"; itemSocial[1].style.transform = "translate(-46px, -45px) rotate(720deg)"; itemSocial[2].style.transform = "translate(-58px, 0px) rotate(720deg)"; itemSocial[3].style.transform = "translate(-46px, 45px) rotate(720deg)"; itemSocial[4].style.transform = "translate(0, 58px) rotate(720deg)"; } function onClose() { itemSocial[0].style.transform = "translate(0, 0) rotate(0)"; itemSocial[1].style.transform = "translate(0, 0) rotate(0)"; itemSocial[2].style.transform = "translate(0, 0) rotate(0)"; itemSocial[3].style.transform = "translate(0, 0) rotate(0)"; itemSocial[4].style.transform = "translate(0, 0) rotate(0)"; } btnOpen.addEventListener('click', function() { if(!isClick) { onOpen(); } else { onClose(); } isClick = !isClick; }); }; window.addEventListener('load', function() { var wrapper = document.querySelector('.wrapper'); var loading = document.querySelector('.loading'); wrapper.style.display = 'block'; loading.style.display = 'none'; navHeader(); effectSocial(); });
188c723f87c85c7bd630f3910eb8516d66a97f48
[ "JavaScript" ]
3
JavaScript
vantam1999/160store
11900b0826b0e4f56c982656f645e9fcd7c071fd
18c6680d78491b570a279f887819b1477cecac5f
refs/heads/master
<file_sep>document.write('<h1>Hello Feed!</h1>'); <file_sep>var name = 'JoeRay61'; var age = 24; document.write('My name is ' + name + ', I\'m ' + age + ' years old.'); <file_sep>var style = require('./a.css'); document.write('<div class=' + style.foo + '>Module - Foo</div>'); document.write('<div class="bar">Module - Bar</div>'); <file_sep>var data = require('./data'); document.write('<p>foo is ' + data.foo + '</p>'); <file_sep>var data = { foo: 123, bar: 456 }; module.exports = data; <file_sep>document.write('<h1>Hello Profile!</h1>'); <file_sep>var data = require('./data'); document.write('<p>bar is ' + data.bar + '</p>'); <file_sep># webpack-demo webpack demos for beginners
71f2a8190f2f66c8ce9c0fc2f282f8066e8687aa
[ "JavaScript", "Markdown" ]
8
JavaScript
Joe3Ray/webpack-demo
c75d71a8fd840285cade7033dab9ee21a85778f1
8e180b41c8e8ccc37ece5d32021fbc9aa493bcd6
refs/heads/master
<repo_name>ku-ya/dotfiles-arch<file_sep>/tmux_make.sh #!/bin/bash set -e TMUX_VERSION="2.5" sudo apt-get remove --purge tmux sudo apt-get install cmake libevent-dev libncurses5-dev checkinstall if [[ ! -d "$HOME/tmux" ]]; then git clone https://github.com/tmux/tmux.git ~/tmux cd ~/tmux git checkout $TMUX_VERSION else cd ~/tmux git checkout $TMUX_VERSION fi cd ~/tmux sh autogen.sh ./configure && make sudo checkinstall <file_sep>/install.sh #!/usr/bin/env bash set -e sudo apt-get update sudo apt-get -y install git\ tmux\ cmake\ curl\ libtool libtool-bin\ g++ pkg-config\ autoconf automake libncurses5-dev\ software-properties-common\ python-pip\ vim\ zsh\ xclip\ build-essential #if [ ! -d "~/anaconda3" ]; then # wget https://repo.continuum.io/archive/Anaconda3-4.3.1-Linux-x86_64.sh -O ~/anaconda.sh # bash ~/anaconda.sh -b -p $HOME/anaconda # export PATH="$HOME/anaconda3/bin:$PATH" #fi #export PATH="$HOME/anaconda3/bin:$PATH" sudo apt-get install python-dev python-pip python3-dev python3-pip pip install -U pip pip install powerline-status pip install powerline-gitstatus # neovim install #sudo apt-get install software-properties-common #sudo add-apt-repository ppa:neovim-ppa/stable #sudo apt-get update #sudo apt-get install neovim #Get or update neovim github repo if [ ! -d "~/src" ]; then mkdir -p ~/Documents/src fi cd ~/Documents/src if [ ! -e ~/Documents/src/neovim ]; then git clone https://github.com/neovim/neovim.git else cd neovim fi cd ~/Documents/src/neovim #Checkout stable release # git checkout v0.1.7 #Remove old build dir #rm -r build/ # Build and install neovim make CMAKE_BUILD_TYPE=RelWithDebInfo make install pip3 install neovim # Enable use of python plugins #pip2 install --user --upgrade neovim apt-get install pandoc -y ## neovim\ ##add-apt-repository ppa:neovim-ppa/unstable ##apt-get update ##apt-get install neovim ## bash Anaconda3 #os="$(uname)" #echo "$os" ## if["$os"=="Linux"]; then ## echo "Install neovim from os repositories" ## fi ## if[!-d "$HOME/tmp"]; then ## mkdir $HOME/tmp ##fi ## curl -o $HOME/tmp/conda.sh https://repo.continuum.io/archive/Anaconda3-4.3.1-Linux-x86_64.sh <file_sep>/README.md # dotfile ## System dependencies and application install - run ```bash install.sh``` - TODO: complete by adding repository download and execution system configuration dotfile - if system copy is not working check: sudo apt-get install vim-gtk ```bash $ git clone https://github.com/ku-ya/dotfiles.git ~/.dotfiles $ cd ~/.dotfiles $ ./install ``` ## powerline unified theme [link to ubuntu help](https://askubuntu.com/questions/283908/how-can-i-install-and-use-powerline-plugin) [powerline-gitstatus](https://github.com/jaspernbrouwer/powerline-gitstatus) - after json file updade, run: powerline-daemon --replace - powerline is used for all ipython, tmux, and neovim. ``` $ pip install powerline-status $ pip install powerline-gitstatus ``` ## Switching cap-lock and esc - use setxkbmap - config/xkb/config.keyboard file canbe applied ## tmux.conf - powerline theme and configuration ## system wise font setting sudo fc-cache -vf ~/.fonts * if using gnome-terminal the terminal configuration should be changed to use powerline fonts ## neovim - config/nvim/init.vim ## .zshrc - The error ```function definition file not found```: fix: rm ~/.zcompdump* - or add ```setopt HIST_IGNORE_ALL_DUPS``` ## Latex - Use vimtex + zathura (install zathura and zathura-dev) - Inverse and forward search works - [some useful info](https://wikimatze.de/vimtex-the-perfect-tool-for-working-with-tex-and-vim/) - require xdotool
8187e54673647a559fad17a464a18f4eddca200a
[ "Markdown", "Shell" ]
3
Shell
ku-ya/dotfiles-arch
46241465a6edf0eb9880a923edccbc6ee6e5e707
7cea3f585e2f318169d49668dcbfe972bdc1337e
refs/heads/main
<file_sep>import pyautogui import cv2 import numpy as np from pynput.keyboard import Key, Controller import time import keyboard time.sleep(5) keyboar = Controller() # Define the colors to find. OpenCV uses BGR instead of RGB #grey = [174,163,135] #blue = [255,255,0] #green = [5,250,18] #red = [63,57,249] #purple = [153,75,194] while True: #saves a screenshot to a certain path im = pyautogui.screenshot(region = (2040, 180, 1250, 320)) # Load image purp = im.getpixel((180, 50)) red = im.getpixel((1100, 50)) green = im.getpixel((805, 55)) blue = im.getpixel((485, 55)) if (blue == (0,255,255)): keyboar.press('s') keyboar.release('s') print("test") if (green == (18,250,5)): keyboar.press('w') keyboar.release('w') if (red == (249,57,63)): keyboar.press('d') keyboar.release('d') if (purp == (194,75,153)): keyboar.press('a') keyboar.release('a') pyautogui.press('enter')
7dcb7d539bdecdad7cd2c5775e512056fffbed18
[ "Python" ]
1
Python
jjcraze9/jonahproj3
158045e62c0695dd0c4bcc27d8c4fbaf9bf66799
b86a925a2ace5d5c10de2d32360bf234eba9400c
refs/heads/master
<file_sep>-- phpMyAdmin SQL Dump -- version 4.8.5 -- https://www.phpmyadmin.net/ -- -- Host: 127.0.0.1 -- Waktu pembuatan: 23 Nov 2019 pada 15.28 -- Versi server: 10.1.38-MariaDB -- Versi PHP: 7.3.3 SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO"; SET AUTOCOMMIT = 0; START TRANSACTION; SET time_zone = "+00:00"; /*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; /*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; /*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; /*!40101 SET NAMES utf8mb4 */; -- -- Database: `s-p-a` -- -- -------------------------------------------------------- -- -- Struktur dari tabel `calas` -- CREATE TABLE `calas` ( `id` int(10) UNSIGNED NOT NULL, `npm` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `nama` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `kelas` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `jurusan` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `fakultas` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `alamat` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `nomor_telepon` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `krs` text COLLATE utf8mb4_unicode_ci NOT NULL, `avatar` text COLLATE utf8mb4_unicode_ci NOT NULL, `user_id` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `status_kelulusan` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data untuk tabel `calas` -- INSERT INTO `calas` (`id`, `npm`, `nama`, `kelas`, `jurusan`, `fakultas`, `alamat`, `nomor_telepon`, `krs`, `avatar`, `user_id`, `status_kelulusan`, `created_at`, `updated_at`) VALUES (1, '51416339', '<NAME>', '4IA05', 'Teknik Informatika', 'Teknologi INdustri', 'Bogor', '081284418504', 'img/krs/krs.jpg', 'img/avatar/bambang.jpg', '1', 'Lulus', NULL, '2019-11-18 07:34:11'), (16, '51416339', '<NAME>', '4IA01', 'Teknik Mesin', 'sasasa', 'bogor', '081284418504', 'krs/krs.jpg', 'avatar/bambang.jpg', '39', 'Tidak Lulus', '2019-11-07 06:21:59', '2019-11-07 06:21:59'), (17, '51416339', 'bambang', '4IA01', 'Teknik Kimia', 'Teknologi Pangan', 'jl samber gledek no 90 re3 rw2', '081284418504', 'krs/krs.jpg', 'avatar/bambang.jpg', '40', 'Tidak Lulus', '2019-11-07 06:26:00', '2019-11-07 06:26:00'), (18, '51416339', '<NAME>', '4IA02', 'Teknik Mesin', 'Teknologi Pangan', 'Jl. WIbu 23', '9788278320', 'krs/krs.jpg', 'avatar/bambang.jpg', '41', 'Tidak Lulus', '2019-11-07 07:16:57', '2019-11-18 07:35:33'); -- -------------------------------------------------------- -- -- Struktur dari tabel `failed_jobs` -- CREATE TABLE `failed_jobs` ( `id` bigint(20) UNSIGNED NOT NULL, `connection` text COLLATE utf8mb4_unicode_ci NOT NULL, `queue` text COLLATE utf8mb4_unicode_ci NOT NULL, `payload` longtext COLLATE utf8mb4_unicode_ci NOT NULL, `exception` longtext COLLATE utf8mb4_unicode_ci NOT NULL, `failed_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -------------------------------------------------------- -- -- Struktur dari tabel `migrations` -- CREATE TABLE `migrations` ( `id` int(10) UNSIGNED NOT NULL, `migration` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `batch` int(11) NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data untuk tabel `migrations` -- INSERT INTO `migrations` (`id`, `migration`, `batch`) VALUES (1, '2014_10_12_000000_create_users_table', 1), (2, '2014_10_12_100000_create_password_resets_table', 1), (3, '2019_08_19_000000_create_failed_jobs_table', 1), (4, '2019_11_05_091950_create_calas_table', 1); -- -------------------------------------------------------- -- -- Struktur dari tabel `password_resets` -- CREATE TABLE `password_resets` ( `email` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `token` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -------------------------------------------------------- -- -- Struktur dari tabel `users` -- CREATE TABLE `users` ( `id` bigint(20) UNSIGNED NOT NULL, `name` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `email` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `email_verified_at` timestamp NULL DEFAULT NULL, `password` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `role` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `remember_token` varchar(100) COLLATE utf8mb4_unicode_ci DEFAULT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data untuk tabel `users` -- INSERT INTO `users` (`id`, `name`, `email`, `email_verified_at`, `password`, `role`, `remember_token`, `created_at`, `updated_at`) VALUES (1, '<NAME>', '<EMAIL>', NULL, '$2y$10$zTtHgCeneUR52CFUR2xJieU1iSWPGMTRD1AEpkQh9JYrxFQ.DOJ1y', 'admin', '<PASSWORD>KpsCwbkxlZQW1Qr3R6fvKEXZJK3hmjtaBpGLb5', '2019-11-05 02:33:36', '2019-11-05 02:33:36'), (40, 'bambang', '<EMAIL>', NULL, '$2y$10$bzKM7gcniwlDpLEmyhcZZOmx7ullmk9lNVIofIKtV9PPViUZmqXo.', 'calas', 'soTwVUO2YUWiAUdzl0bELTkur0bPIka1z0L1FRXWr3aMM75IB8QqSRxr9bc5', '2019-11-07 06:26:00', '2019-11-07 06:26:00'), (41, '<NAME>', '<EMAIL>', NULL, '$2y$10$jU5rK.agggFwqArTeubl5OTuk9I49HGhIXmGxJLRYhf3sVJNO8856', 'calas', 'GvkSwu87xs8Owqlj5kPNEAgKafRjqkYN7a686GJv6a1sUh457sJWU4kRpAaQ', '2019-11-07 07:16:56', '2019-11-07 07:16:56'), (42, '<NAME>', '<EMAIL>', NULL, '$2y$10$MATQkWwFosaq3Uvipnndt.Guk.MmHflZ2EAC9mziWoxGfN0LKWk2a', 'calas', 'JVstQ7PckexdJ5HDRxndM9xUywKNdkXEb2yvqLtydPp6G5MHA2x6rpw2Mv4N', '2019-11-18 03:13:53', '2019-11-18 03:13:53'); -- -- Indexes for dumped tables -- -- -- Indeks untuk tabel `calas` -- ALTER TABLE `calas` ADD PRIMARY KEY (`id`); -- -- Indeks untuk tabel `failed_jobs` -- ALTER TABLE `failed_jobs` ADD PRIMARY KEY (`id`); -- -- Indeks untuk tabel `migrations` -- ALTER TABLE `migrations` ADD PRIMARY KEY (`id`); -- -- Indeks untuk tabel `password_resets` -- ALTER TABLE `password_resets` ADD KEY `password_resets_email_index` (`email`); -- -- Indeks untuk tabel `users` -- ALTER TABLE `users` ADD PRIMARY KEY (`id`), ADD UNIQUE KEY `users_email_unique` (`email`); -- -- AUTO_INCREMENT untuk tabel yang dibuang -- -- -- AUTO_INCREMENT untuk tabel `calas` -- ALTER TABLE `calas` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=19; -- -- AUTO_INCREMENT untuk tabel `failed_jobs` -- ALTER TABLE `failed_jobs` MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT; -- -- AUTO_INCREMENT untuk tabel `migrations` -- ALTER TABLE `migrations` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=5; -- -- AUTO_INCREMENT untuk tabel `users` -- ALTER TABLE `users` MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=43; COMMIT; /*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; /*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; <file_sep><?php namespace App\Http\Controllers; use Illuminate\Http\Request; use App\Calas; use Auth; class CalasController extends Controller { /** * Display a listing of the resource. * * @return \Illuminate\Http\Response */ public function index() { $calas = Calas::all(); return view('calas.index',compact('calas')); } /** * Show the form for creating a new resource. * * @return \Illuminate\Http\Response */ public function create() { // } /** * Store a newly created resource in storage. * * @param \Illuminate\Http\Request $request * @return \Illuminate\Http\Response */ public function store(Request $request) { // } /** * Display the specified resource. * * @param int $id * @return \Illuminate\Http\Response */ public function show($id) { // } /** * Show the form for editing the specified resource. * * @param int $id * @return \Illuminate\Http\Response */ public function edit($id) { $calas = Calas::find($id); return view('calas.edit',compact('calas')); } /** * Update the specified resource in storage. * * @param \Illuminate\Http\Request $request * @param int $id * @return \Illuminate\Http\Response */ public function update(Request $request, $id) { // dd($request->all()); $calas=\App\Calas::find($id); $calas->update($request->all()); return redirect('/calas')->with('status','Data calas berhasil diubah'); } /** * Remove the specified resource from storage. * * @param int $id * @return \Illuminate\Http\Response */ public function destroy($id) { $calas=\App\Calas::find($id); $calas->delete(); return redirect('/siswa')->with('status','Data calas berhasil dihapus!'); } public function logout(Request $request) { Auth::logout(); return redirect('/login'); } public function register(){ return view('sites.register'); } public function postregister(Request $request) { //request user $user= new \App\User; $user->role='calas'; $user->name=$request->nama; $user->email=$request->email; $user->password=<PASSWORD>($request->password); $user->save(); // insert ke tabel calas if($request->hasFile('avatar')) { $destination= "avatar"; $filename = $request->file('avatar'); $filename ->move($destination, $filename->getClientOriginalName()); } if($request->hasFile('krs')) { $dada= "krs"; $krsname = $request->file('krs'); $krsname ->move($dada, $krsname->getClientOriginalName()); } $calas = new Calas; $calas->npm= $request->npm; $calas->nama= $request->nama; $calas->kelas= $request->kelas; $calas->jurusan= $request->jurusan; $calas->fakultas= $request->fakultas; $calas->alamat= $request->alamat; $calas->nomor_telepon= $request->nomor_telepon; $calas->krs = 'krs/'.$krsname->getClientOriginalName(); $calas->avatar='avatar/'. $filename->getClientOriginalName(); $calas->user_id= $user->id; $calas->status_kelulusan= 'Tidak Ada Status'; $calas->save(); return redirect()->back(); } public function status() { return view('calas.status'); } } <file_sep>-- phpMyAdmin SQL Dump -- version 4.8.5 -- https://www.phpmyadmin.net/ -- -- Host: 1172.16.31.10 -- Waktu pembuatan: 06 Nov 2019 pada 10.43 -- Versi server: 10.1.38-MariaDB -- Versi PHP: 7.3.3 SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO"; SET AUTOCOMMIT = 0; START TRANSACTION; SET time_zone = "+00:00"; /*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; /*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; /*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; /*!40101 SET NAMES utf8mb4 */; -- -- Database: `s-p-a` -- -- -------------------------------------------------------- -- -- Struktur dari tabel `calas` -- CREATE TABLE `calas` ( `id` int(10) UNSIGNED NOT NULL, `npm` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `nama` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `kelas` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `jurusan` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `fakultas` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `alamat` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `nomor_telepon` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `krs` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `avatar` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `user_id` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data untuk tabel `calas` -- INSERT INTO `calas` (`id`, `npm`, `nama`, `kelas`, `jurusan`, `fakultas`, `alamat`, `nomor_telepon`, `krs`, `avatar`, `user_id`, `created_at`, `updated_at`) VALUES (1, '51416339', '<NAME>', '4IA05', 'Teknik Informatika', 'Teknologi INdustri', 'Bogor', '081284418504', 'img/krs/krs.jpg', 'img/avatar/bambang.jpg', '1', NULL, NULL), (2, '51416339', 'bambang', '4IA01', 'Teknik Kimia', 'Teknologi Pangan', 'sasasa', '081284418504', 'img/krs/krs.jpg', 'C:\\xampp\\tmp\\php9112.tmp', '2', '2019-11-05 05:23:45', '2019-11-05 05:23:45'), (4, '51416339', 'bambang', '4IA01', 'Teknik Kimia', 'Teknologi Pangan', 'sdsds', '081284418504', 'C:\\xampp\\tmp\\php502D.tmp', 'C:\\xampp\\tmp\\php502E.tmp', '4', '2019-11-05 05:32:13', '2019-11-05 05:32:13'), (5, '51416339', 'bambang', '4IA01', 'Teknik Mesin', 'Teknologi Pangan', 'zxzxz', '081284418504', 'C:\\xampp\\tmp\\phpAD75.tmp', 'C:\\xampp\\tmp\\phpAD76.tmp', '5', '2019-11-05 07:26:13', '2019-11-05 07:26:13'), (6, '51416339', 'bambang', '4IA01', 'Teknik Kimia', 'Teknologi Pangan', 'jl samber gledek no 90 re3 rw2', '081284418501', 'C:\\xampp\\tmp\\phpDF3E.tmp', 'C:\\xampp\\tmp\\phpDF4F.tmp', '7', '2019-11-06 02:06:00', '2019-11-06 02:06:00'), (7, '51416339', 'bambang', '4IA01', 'Teknik Kimia', 'Teknologi Pangan', 'sa', '081284418504', 'C:\\xampp\\tmp\\php5A5D.tmp', 'C:\\xampp\\tmp\\php5A5E.tmp', '8', '2019-11-06 02:34:55', '2019-11-06 02:34:55'); -- -------------------------------------------------------- -- -- Struktur dari tabel `failed_jobs` -- CREATE TABLE `failed_jobs` ( `id` bigint(20) UNSIGNED NOT NULL, `connection` text COLLATE utf8mb4_unicode_ci NOT NULL, `queue` text COLLATE utf8mb4_unicode_ci NOT NULL, `payload` longtext COLLATE utf8mb4_unicode_ci NOT NULL, `exception` longtext COLLATE utf8mb4_unicode_ci NOT NULL, `failed_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -------------------------------------------------------- -- -- Struktur dari tabel `migrations` -- CREATE TABLE `migrations` ( `id` int(10) UNSIGNED NOT NULL, `migration` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `batch` int(11) NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data untuk tabel `migrations` -- INSERT INTO `migrations` (`id`, `migration`, `batch`) VALUES (1, '2014_10_12_000000_create_users_table', 1), (2, '2014_10_12_100000_create_password_resets_table', 1), (3, '2019_08_19_000000_create_failed_jobs_table', 1), (4, '2019_11_05_091950_create_calas_table', 1); -- -------------------------------------------------------- -- -- Struktur dari tabel `password_resets` -- CREATE TABLE `password_resets` ( `email` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `token` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -------------------------------------------------------- -- -- Struktur dari tabel `users` -- CREATE TABLE `users` ( `id` bigint(20) UNSIGNED NOT NULL, `name` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `email` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `email_verified_at` timestamp NULL DEFAULT NULL, `password` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `role` varchar(191) COLLATE utf8mb4_unicode_ci NOT NULL, `remember_token` varchar(100) COLLATE utf8mb4_unicode_ci DEFAULT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data untuk tabel `users` -- INSERT INTO `users` (`id`, `name`, `email`, `email_verified_at`, `password`, `role`, `remember_token`, `created_at`, `updated_at`) VALUES (1, '<NAME>', '<EMAIL>', NULL, '$2y$10$zTtHgCeneUR52CFUR2xJieU1iSWPGMTRD1AEpkQh9JYrxFQ.DOJ1y', 'admin', '<PASSWORD>EiY<PASSWORD>Txb53rg906g<PASSWORD>4n9<PASSWORD>kCWva6FZXKaC', '2019-11-05 02:33:36', '2019-11-05 02:33:36'), (2, 'bambang', '<EMAIL>', NULL, '$2y$10$w9hdvfInUrrdJtgYNGBH6uPIYmSr4yOQ6.kOoXVL3nhMwKjRgkPkC', 'calas', NULL, '2019-11-05 05:23:45', '2019-11-05 05:23:45'), (4, 'bambang', '<EMAIL>', NULL, '$2y$10$5DzAgNZ1o9AjQlkXQW7/X.9WIWPdta1i3zp80nQQCMoxiFs9PwHwa', 'calas', NULL, '2019-11-05 05:32:13', '2019-11-05 05:32:13'), (5, 'bambang', '<EMAIL>', NULL, '$2y$10$hBQqkPz2TELxDQ5mLXE86elXseWqG/XIzJ9mXIIXg1c6LwTrsCuVq', 'calas', NULL, '2019-11-05 07:26:13', '2019-11-05 07:26:13'), (7, 'bambang', '<EMAIL>', NULL, '$2y$10$1GACSQwTtUHIwV54rGguSerQG4rYT4o7oTgS506L6eVbt7jrIH6oy', 'calas', NULL, '2019-11-06 02:05:59', '2019-11-06 02:05:59'), (8, 'bambang', '<EMAIL>', NULL, '$2y$10$QSj6fQt2rvoIUm77jMhivO7XZPTA62Liys1f8DXTdfqj11aM5FtXa', 'calas', NULL, '2019-11-06 02:34:55', '2019-11-06 02:34:55'); -- -- Indexes for dumped tables -- -- -- Indeks untuk tabel `calas` -- ALTER TABLE `calas` ADD PRIMARY KEY (`id`); -- -- Indeks untuk tabel `failed_jobs` -- ALTER TABLE `failed_jobs` ADD PRIMARY KEY (`id`); -- -- Indeks untuk tabel `migrations` -- ALTER TABLE `migrations` ADD PRIMARY KEY (`id`); -- -- Indeks untuk tabel `password_resets` -- ALTER TABLE `password_resets` ADD KEY `password_resets_email_index` (`email`); -- -- Indeks untuk tabel `users` -- ALTER TABLE `users` ADD PRIMARY KEY (`id`), ADD UNIQUE KEY `users_email_unique` (`email`); -- -- AUTO_INCREMENT untuk tabel yang dibuang -- -- -- AUTO_INCREMENT untuk tabel `calas` -- ALTER TABLE `calas` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=8; -- -- AUTO_INCREMENT untuk tabel `failed_jobs` -- ALTER TABLE `failed_jobs` MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT; -- -- AUTO_INCREMENT untuk tabel `migrations` -- ALTER TABLE `migrations` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=5; -- -- AUTO_INCREMENT untuk tabel `users` -- ALTER TABLE `users` MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=9; COMMIT; /*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; /*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; <file_sep><?php namespace App; use Illuminate\Database\Eloquent\Model; class Calas extends Model { protected $table='calas'; protected $fillable = ['npm', 'nama', 'kelas','jurusan','fakultas','alamat','nomor_telepon','krs','avatar','user_id','status_kelulusan']; } <file_sep><?php /* |-------------------------------------------------------------------------- | Web Routes |-------------------------------------------------------------------------- | | Here is where you can register web routes for your application. These | routes are loaded by the RouteServiceProvider within a group which | contains the "web" middleware group. Now create something great! | */ // Route::get('/', function () { // return view('welcome'); // }); Route::get('/', 'SiteController@home'); Auth::routes(); Route::name('user')->group(function(){ Route::get('/calas', 'CalasController@index'); Route::get('/status_kelulusan', 'CalasController@status'); Route::get('/calas/{id}/edit', 'CalasController@edit'); Route::get('/calas/{id}/delete', 'CalasController@destroy'); Route::post('/calas/{id}/update', 'CalasController@update'); }); Route::get('/home', 'HomeController@index')->name('home'); Route::get('/registercalas', 'CalasController@register'); Route::post('/postregister','CalasController@postregister');
a87e0a788140f55528d601cabdf9535aaee129bc
[ "SQL", "PHP" ]
5
SQL
Bambangsolehudin/Daftar-Aslab
95e952a394cb7c4d1e13d45e67067bc79493bd4a
1229a3f6462e5bab12cc8219cf54dd3f64474f1a
refs/heads/master
<file_sep>// // EmojiSelectionViewController.swift // ShoppingCart // // Created by <NAME> on 8/10/16. // Copyright © 2016 Gamesmith, LLC. All rights reserved. // import UIKit class EmojiSelectionViewController: UIViewController { @IBOutlet weak var emojiText1: UITextField! @IBOutlet weak var emojiText2: UITextField! var emojiDelegate: EmojiCreation? @IBAction func saveButtonPressed(_ sender: UIButton) { guard let emoji1 = emojiText1.text else { return } guard let emoji2 = emojiText2.text else { return } emojiDelegate?.create(emojiGroup: (emoji1, emoji2)) dismiss(animated: true, completion: nil) } override func viewDidLoad() { super.viewDidLoad() view.backgroundColor = UIColor(red:0.22, green:0.33, blue:0.58, alpha:1.00) } } <file_sep>// // ShoppingListViewController.swift // ShoppingCart // // Created by <NAME> on 8/10/16. // Copyright © 2016 Gamesmith, LLC. All rights reserved. // import UIKit // TODO: Create protocol here. //(3) - Head back to the ShoppingListViewController.swift file. Near the top of this file, right below the import UIKit line of code, we will be creating a protocol. //Create a protocol called EmojiCreation. In your implementation of this protocol should be one requirement. A function called create(emojiGroup:) that takes in one argument called emojiGroup of type (String, String). //If someone was to call on this function, they would do so like this: //create(emojiGroup: ("😋", "🤕")) //The two parenthesis seem weird when calling that function (we'll go into that more later), but that's because the type of the argument of this function is a tuple and you create a tuple using parenthesis like that. protocol EmojiCreation { func create(emojiGroup: (String, String)) } class ShoppingViewController: UIViewController { @IBOutlet weak var tableView: UITableView! var emojis: [(String, String)] = [] { didSet { tableView.reloadData() } } override func viewDidLoad() { super.viewDidLoad() view.backgroundColor = UIColor(red:0.75, green:0.18, blue:0.27, alpha:1.00) tableView.backgroundColor = UIColor(red:0.85, green:0.37, blue:0.29, alpha:1.0) tableView.delegate = self tableView.dataSource = self tableView.allowsSelection = false } override func prepare(for segue: UIStoryboardSegue, sender: Any?) { let destination = segue.destination as! EmojiSelectionViewController destination.emojiDelegate = self } } // MARK: - UITableViewDataSource Methods extension ShoppingViewController: UITableViewDataSource { func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int { return emojis.count } func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell { let cell = tableView.dequeueReusableCell(withIdentifier: "EmojiCell", for: indexPath) as! EmojiCellTableViewCell let currentEmojiLove = emojis[(indexPath as NSIndexPath).row] cell.firstEmojiLabel.text = currentEmojiLove.0 cell.secondEmojiLabel.text = currentEmojiLove.1 return cell } } // MARK: - UITableViewDelegate Methods extension ShoppingViewController: UITableViewDelegate { } //(4) In the ShoppingListViewController.swift file, scroll down to the bottom and create an extension on the ShoppingViewController where within the extension you're adopting the EmojiCreation. Very similar to how it's being done with the Data Source and Delegate protocols. extension ShoppingViewController: EmojiCreation { func create(emojiGroup: (String, String)) { emojis.append(emojiGroup) } }
7ad42e944c4f69bf4d13504fc0b3730ae601f36b
[ "Swift" ]
2
Swift
one-for-all/swift-ProtocolDelegate-lab-swift-intro-000
07c46ea5a0ce72bbcd63aea60298d11fe46ce5b6
a641676b19cd38ed5d7c96e40dfa75f96396eb2d
refs/heads/master
<file_sep>package com.test; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.List; import static org.junit.jupiter.api.Assertions.*; class ListPracticeTest { private ListPractice listPractice; @BeforeEach void setUp() { listPractice = new ListPractice(); } @Test void testListSizeIsReturned() { //prepare List<String> testList = new ArrayList<>(); testList.add("a"); testList.add("b"); //execute int size = listPractice.getMeSizeOfList(testList); //verify assertEquals(2, size); } @Test void testAddReturnsIncreasedList() { //prepare List<Integer> initialList = new ArrayList<>(); initialList.add(1); initialList.add(2); initialList.add(2); initialList.add(2); //execute listPractice.addElement(initialList, 50); //verify assertEquals(5, initialList.size()); assertEquals(50, initialList.get(initialList.size()-1)); } @Test void testRemoveReturnsDecreasedList(){ //Prepare List<Integer> primaryList =new ArrayList<>(); primaryList.add(2); primaryList.add(3); primaryList.add(6); //Execute listPractice.removeElement(primaryList, 3); //Verify assertEquals(2, primaryList.size()); assertFalse(primaryList.contains(6)); } }<file_sep>package com.test; import java.util.List; public class ListPractice { public void removeElement(List<Integer> primaryList, int positionToBeRemoved) { primaryList.remove(positionToBeRemoved-1); } public int getMeSizeOfList(List<String> testList) { int size = testList.size(); return size; } public void addElement(List<Integer> initialList, int elementToBeAdded) { initialList.add(elementToBeAdded); } }
685be818bce123d2ea9f3d583e7eb5e4721bf0cf
[ "Java" ]
2
Java
AmritaDatta/Collections-practice
7c54c377414539a9113516a89bdb2f7ecdfeb816
08b4f865cd247c231aa21e3b35319475d0e5b815
refs/heads/master
<file_sep>// ==UserScript== // @name instatweets // @namespace http://babyl.ca/instatweets // @include http://search.cpan.org/* // @require http://localhost:3000/javascripts/jquery.js // @require http://localhost:3000/javascripts/jquery.form.js // @require http://localhost:3000/javascripts/jquery.cookies.js // @require http://localhost:3000/javascripts/autoresize.jquery.min.js // ==/UserScript== gm_xhr_bridge(); var insta_root = 'http://localhost:3000'; function submitTweet () { $("#sending_tweet").show(); $.post( insta_root + '/tweet', { 'update': $('#twitter_status').get(0).value, }, function ( data, textStatus ) { $("#sending_tweet").hide(); $("#twitter_status").get(0).value = ""; update_counter(); $('#twitter_term').slideToggle(); } ); }; function update_counter () { var l = $('#twitter_status').get(0).value.length; $('#twitter_counter') .html(l) .css('color', l > 140 ? 'red' : 'black' ); } $(function(){ $("<div id='twit' />" ) .css({ position: "absolute", top: "0px", right: "5px" }) .appendTo('body'); $( '<img id="twitter_logo" src="' + insta_root + '/twitter_logo.png" />' ) .appendTo( '#twit' ) .click( function(){ $('#twitter_term').slideToggle(); }); $('body').append( '<div id="twitter_term" style="padding: 5px; z-index: 20000; display: none; background-color: lightgrey; position: absolute; top: 0px; right: 120px;";>' + '<form method="POST" id="tweet_form">' + ' <textarea id="twitter_status" name="status" style="width: 50em"></textarea>' + ' <input id="submit_tweet" type="button" value="tweet" />' + '</form>' + '<p>characters: <span id="twitter_counter"></span></p>' + '<div id="sending_tweet" style="display: none">sending...</div>' + '' + '<div id="twitter_warnings">' + '</div>' + '' + '<p align="right"><a href="#hide" onclick="$(\'#twitter_term\').slideToggle();return false;">hide</a></p>' + '</div>'); $('#twitter_status').autoResize(); $('<span/>').attr('class','not_auth').html( "you must <a href='" + insta_root + "/authenticate?origin=" + document.location +"'>" + "authenticate</a> yourself " + "on Twitter before you can tweet" ).prependTo('#twitter_warnings'); $.get( insta_root + '/authenticated', function(data) { $('.not_auth').hide(); } ); $('#submit_tweet').click(submitTweet); $('#twitter_status').keyup(update_counter); update_counter(); }); /* ---------------------------------------------------------- */ // Wrapper function function GM_XHR() { this.type = null; this.url = null; this.async = null; this.username = null; this.password = <PASSWORD>; this.status = null; this.headers = {}; this.readyState = null; this.open = function(type, url, async, username, password) { this.type = type ? type : null; this.url = url ? url : null; this.async = async ? async : null; this.username = username ? username : null; this.password = <PASSWORD> ? password : null; this.readyState = 1; }; this.setRequestHeader = function(name, value) { this.headers[name] = value; }; this.abort = function() { this.readyState = 0; }; this.getResponseHeader = function(name) { return this.headers[name]; }; this.send = function(data) { this.data = data; var that = this; GM_xmlhttpRequest({ method: this.type, url: this.url, headers: this.headers, data: this.data, onload: function(rsp) { // Populate wrapper object with all data returned from GM_XMLHttpRequest for (k in rsp) { that[k] = rsp[k]; } }, onerror: function(rsp) { for (k in rsp) { that[k] = rsp[k]; } }, onreadystatechange: function(rsp) { for (k in rsp) { that[k] = rsp[k]; } } }); }; }; function gm_xhr_bridge() { // Author: <NAME> (<EMAIL>) // Date: September 3, 2009 // Version: $Id: gm_jq_xhr.js 240 2009-11-03 17:38:40Z ryan $ // This allows jQuery to make cross-domain XHR by providing // a wrapper for GM_xmlhttpRequest. The difference between // XMLHttpRequest and GM_xmlhttpRequest is that the Greasemonkey // version fires immediately when passed options, whereas the standard // XHR does not run until .send() is called. In order to allow jQuery // to use the Greasemonkey version, we create a wrapper object, GM_XHR, // that stores any parameters jQuery passes it and then creates GM_xmlhttprequest // when jQuery calls GM_XHR.send(). // Tell jQuery to use the GM_XHR object instead of the standard browser XHR $.ajaxSetup({ xhr: function(){return new GM_XHR;} }); }
11c3c0beece71397949a99f31dfd495e2ce8a0ff
[ "JavaScript" ]
1
JavaScript
yanick/instatweets
f687f7cb082071f9cbf62fd9452ff7b7014e9323
156fb7ab6848f3cdadc314e5708d645cc199651b
refs/heads/master
<file_sep>import tkinter def calculate(event): gleichung = t.get() t.delete(0, tkinter.END) try: t.insert(0, eval(gleichung)) except: t.insert(0, "Invalid Syntax") top = tkinter.Tk() t = tkinter.Entry(top) t.grid(row=0, columnspan=3) b1 = tkinter.Button(top, text="1") b1.grid(row=1, column=0) b2 = tkinter.Button(top, text="2") b2.grid(row=1, column=1) b3 = tkinter.Button(top, text="3") b3.grid(row=1, column=2) b4 = tkinter.Button(top, text="4") b4.grid(row=2, column=0) b5 = tkinter.Button(top, text="5") b5.grid(row=2, column=1) b6 = tkinter.Button(top, text="6") b6.grid(row=2, column=2) b7 = tkinter.Button(top, text="7") b7.grid(row=3, column=0) b8 = tkinter.Button(top, text="8") b8.grid(row=3, column=1) b9 = tkinter.Button(top, text="9") b9.grid(row=3, column=2) b0 = tkinter.Button(top, text="0") b0.grid(row=4, column=1) bp = tkinter.Button(top, text="+") bp.grid(row=0, column=3) bm = tkinter.Button(top, text="-") bm.grid(row=1, column=3) bmu = tkinter.Button(top, text="*") bmu.grid(row=2, column=3) bd = tkinter.Button(top, text="/") bd.grid(row=3, column=3) be = tkinter.Button(top, text="=") be.grid(row=4, column=3) bdel = tkinter.Button(top, text="DEL") bdel.grid(row=4, column=2) b1.bind("<Button-1>", lambda x: t.insert(tkinter.END, "1")) b2.bind("<Button-1>", lambda x: t.insert(tkinter.END, "2")) b3.bind("<Button-1>", lambda x: t.insert(tkinter.END, "3")) b4.bind("<Button-1>", lambda x: t.insert(tkinter.END, "4")) b5.bind("<Button-1>", lambda x: t.insert(tkinter.END, "5")) b6.bind("<Button-1>", lambda x: t.insert(tkinter.END, "6")) b7.bind("<Button-1>", lambda x: t.insert(tkinter.END, "7")) b8.bind("<Button-1>", lambda x: t.insert(tkinter.END, "8")) b9.bind("<Button-1>", lambda x: t.insert(tkinter.END, "9")) b0.bind("<Button-1>", lambda x: t.insert(tkinter.END, "0")) bp.bind("<Button-1>", lambda x: t.insert(tkinter.END, "+")) bm.bind("<Button-1>", lambda x: t.insert(tkinter.END, "-")) bmu.bind("<Button-1>", lambda x: t.insert(tkinter.END, "*")) bd.bind("<Button-1>", lambda x: t.insert(tkinter.END, "/")) be.bind("<Button-1>", calculate) bdel.bind("<Button-1>", lambda x: t.delete(0, tkinter.END)) top.mainloop()
606d58fc4398a39e4253fbc9c339fa0179cd31f3
[ "Python" ]
1
Python
henrydied/calculator
49f4b8e45a43cc6321b475e2c505eacd7b82bb0a
d41dd1e6ff0e260e209ae0503dfd8c6f2eff4e4d
refs/heads/master
<file_sep>using System; using System.Threading; using System.Threading.Tasks; using System.Windows.Forms; namespace PrimeNumbers_GUI { public partial class MainForm : Form { private CancellationTokenSource cancellationTokenSource; // syncObj method from @servy42 at social.msdn.microsoft.com // I understand how this works, but not why it's so hacky to pause tasks. // At the time I did this, I did not understand the pause to be extra credit. private object syncObj = new object(); private bool paused; public MainForm() { InitializeComponent(); } private async void startButton_Click(object sender, EventArgs e) { cancellationTokenSource = new CancellationTokenSource(); var token = cancellationTokenSource.Token; // Find all prime numbers starting between the first and last numbers int firstNum = 0; int lastNum = 0; try { firstNum = Convert.ToInt32(startNumTextBox.Text); lastNum = Convert.ToInt32(endNumTextBox.Text); } catch (Exception) { MessageBox.Show("Invalid input. Expected integer.", "Error", MessageBoxButtons.OK, MessageBoxIcon.Warning); } numbersTextBox.Clear(); // Prevent user from messing with certain controls while job is running progressBar1.Minimum = firstNum; progressBar1.Maximum = lastNum; progressBar1.Visible = true; cancelButton.Enabled = true; pauseButton.Enabled = true; startButton.Enabled = false; startNumTextBox.Enabled = false; endNumTextBox.Enabled = false; UseWaitCursor = true; // See which numbers are factors and append them to the numbers text box await Task.Run(() => { for (int i = firstNum; i <= lastNum; i++) { lock (syncObj) { }; if (token.IsCancellationRequested) break; if (IsPrime(i)) { AddNumberToTextBox(i); } } }); // Let the user know we did something even if no prime nums were found if (numbersTextBox.TextLength == 0) { numbersTextBox.Text = "None."; } UseWaitCursor = false; // Reset the form startNumTextBox.Enabled = true; endNumTextBox.Enabled = true; progressBar1.Value = progressBar1.Minimum; progressBar1.Visible = false; cancelButton.Enabled = false; pauseButton.Enabled = false; startButton.Enabled = true; } private bool IsPrime(int num) { if (num < 2) return false; // Look for a number that evenly divides the num for (int i = 2; i <= num / 2; i++) if (num % i == 0) return false; // No divisors means the number is prime return true; } private void AddNumberToTextBox(int num) { try { Invoke((Action)delegate () { numbersTextBox.AppendText(num + "\n"); progressBar1.Value = num; }); } catch (ObjectDisposedException) { // The form was closed before the thread completed. } } private void pauseButton_Click(object sender, EventArgs e) { if (paused == false) { Monitor.Enter(syncObj); paused = true; cancelButton.Enabled = false; pauseButton.Text = "Resume"; UseWaitCursor = false; } else if (paused == true) { paused = false; Monitor.Exit(syncObj); cancelButton.Enabled = true; pauseButton.Text = "Pause"; UseWaitCursor = true; } } private void cancelButton_Click(object sender, EventArgs e) { cancellationTokenSource.Cancel(); } } }
b7c7c423acd707ae9ed0dc7bc507016b34779a4a
[ "C#" ]
1
C#
kjhx-hw/primenumbers
ce37408358b514a9b4a307466fbe910eb179533e
479bdad06920fc0ff4a9a61afb0a4fa8a260ab31
refs/heads/master
<repo_name>AlexShutov/TestApp<file_sep>/app/src/main/java/com/example/lodoss/testapp/DITest/ABModule.java package com.example.lodoss.testapp.DITest; import android.app.Application; import android.content.Context; import dagger.Module; import dagger.Provides; /** * Created by lodoss on 27/01/16. */ @Module public class ABModule { @Provides A provideA1(){ return new A1(); } @Provides B provideB1(){ return new B1(); } } <file_sep>/app/src/main/java/com/example/lodoss/testapp/DITest/A1.java package com.example.lodoss.testapp.DITest; import android.content.Context; import javax.inject.Inject; /** * Created by lodoss on 27/01/16. */ public class A1 implements A { //private Context context; @Inject public A1(){ } @Override public void showMessage(String msg) { System.out.println(msg); } } <file_sep>/app/src/main/java/com/example/lodoss/testapp/RxSampleExec.java package com.example.lodoss.testapp; /** * Created by lodoss on 26/01/16. */ public interface RxSampleExec { void execSample(); } <file_sep>/app/src/main/java/com/example/lodoss/testapp/DITest/C.java package com.example.lodoss.testapp.DITest; import javax.inject.Inject; /** * Created by lodoss on 27/01/16. */ public class C { private A a; private B b; @Inject public C(A a, B b){ this.a = a; this.b = b; } public void exec(){ a.showMessage(b.createMessage()); } } <file_sep>/app/src/main/java/com/example/lodoss/testapp/DITest/A.java package com.example.lodoss.testapp.DITest; /** * Created by lodoss on 27/01/16. */ public interface A { void showMessage(String msg); } <file_sep>/app/src/main/java/com/example/lodoss/testapp/RxLifetimeSamples.java package com.example.lodoss.testapp; import rx.subjects.ReplaySubject; import rx.subjects.Subject; /** * Created by lodoss on 26/01/16. */ public class RxLifetimeSamples implements RxSampleExec { @Override public void execSample() { subscribingSample.testRx(); } public RxSample subscribingSample = () -> { Subject<Integer, Integer> s = ReplaySubject.create(); s.subscribe( v -> System.out.println(v), e -> System.err.println(e)); s.onNext(0); s.onError(new Exception("Some error")); }; } <file_sep>/app/src/main/java/com/example/lodoss/testapp/DITest/AppModule.java package com.example.lodoss.testapp.DITest; import android.app.Application; import android.content.Context; import dagger.Module; import dagger.Provides; /** * Created by lodoss on 27/01/16. */ @Module public class AppModule { private Application appRef; public AppModule(Application app){ this.appRef = app; } @Provides Context provideApplicationReference(){ return appRef; } } <file_sep>/app/src/main/java/com/example/lodoss/testapp/RxSample.java package com.example.lodoss.testapp; /** * Created by lodoss on 26/01/16. */ public interface RxSample { void testRx(); } <file_sep>/app/src/main/java/com/example/lodoss/testapp/DITest/ABComponent.java package com.example.lodoss.testapp.DITest; import com.example.lodoss.testapp.TestActivity; import dagger.Component; /** * Created by lodoss on 27/01/16. */ @Component(modules = {AppModule.class, ABModule.class, A2B2Module.class}) public interface ABComponent { void inject(TestActivity testActivity); } <file_sep>/app/src/main/java/com/example/lodoss/testapp/TestApplication.java package com.example.lodoss.testapp; import android.app.Application; import android.content.Context; import com.example.lodoss.testapp.DITest.A2B2Module; import com.example.lodoss.testapp.DITest.ABComponent; import com.example.lodoss.testapp.DITest.ABModule; import com.example.lodoss.testapp.DITest.AppModule; import com.example.lodoss.testapp.DITest.DaggerABComponent; /** * Created by lodoss on 27/01/16. */ public class TestApplication extends Application { private ABComponent mComponent; @Override public void onCreate() { super.onCreate(); mComponent = DaggerABComponent.builder() .appModule(new AppModule(this)) .aBModule(new ABModule()) .a2B2Module(new A2B2Module()) .build(); } public static ABComponent component(Context context){ return ((TestApplication) context.getApplicationContext()).mComponent; } public ABComponent getComponent(){ return mComponent; } }
f08d28676af96699efd360361aa428a18af735ff
[ "Java" ]
10
Java
AlexShutov/TestApp
09163754ee15e3841de485c6724e12030b5bd671
2faf0794694bc43d0adfa43dc159f13a419253e3
refs/heads/master
<file_sep># image-stitch <file_sep>#! /usr/bin/env node 'use strict' const yargs = require('yargs'); const portrait = require('./portrait'); const stitch = require('./stitch'); yargs .command( 'portrait', 'rotates pictures in current dir', () => {}, portrait ) .command( 'stitch', 'joins pictures together 2 by 2', () => {}, stitch ) .argv; <file_sep>'use strict' const mkdirp = require('mkdirp'); const imageMagick = require('gm').subClass({ imageMagick: true }); const ProgressBar = require('progress'); const fs = require('fs'); const sourceDir = process.cwd(); const outDir = sourceDir + '/rotated/'; module.exports = () => { mkdirp.sync(outDir); const reg = /\.jpg$/; let files = fs.readdirSync(sourceDir).filter(fileName => reg.test(fileName)); let exported = 0; let rotated = 0; const bar = new ProgressBar(`Processing ${files.length} files [:bar] :percent :etas`, { complete: '=', incomplete: ' ', total: files.length }); files.forEach((fileName) => { imageMagick(fileName).size((err, value) => { let degrees = 0; if (value.width > value.height) { degrees = 90; rotated++; } imageMagick(fileName).rotate('#ffffff', degrees).resize(null, 2049).write(outDir + fileName, () => { exported++; bar.tick(); }); }); }); // console.log(`Finished: Rotated ${rotated}, Exported ${exported}`); } <file_sep>'use strict' const mkdirp = require('mkdirp'); const imageMagick = require('gm').subClass({ imageMagick: true }); const ProgressBar = require('progress'); const fs = require('fs'); const sourceDir = process.cwd(); const outDir = sourceDir + '/stitched/'; module.exports = () => { mkdirp.sync(outDir); const reg = /\.jpg$/; let files = fs.readdirSync(sourceDir).filter(fileName => reg.test(fileName)); const len = files.length; let i = 0; const bar = new ProgressBar(`Processing ${files.length} files [:bar] :percent :etas`, { complete: '=', incomplete: ' ', total: files.length }); while (i < len) { const fileNameA = files[i]; const fileNameB = files[++i]; const outFileName = `${outDir}/stitched_${i}.jpg`; imageMagick(fileNameA).append(fileNameB, true).write(outFileName, () => { // exported++; bar.tick(2); }); i++; } }
48efde5b77d1da16e7bc941ad4d1297659fe4f0d
[ "Markdown", "JavaScript" ]
4
Markdown
tomdye/imageStitch
d3c8d5fd26fcae56a591b807c584f079d23beacf
a3da67c6e6b27b8e4fea8805df26b6c116f0b775
refs/heads/master
<repo_name>whiteThor/Recommendations<file_sep>/app/src/main/java/com/ruben/android/sanples/googleplayservices/api/Api.java package com.ruben.android.sanples.googleplayservices.api; import com.ruben.android.sanples.googleplayservices.model.ActiveListings; import retrofit.Callback; import retrofit.http.GET; import retrofit.http.Query; public interface Api { @GET("/listings/active") void activeListenings(@Query("includes") String includes, Callback<ActiveListings> callback); } <file_sep>/app/src/main/java/com/ruben/android/sanples/googleplayservices/google/GoogleServiceHelper.java package com.ruben.android.sanples.googleplayservices.google; import android.app.Activity; import android.content.Intent; import android.content.IntentSender; import android.os.Bundle; import com.google.android.gms.common.ConnectionResult; import com.google.android.gms.common.GooglePlayServicesUtil; import com.google.android.gms.common.api.GoogleApiClient; import com.google.android.gms.plus.Plus; import com.ruben.android.sanples.googleplayservices.MainActivity; public class GoogleServiceHelper implements GoogleApiClient.ConnectionCallbacks, GoogleApiClient.OnConnectionFailedListener{ private GoogleServiceListener mGoogleServiceListener; private MainActivity mActivity; private GoogleApiClient mGoogleApiClient; public static final int REQUEST_CODE_RESOLUTION = -100; public static final int REQUEST_CODE_AVAILABILITY = -101; public GoogleServiceHelper(MainActivity mainActivity, GoogleServiceListener googleServiceListener) { mActivity = mainActivity; mGoogleServiceListener = googleServiceListener; mGoogleApiClient = new GoogleApiClient .Builder(mainActivity) .addConnectionCallbacks(this) .addOnConnectionFailedListener(this) .addApi(Plus.API, Plus.PlusOptions.builder().setServerClientId("<KEY>.apps.googleusercontent.com") .build()) .build() ; } public void connect(){ if(isGooglePlayServicesAvailable()) mGoogleApiClient.connect(); else mGoogleApiClient.disconnect(); } public void disconnect(){ if(isGooglePlayServicesAvailable()) mGoogleApiClient.disconnect(); else mGoogleServiceListener.onDisconnected(); } public boolean isGooglePlayServicesAvailable(){ int availability = GooglePlayServicesUtil.isGooglePlayServicesAvailable(mActivity); switch (availability){ case ConnectionResult.SUCCESS: return true; case ConnectionResult.SERVICE_VERSION_UPDATE_REQUIRED: case ConnectionResult.SERVICE_DISABLED: case ConnectionResult.SERVICE_INVALID: GooglePlayServicesUtil.getErrorDialog(availability,mActivity,REQUEST_CODE_AVAILABILITY).show(); return false; default:return false; } } @Override public void onConnected(Bundle bundle) { mGoogleServiceListener.onConnected(); } @Override public void onConnectionSuspended(int i) { mGoogleServiceListener.onDisconnected(); } @Override public void onConnectionFailed(ConnectionResult connectionResult) { if(connectionResult.hasResolution() ){ try { connectionResult.startResolutionForResult(mActivity,REQUEST_CODE_RESOLUTION); } catch (IntentSender.SendIntentException e) { connect(); } }else{ mGoogleServiceListener.onDisconnected(); } } public void handleActitvityResult(int requestCode, int resultCode, Intent data ){ if(resultCode == Activity.RESULT_OK){ connect(); }else{ mGoogleServiceListener.onDisconnected(); } } public interface GoogleServiceListener{ void onConnected(); void onDisconnected(); } }
5c2eea84405b0ab93a2a45fdb25d4f87e68efbfe
[ "Java" ]
2
Java
whiteThor/Recommendations
32d278bcc6d692551a44ca4b1c01a57e79c6285b
70cb63c7dcd3abb0922494a5686365ddabfbd91d
refs/heads/master
<file_sep>import { Component, OnInit } from '@angular/core'; import { AdminLeagueService } from '../services/league.service'; @Component({ selector: 'app-leagues', templateUrl: './leagues.component.html', styleUrls: ['./leagues.component.scss'] }) export class LeaguesComponent implements OnInit { leagues:any; constructor(private adminLeagueService : AdminLeagueService) { } ngOnInit() { this.adminLeagueService.getLeagues().subscribe((data) =>{ this.leagues = data['result']; }) } } <file_sep>import { BrowserModule } from '@angular/platform-browser'; import { NgModule } from '@angular/core'; import { HttpClientModule } from '@angular/common/http'; import { AppRoutingModule } from './app-routing.module'; import { AppComponent } from './app.component'; import { LandingComponent } from './landing/landing.component'; import { DashboardComponent } from './dashboard/dashboard.component'; import { AdminComponent } from './admin/admin.component'; import { LeaguesComponent } from './admin/leagues/leagues.component'; import { CreateLeagueComponent } from './admin/leagues/create-league/create-league.component'; import { ReactiveFormsModule } from '@angular/forms'; import { AdminLeagueService } from './admin/services/league.service'; @NgModule({ declarations: [ AppComponent, LandingComponent, DashboardComponent, AdminComponent, LeaguesComponent, CreateLeagueComponent ], imports: [ BrowserModule, AppRoutingModule, ReactiveFormsModule, HttpClientModule ], providers: [ AdminLeagueService ], bootstrap: [AppComponent] }) export class AppModule { } <file_sep>import { Component, OnInit } from '@angular/core'; import { FormControl, FormGroup, Form } from '@angular/forms'; import { AdminLeagueService } from '../../services/league.service'; @Component({ selector: 'app-create-league', templateUrl: './create-league.component.html', styleUrls: ['./create-league.component.scss'] }) export class CreateLeagueComponent implements OnInit { form: FormGroup; constructor( private adminLeagueService : AdminLeagueService ) { } ngOnInit() { this.form = new FormGroup ({ title: new FormControl(''), }); } save() { this.adminLeagueService.postAddPage(this.form.value).subscribe(resp=>{console.log(resp), err=>console.log(err)}); } } <file_sep>// import { Http } from '@angular/http'; import { HttpClient } from '@angular/common/http'; // import 'rxjs/add/operator/map'; import { Injectable } from '@angular/core'; @Injectable() export class AdminLeagueService { constructor(private http: HttpClient) { } getLeagues() { // return this.http.get('http://localhost:3000/pages') // .map(res => res.json()); return this.http.get('http://localhost:3000/leagues'); } getPage(slug) { // return this.http.get('http://localhost:3000/pages/' + slug) // .map(res => res.json()); return this.http.get('http://localhost:3000/pages/' + slug); } postAddPage(value) { // return this.http.post('http://localhost:3000/pages/add-page', value) // .map(res => res.json()); return this.http.post('http://localhost:3000/leagues/add', value); } getEditPage(id) { // return this.http.get('http://localhost:3000/pages/edit-page/' + id) // .map(res => res.json()); return this.http.get('http://localhost:3000/pages/edit-page/' + id); } postEditPage(value) { // return this.http.post('http://localhost:3000/pages/edit-page/'+value.id, value) // .map(res => res.json()); return this.http.post('http://localhost:3000/pages/edit-page/'+value.id, value); } }
fe68c0b2b00e1b181fcbd19042bcccb69c3b5937
[ "TypeScript" ]
4
TypeScript
malcovich/STINSP
d31370eb495970bb897a84b6b8df3397328e0b8e
ab3ba4bebf862cb841d3d130b15016f24e2157a2
refs/heads/master
<repo_name>AhmedGamal1496/MITx-6.00.1x<file_sep>/Week 2/Pset2.py originalBalance = 4773 remainingBalance = 4773 annualInterestRate = 0.2 minimumFixedMonthlyPayment = 10 i = 0 while True: i = 0 while i < 12: balance = remainingBalance monthlyInterestRate = annualInterestRate / 12 unpaidBalance = balance - minimumFixedMonthlyPayment interest = unpaidBalance * monthlyInterestRate remainingBalance = round((unpaidBalance + interest),2) i += 1 if remainingBalance < 0: break else: minimumFixedMonthlyPayment = minimumFixedMonthlyPayment + 10 remainingBalance = originalBalance print('Lowest Payment: ' + str(minimumFixedMonthlyPayment))<file_sep>/README.md # MITx-6.00.1x Check the 5 weeks problem sets from MITx-6.00.1x course on edX <file_sep>/Week 2/Pset3.3.py originalBalance = 320000 remainingBalance = 320000 annualInterestRate = 0.2 monthlyInterestRate = annualInterestRate / 12.0 lowerBound = originalBalance / 12.0 upperBound = (originalBalance * (1 + monthlyInterestRate)**12) / 12.0 while abs(remainingBalance) > 0.01: minimumFixedMonthlyPayment = (lowerBound + upperBound) / 2 balance = originalBalance for i in range(12): unpaidBalance = balance - minimumFixedMonthlyPayment interest = unpaidBalance * monthlyInterestRate remainingBalance = round((unpaidBalance + interest),2) balance = remainingBalance if remainingBalance > 0.01: lowerBound = minimumFixedMonthlyPayment elif remainingBalance < -0.01: upperBound = minimumFixedMonthlyPayment else: break print('Lowest Payment: ' + str(round(minimumFixedMonthlyPayment,2))) <file_sep>/Week 3/Pset.py def biggest(aDict): ''' aDict: A dictionary, where all the values are lists. returns: The key with the largest number of values associated with it ''' num = 0 for k in aDict: if len(aDict[k]) >= num: num = len(aDict[k]) key = k return key <file_sep>/Week 1/Pset 1.py s = 'abcdefghijklmnopqrstuvwxyz' words = '' words_2 = '' for i in range(len(s)-1): if s[i+1] >= s[i]: words = words + s[i] else: words = words + s[i] if len(words_2) < len(words): words_2 = words words = '' if i == len(s) - 2 and words != '' and words_2 == '': if s[i+1] >= s[i]: words_2 = words + s[i+1] print('Longest substring in alphabetical order is: ' + str(words_2)) <file_sep>/Week 3/Midterm4.py def isWordGuessed(secretWord, lettersGuessed): if len(lettersGuessed) == 0: return False else: for k in lettersGuessed: if k in secretWord: if k == lettersGuessed[len(lettersGuessed)-1]: return True else: continue else: if k == lettersGuessed[len(lettersGuessed)-1]: return False else: continue def getGuessedWord(secretWord, lettersGuessed): str = '' for k in secretWord: if k in lettersGuessed: str = str + k + '' else: str = str + '_' return str def getAvailableLetters(lettersGuessed): import string new_string = string.ascii_lowercase for k in lettersGuessed: if k in string.ascii_lowercase: new_string = new_string.replace(k, '') return new_string def hangman(secretWord): print("Welcome to the game, Hangman!") print('I am thinking of a word that is ' + str(len(secretWord)) + ' letters long.') print('-------------') guess = [] guessedWord = '' check = True i = 8 while guessedWord != secretWord and i != 1: if not check: i -= 1 print('You have ' + str(i) + ' guesses left.') Availableletters = getAvailableLetters(guess) print("Available letters: " + str(Availableletters)) x = input('Please guess a letter: ') if x in guess: print("Oops! You've already guessed that letter: " + str(guessedWord)) else: guess.append(x) check = isWordGuessed(secretWord, guess) if check: guessedWord = getGuessedWord(secretWord, guess) print('Good guess: ' + str(guessedWord)) else: print('Oops! That letter is not in my word: ' + str(getGuessedWord(secretWord, guess))) print('------------') if guessedWord == secretWord: print('Congratulations, you won!') elif i == 1: print('Sorry, you ran out of guesses. The word was else.')
0353d3d0dfe2e7b5d1224ae6680c1d60cc153248
[ "Markdown", "Python" ]
6
Python
AhmedGamal1496/MITx-6.00.1x
d83cda029bf8ab6c7bea67f556faf79aa9e072a6
915c8650d1b953ebeccd7c7ae552e4f45f9feaad
refs/heads/develop
<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_METABOLOMICSLATENTTRAVERSALDATASIMULATOR_H #define EVONET_METABOLOMICSLATENTTRAVERSALDATASIMULATOR_H // .h #include <EvoNet/simulator/BiochemicalDataSimulator.h> namespace EvoNet { template<typename TensorT> class MetabolomicsLatentTraversalDataSimulator : public BiochemicalDataSimulator<TensorT> { public: int n_continuous_steps_ = 16; void makeTrainingDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_training, const std::vector<std::string>& labels_training, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) override; void makeValidationDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_validation, const std::vector<std::string>& labels_validation, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) override; void readAndProcessMetabolomicsTrainingAndValidationData(int& n_reaction_ids_training, int& n_labels_training, int& n_component_group_names_training, int& n_reaction_ids_validation, int& n_labels_validation, int& n_component_group_names_validation, const std::string& biochem_rxns_filename, const std::string& metabo_data_filename_train, const std::string& meta_data_filename_train, const std::string& metabo_data_filename_test, const std::string& meta_data_filename_test, const bool& use_concentrations, const bool& use_MARs, const bool& sample_values, const bool& iter_values, const bool& fill_sampling, const bool& fill_mean, const bool& fill_zero, const bool& apply_fold_change, const std::string& fold_change_ref, const TensorT& fold_change_log_base, const bool& offline_linear_scale_input, const bool& offline_log_transform_input, const bool& offline_standardize_input, const bool& online_linear_scale_input, const bool& online_log_transform_input, const bool& online_standardize_input, int& n_reps_per_sample, const bool& randomize_sample_group_names, const bool& shuffle_data_and_labels, const int& n_epochs, const int& batch_size, const int& memory_size) override; }; template<typename TensorT> inline void MetabolomicsLatentTraversalDataSimulator<TensorT>::makeTrainingDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_training, const std::vector<std::string>& labels_training, const int & n_epochs, const int & batch_size, const int & memory_size, const int & n_input_nodes, const int & n_loss_output_nodes, const int & n_metric_output_nodes, const bool& shuffle_data_and_labels) { // infer the input sizes assert(n_input_nodes == this->n_encodings_continuous_ + this->n_encodings_discrete_); assert(n_loss_output_nodes == data_training.dimension(0)); assert(n_metric_output_nodes == data_training.dimension(0)); assert(data_training.dimension(0) == features.size()); assert(data_training.dimension(1) == labels_training.size()); assert(this->n_encodings_continuous_ > 0); assert(this->n_encodings_discrete_ > 0); assert(batch_size > 0); assert(memory_size == 1); assert(n_epochs == this->n_continuous_steps_ * this->n_encodings_continuous_* this->n_encodings_discrete_ * this->labels_training_.size()); // Gaussian sampler traversal: const TensorT step_size = (0.95 - 0.05) / (this->n_continuous_steps_ - 1); // Assign the encoding values by sampling the 95% confidence limits of the inverse normal distribution Eigen::Tensor<TensorT, 4> gaussian_samples(batch_size, memory_size, this->n_encodings_continuous_, n_epochs); gaussian_samples.setZero(); // Concrete Sampler Eigen::Tensor<TensorT, 4> categorical_samples(batch_size, memory_size, this->n_encodings_discrete_, n_epochs); categorical_samples.setZero(); int continuous_steps_iter = 0; int encodings_continuous_iter = 0; int encodings_discrete_iter = 0; for (int e = 0; e < n_epochs; e += this->n_continuous_steps_) { // Slices for the epoch Eigen::array<Eigen::Index, 4> offset1 = { 0, 0, 0, e }; Eigen::array<Eigen::Index, 4> span1 = { batch_size, memory_size, this->n_encodings_continuous_, this->n_continuous_steps_ }; Eigen::array<Eigen::Index, 4> span2 = { batch_size, memory_size, this->n_encodings_discrete_, this->n_continuous_steps_ }; // for each epoch, sample the confidence intervals of the next encoding node... gaussian_samples.slice(offset1, span1).chip(encodings_continuous_iter, 2) = (gaussian_samples.slice(offset1, span1).chip(encodings_continuous_iter, 2).constant(step_size).cumsum(2) + gaussian_samples.slice(offset1, span1).chip(encodings_continuous_iter, 2).constant(TensorT(0.05) - step_size)).ndtri(); // for each epoch, iterate the next label of the next categorical node... categorical_samples.slice(offset1, span2).chip(encodings_discrete_iter, 2) = categorical_samples.slice(offset1, span2).chip(encodings_discrete_iter, 2).constant(TensorT(1)); ++encodings_continuous_iter; if (encodings_continuous_iter >= this->n_encodings_continuous_) { encodings_continuous_iter = 0; ++encodings_discrete_iter; } if (encodings_discrete_iter >= this->n_encodings_discrete_) { encodings_continuous_iter = 0; encodings_discrete_iter = 0; } } // initialize the Tensors this->input_data_training_.resize(batch_size, memory_size, n_input_nodes, n_epochs); this->loss_output_data_training_.resize(batch_size, memory_size, n_loss_output_nodes, n_epochs); this->metric_output_data_training_.resize(batch_size, memory_size, n_metric_output_nodes, n_epochs); this->time_steps_training_.resize(batch_size, memory_size, n_epochs); // expand the training data to fit into the requested input size const int expansion_factor = maxFunc(std::ceil(TensorT(batch_size * n_epochs) / TensorT(data_training.dimension(1))), 1); assert(expansion_factor == 1); const int over_expanded = data_training.dimension(1)*expansion_factor - batch_size * n_epochs; assert(over_expanded == 0); assert(batch_size * memory_size * n_epochs == data_training.dimension(1)*expansion_factor - over_expanded); Eigen::Tensor<TensorT, 2> data_training_expanded(data_training.dimension(0), data_training.dimension(1)*expansion_factor); for (int i = 0; i < expansion_factor; ++i) { // Slices for the data Eigen::array<Eigen::Index, 2> offset1 = { 0, i*data_training.dimension(1) }; Eigen::array<Eigen::Index, 2> span1 = { data_training.dimension(0), data_training.dimension(1) }; data_training_expanded.slice(offset1, span1) = data_training; } // assign the input tensors auto data_training_expanded_4d = data_training_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_training.dimension(0), data_training.dimension(1)*expansion_factor - over_expanded }) ).reshape(Eigen::array<Eigen::Index, 4>({ data_training.dimension(0), batch_size, memory_size, n_epochs }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 1,2,0,3 })); this->input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = gaussian_samples; this->input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, this->n_encodings_continuous_, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = categorical_samples; // assign the loss tensors this->loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, n_loss_output_nodes, n_epochs })) = data_training_expanded_4d; // assign the metric tensors this->metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, n_metric_output_nodes, n_epochs })) = data_training_expanded_4d; } template<typename TensorT> inline void MetabolomicsLatentTraversalDataSimulator<TensorT>::makeValidationDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_validation, const std::vector<std::string>& labels_validation, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) { // infer the input sizes assert(n_input_nodes == this->n_encodings_continuous_ + this->n_encodings_discrete_); assert(n_loss_output_nodes == data_validation.dimension(0)); assert(n_metric_output_nodes == data_validation.dimension(0)); assert(data_validation.dimension(0) == features.size()); assert(data_validation.dimension(1) == labels_validation.size()); assert(this->n_encodings_continuous_ > 0); assert(this->n_encodings_discrete_ > 0); assert(batch_size > 0); assert(memory_size == 1); assert(n_epochs == this->n_continuous_steps_ * this->n_encodings_continuous_ * this->n_encodings_discrete_ * this->labels_training_.size()); // Gaussian sampler traversal: const TensorT step_size = (0.95 - 0.05) / (this->n_continuous_steps_ - 1); // Assign the encoding values by sampling the 95% confidence limits of the inverse normal distribution Eigen::Tensor<TensorT, 4> gaussian_samples(batch_size, memory_size, this->n_encodings_continuous_, n_epochs); gaussian_samples.setZero(); // Concrete Sampler Eigen::Tensor<TensorT, 4> categorical_samples(batch_size, memory_size, this->n_encodings_discrete_, n_epochs); categorical_samples.setZero(); int encodings_continuous_iter = 0; int encodings_discrete_iter = 0; for (int e = 0; e < n_epochs; e += this->n_continuous_steps_) { // Slices for the epoch Eigen::array<Eigen::Index, 4> offset1 = { 0, 0, 0, e }; Eigen::array<Eigen::Index, 4> span1 = { batch_size, memory_size, this->n_encodings_continuous_, this->n_continuous_steps_ }; Eigen::array<Eigen::Index, 4> span2 = { batch_size, memory_size, this->n_encodings_discrete_, this->n_continuous_steps_ }; // for each epoch, sample the confidence intervals of the next encoding node... gaussian_samples.slice(offset1, span1).chip(encodings_continuous_iter, 2) = (gaussian_samples.slice(offset1, span1).chip(encodings_continuous_iter, 2).constant(step_size).cumsum(2) + gaussian_samples.slice(offset1, span1).chip(encodings_continuous_iter, 2).constant(TensorT(0.05) - step_size)).ndtri(); // for each epoch, iterate the next label of the next categorical node... categorical_samples.slice(offset1, span2).chip(encodings_discrete_iter, 2) = categorical_samples.slice(offset1, span2).chip(encodings_discrete_iter, 2).constant(TensorT(1)); ++encodings_continuous_iter; if (encodings_continuous_iter >= this->n_encodings_continuous_) { encodings_continuous_iter = 0; ++encodings_discrete_iter; } if (encodings_discrete_iter >= this->n_encodings_discrete_) { encodings_continuous_iter = 0; encodings_discrete_iter = 0; } } // initialize the Tensors this->input_data_validation_.resize(batch_size, memory_size, n_input_nodes, n_epochs); this->loss_output_data_validation_.resize(batch_size, memory_size, n_loss_output_nodes, n_epochs); this->metric_output_data_validation_.resize(batch_size, memory_size, n_metric_output_nodes, n_epochs); this->time_steps_validation_.resize(batch_size, memory_size, n_epochs); // expand the validation data to fit into the requested input size const int expansion_factor = maxFunc(std::ceil(TensorT(batch_size * n_epochs) / TensorT(data_validation.dimension(1))), 1); if (expansion_factor != 1) { std::cout << "validation expansion_factor = " << expansion_factor << "." << std::endl; }; const int over_expanded = data_validation.dimension(1) * expansion_factor - batch_size * n_epochs; if (over_expanded != 0) { std::cout << "validation over_expanded = " << over_expanded << "." << std::endl; } assert(batch_size * memory_size * n_epochs == data_validation.dimension(1) * expansion_factor - over_expanded); Eigen::Tensor<TensorT, 2> data_validation_expanded(data_validation.dimension(0), data_validation.dimension(1) * expansion_factor); for (int i = 0; i < expansion_factor; ++i) { // Slices for the data Eigen::array<Eigen::Index, 2> offset1 = { 0, i * data_validation.dimension(1) }; Eigen::array<Eigen::Index, 2> span1 = { data_validation.dimension(0), data_validation.dimension(1) }; data_validation_expanded.slice(offset1, span1) = data_validation; } // assign the input tensors auto data_validation_expanded_4d = data_validation_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_validation.dimension(0), data_validation.dimension(1) * expansion_factor - over_expanded }) ).reshape(Eigen::array<Eigen::Index, 4>({ data_validation.dimension(0), batch_size, memory_size, n_epochs }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 1,2,0,3 })); this->input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = gaussian_samples; this->input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, this->n_encodings_continuous_, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = categorical_samples; // assign the loss tensors this->loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, n_loss_output_nodes, n_epochs })) = data_validation_expanded_4d; // assign the metric tensors this->metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, n_metric_output_nodes, n_epochs })) = data_validation_expanded_4d; } template<typename TensorT> inline void MetabolomicsLatentTraversalDataSimulator<TensorT>::readAndProcessMetabolomicsTrainingAndValidationData(int& n_reaction_ids_training, int& n_labels_training, int& n_component_group_names_training, int& n_reaction_ids_validation, int& n_labels_validation, int& n_component_group_names_validation, const std::string& biochem_rxns_filename, const std::string& metabo_data_filename_train, const std::string& meta_data_filename_train, const std::string& metabo_data_filename_test, const std::string& meta_data_filename_test, const bool& use_concentrations, const bool& use_MARs, const bool& sample_values, const bool& iter_values, const bool& fill_sampling, const bool& fill_mean, const bool& fill_zero, const bool& apply_fold_change, const std::string& fold_change_ref, const TensorT& fold_change_log_base, const bool& offline_linear_scale_input, const bool& offline_log_transform_input, const bool& offline_standardize_input, const bool& online_linear_scale_input, const bool& online_log_transform_input, const bool& online_standardize_input, int& n_reps_per_sample, const bool& randomize_sample_group_names, const bool& shuffle_data_and_labels, const int& n_epochs, const int& batch_size, const int& memory_size) { // Read in the data and make the data matrices std::vector<std::string> labels_training; std::vector<std::string> features_training; Eigen::Tensor<TensorT, 2> data_training; std::vector<std::string> labels_validation; std::vector<std::string> features_validation; Eigen::Tensor<TensorT, 2> data_validation; this->readAndMakeMetabolomicsTrainingAndValidationDataMatrices(n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, features_training, data_training, labels_training, features_validation, data_validation, labels_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, use_concentrations, use_MARs, sample_values, iter_values, fill_sampling, fill_mean, fill_zero, apply_fold_change, fold_change_ref, fold_change_log_base, n_reps_per_sample, false, //randomize_sample_group_names, n_epochs, batch_size, memory_size); // Make the training and validation data caches after an optional transformation step if (use_concentrations) { // Apply offline transformations this->transformTrainingAndValidationDataOffline(data_training, data_validation, offline_linear_scale_input, offline_log_transform_input, offline_standardize_input, false, -1, -1, false, -1, -1); // Apply online transformations this->transformTrainingAndValidationDataOnline(data_training, data_validation, online_linear_scale_input, online_log_transform_input, online_standardize_input); // Make the training data cache this->makeTrainingDataForCache(features_training, data_training, labels_training, n_epochs, batch_size, memory_size, this->n_encodings_continuous_ + this->n_encodings_discrete_, n_component_group_names_training, n_component_group_names_training, shuffle_data_and_labels); this->makeValidationDataForCache(features_validation, data_validation, labels_validation, n_epochs, batch_size, memory_size, this->n_encodings_continuous_ + this->n_encodings_discrete_, n_component_group_names_training, n_component_group_names_training, shuffle_data_and_labels); } else if (use_MARs) { // Apply offline transformations TensorT min_value = 1e-3; TensorT max_value = 1e3; if (offline_log_transform_input) { min_value = std::log(min_value); max_value = std::log(max_value); } this->transformTrainingAndValidationDataOffline(data_training, data_validation, offline_linear_scale_input, offline_log_transform_input, offline_standardize_input, true, min_value, max_value, false, -1, -1); // Apply online transformations this->transformTrainingAndValidationDataOnline(data_training, data_validation, online_linear_scale_input, online_log_transform_input, online_standardize_input); // Make the training data cache this->makeTrainingDataForCache(features_training, data_training, labels_training, n_epochs, batch_size, memory_size, this->n_encodings_continuous_ + this->n_encodings_discrete_, n_reaction_ids_validation, n_reaction_ids_validation, shuffle_data_and_labels); this->makeValidationDataForCache(features_validation, data_validation, labels_validation, n_epochs, batch_size, memory_size, this->n_encodings_continuous_ + this->n_encodings_discrete_, n_reaction_ids_validation, n_reaction_ids_validation, shuffle_data_and_labels); } } } #endif //EVONET_METABOLOMICSLATENTTRAVERSALDATASIMULATOR_H<file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/PopulationTrainerDefaultDevice.h> #include <EvoNet/ml/ModelTrainerDefaultDevice.h> #include <EvoNet/ml/ModelReplicator.h> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/ml/Model.h> #include <EvoNet/io/PopulationTrainerFile.h> #include <EvoNet/io/ModelInterpreterFileDefaultDevice.h> #include <EvoNet/io/ModelFile.h> #include <EvoNet/simulator/MNISTSimulator.h> #include <unsupported/Eigen/CXX11/Tensor> using namespace EvoNet; // Extended classes template<typename TensorT> class ModelTrainerExt : public ModelTrainerDefaultDevice<TensorT> { public: /* @brief LSTM classifier Pixel by Pixel MNIST. Examples include the following: arXiv:1511.06464: 128 hidden units, alpha = 1e-3, gradient clipping of 1, highest test accuracy of 98.2% arXiv:1504.00941: 100 hidden units, alpha = 0.01, forget_gate_bias = 1, gradient clipping of 1, lowest test error rate of 3% arXiv:1801.06105: 100 hidden units, alpha = 1e-6, gradient clipping of 1 @param[in, out] model The network model @param[in] n_inputs The number of pixel inputs @param[in] n_outputs The number of classifier outputs @param[in] n_blocks The number of LSTM blocks to add to the network @param[in] n_cells The number of cells in each LSTM block @param[in] specify_layers Whether to give the `ModelInterpreter` "hints" as to the correct network structure during graph to tensor compilation */ void makeLSTM(Model<TensorT>& model, const int& n_inputs = 784, const int& n_outputs = 10, const int& n_blocks_1 = 128, const int& n_cells_1 = 1, const int& n_blocks_2 = 0, const int& n_cells_2 = 1, const int& n_hidden = 32, const bool& add_forget_gate = true, const bool& add_feature_norm = true, const bool& specify_layers = true, const bool& specify_cyclic_pairs = true) { model.setId(0); model.setName("LSTM"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layers); // Define the activation based on `add_feature_norm` std::shared_ptr<ActivationOp<TensorT>> activation, activation_grad, activation_fc, activation_fc_grad; if (add_feature_norm) { activation = std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()); activation_grad = std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()); activation_fc = std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()); activation_fc_grad = std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()); } else { //activation = std::make_shared<TanHOp<TensorT>>(TanHOp<TensorT>()); //activation_grad = std::make_shared<TanHGradOp<TensorT>>(TanHGradOp<TensorT>()); activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); activation_grad = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); activation_fc = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); activation_fc_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); } //std::shared_ptr<ActivationOp<TensorT>> activation_norm = std::make_shared<TanHOp<TensorT>>(TanHOp<TensorT>()); //std::shared_ptr<ActivationOp<TensorT>> activation_norm_grad = std::make_shared<TanHGradOp<TensorT>>(TanHGradOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_norm = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_norm_grad = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_fc_norm = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_fc_norm_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_output = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_output_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-5, 0.9, 0.999, 1e-8, 10)); // Add the LSTM layer(s) std::vector<std::string> node_names = model_builder.addLSTM(model, "LSTM-01", "LSTM-01", node_names_input, n_blocks_1, n_cells_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_input.size() + n_blocks_1) / 2, 1)), solver_op, 0.0f, 0.0f, true, add_forget_gate, 1, specify_layers, specify_cyclic_pairs); if (add_feature_norm) { node_names = model_builder.addNormalization(model, "LSTM-01-Norm", "LSTM-01-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "LSTM-01-Norm-gain", "LSTM-01-Norm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } if (n_blocks_2 > 0) { node_names = model_builder.addLSTM(model, "LSTM-02", "LSTM-02", node_names, n_blocks_2, n_cells_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_blocks_2) / 2, 1)), solver_op, 0.0f, 0.0f, true, add_forget_gate, 1, specify_layers, specify_cyclic_pairs); } if (add_feature_norm) { node_names = model_builder.addNormalization(model, "LSTM-02-Norm", "LSTM-02-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "LSTM-02-Norm-gain", "LSTM-02-Norm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } // Add a fully connected layer if (n_hidden > 0) { node_names = model_builder.addFullyConnected(model, "FC-01", "FC-01", node_names, n_hidden, activation_fc, activation_fc_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); } if (add_feature_norm) { node_names = model_builder.addNormalization(model, "FC-01-Norm", "FC-01-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "FC-01-Norm-gain", "FC-01-Norm-gain", node_names, node_names.size(), activation_fc_norm, activation_fc_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } // Add a final output layer node_names = model_builder.addFullyConnected(model, "FC-Out", "FC-Out", node_names, n_outputs, activation_output, activation_output_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 2)), solver_op, 0.0f, 0.0f, false, true); node_names = model_builder.addSinglyConnected(model, "Output", "Output", node_names, n_outputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } /* @brief RNN classifier References arXiv:1504.00941: 100 hidden units, alpha = 10e-8, all weights initialized to 1 and all biases initialized to 0, gradient clipping of 1, lowest test error rate of 3% @param[in, out] model The network model @param[in] n_inputs The number of pixel inputs @param[in] n_outputs The number of classifier outputs @param[in] n_blocks The number of LSTM blocks to add to the network @param[in] n_cells The number of cells in each LSTM block @param[in] specify_layers Whether to give the `ModelInterpreter` "hints" as to the correct network structure during graph to tensor compilation */ void makeRNN(Model<TensorT>& model, const int& n_inputs = 784, const int& n_outputs = 10, const int& n_hidden_0 = 32, const int& n_hidden_1 = 32, const bool& add_identity = false, const bool& add_feature_norm = true, const bool& specify_layers = true) { model.setId(0); model.setName("RNN"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layers); // Define the activation based on `add_feature_norm` std::shared_ptr<ActivationOp<TensorT>> activation, activation_grad; if (add_feature_norm) { activation = std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()); activation_grad = std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()); } else { activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); } std::shared_ptr<ActivationOp<TensorT>> activation_norm = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_norm_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-5, 0.9, 0.999, 1e-8, 10)); // Add the 1st RNN layer if (n_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "EN0", "EN0", node_names, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); model_builder.addSinglyConnected(model, "EN0-Rec", node_names, node_names, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(1))), solver_op, 0.0f, specify_layers); if (add_identity) { std::vector<std::string> node_names_tmp = model_builder.addSinglyConnected(model, "EN0-Identity0", "EN0-Identity0", node_names, node_names.size(), activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(1))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, false, specify_layers); model_builder.addSinglyConnected(model, "EN0-Identity1", node_names_tmp, node_names, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(1))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, specify_layers); } if (add_feature_norm) { node_names = model_builder.addNormalization(model, "EN0-Norm", "EN0-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "EN0-Norm-gain", "EN0-Norm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } } // Add the 2nd FC layer if (n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "EN1", "EN1", node_names, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); model_builder.addSinglyConnected(model, "EN1-Rec", node_names, node_names, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(1))), solver_op, 0.0f, specify_layers); if (add_identity) { std::vector<std::string> node_names_tmp = model_builder.addSinglyConnected(model, "EN1-Identity0", "EN1-Identity0", node_names, node_names.size(), activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(1))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, false, specify_layers); model_builder.addSinglyConnected(model, "EN1-Identity1", node_names_tmp, node_names, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(1))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, specify_layers); } if (add_feature_norm) { node_names = model_builder.addNormalization(model, "EN1-Norm", "EN1-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "EN1-Norm-gain", "EN1-Norm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } } // Add a final output layer node_names = model_builder.addFullyConnected(model, "FC-Out", "FC-Out", node_names, n_outputs, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 2)), solver_op, 0.0f, 0.0f, false, true); node_names = model_builder.addSinglyConnected(model, "Output", "Output", node_names, n_outputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } void adaptiveTrainerScheduler( const int& n_generations, const int& n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, const std::vector<float>& model_errors) override { //if (n_epochs % 100 == 0 && n_epochs > 100) { // // anneal the learning rate by half on each plateau // TensorT lr_new = this->reduceLROnPlateau(model_errors, 0.5, 100, 10, 0.1); // if (lr_new < 1.0) { // model_interpreter.updateSolverParams(0, lr_new); // std::cout << "The learning rate has been annealed by a factor of " << lr_new << std::endl; // } //} if (n_epochs % 1000 == 0 && n_epochs != 0) { // save the model every 1000 epochs model_interpreter.getModelResults(model, false, true, false, false); ModelFile<TensorT> data; data.storeModelBinary(model.getName() + "_" + std::to_string(n_epochs) + "_model.binary", model); ModelInterpreterFileDefaultDevice<TensorT> interpreter_data; interpreter_data.storeModelInterpreterBinary(model.getName() + "_" + std::to_string(n_epochs) + "_interpreter.binary", model_interpreter); } } void trainingModelLogger(const int& n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error_train, const TensorT& model_error_test, const Eigen::Tensor<TensorT, 1>& model_metrics_train, const Eigen::Tensor<TensorT, 1>& model_metrics_test) override { // Set the defaults model_logger.setLogTimeEpoch(true); model_logger.setLogTrainValMetricEpoch(true); model_logger.setLogExpectedEpoch(false); model_logger.setLogNodeOutputsEpoch(false); model_logger.setLogNodeInputsEpoch(false); // initialize all logs if (n_epochs == 0) { model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_logger.initLogs(model); } // Per n epoch logging if (n_epochs % 1000 == 0) { model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_interpreter.getModelResults(model, true, false, false, false); } // Create the metric headers and data arrays std::vector<std::string> log_train_headers = { "Train_Error" }; std::vector<std::string> log_test_headers = { "Test_Error" }; std::vector<TensorT> log_train_values = { model_error_train }; std::vector<TensorT> log_test_values = { model_error_test }; int metric_iter = 0; for (const std::string& metric_name : this->metric_names_) { log_train_headers.push_back(metric_name); log_test_headers.push_back(metric_name); log_train_values.push_back(model_metrics_train(metric_iter)); log_test_values.push_back(model_metrics_test(metric_iter)); ++metric_iter; } model_logger.writeLogs(model, n_epochs, log_train_headers, log_test_headers, log_train_values, log_test_values, output_nodes, expected_values, {}, output_nodes, {}, input_nodes, {}); } }; template<typename TensorT> class DataSimulatorExt : public MNISTSimulator<TensorT> { public: int n_input_nodes_ = 1; int memory_size_ = 784; void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps)override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); assert(n_output_nodes == 2 * this->training_labels.dimension(1)); assert(n_metric_output_nodes == this->training_labels.dimension(1)); assert(n_input_nodes == n_input_nodes_); assert(memory_size == memory_size_); // make the start and end sample indices Eigen::Tensor<int, 1> sample_indices = this->getTrainingIndices(batch_size, 1); // Reformat the input data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { // Assign the final output data (only once) for (int nodes_iter = 0; nodes_iter < this->training_labels.dimension(1); ++nodes_iter) { loss_output_data(batch_iter, 0, nodes_iter) = (TensorT)this->training_labels(sample_indices[batch_iter], nodes_iter); loss_output_data(batch_iter, 0, nodes_iter + this->training_labels.dimension(1)) = (TensorT)this->training_labels(sample_indices[batch_iter], nodes_iter); metric_output_data(batch_iter, 0, nodes_iter) = (TensorT)this->training_labels(sample_indices[batch_iter], nodes_iter); } // Assign the input data for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { int iter = memory_size * memory_iter + nodes_iter; input_data(batch_iter, memory_iter, nodes_iter) = this->training_data(sample_indices[batch_iter], iter); } } } } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps)override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); assert(n_output_nodes == 2 * this->validation_labels.dimension(1)); assert(n_metric_output_nodes == this->validation_labels.dimension(1)); assert(n_input_nodes == n_input_nodes_); assert(memory_size == memory_size_); // make the start and end sample indices Eigen::Tensor<int, 1> sample_indices = this->getValidationIndices(batch_size, 1); // Reformat the input data for validation for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { // Assign the output data for (int nodes_iter = 0; nodes_iter < this->validation_labels.dimension(1); ++nodes_iter) { loss_output_data(batch_iter, 0, nodes_iter) = (TensorT)this->validation_labels(sample_indices[batch_iter], nodes_iter); loss_output_data(batch_iter, 0, nodes_iter + this->validation_labels.dimension(1)) = (TensorT)this->validation_labels(sample_indices[batch_iter], nodes_iter); metric_output_data(batch_iter, 0, nodes_iter) = (TensorT)this->validation_labels(sample_indices[batch_iter], nodes_iter); } // Assign the input data for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { int iter = memory_size * memory_iter + nodes_iter; input_data(batch_iter, memory_iter, nodes_iter) = this->validation_data(sample_indices[batch_iter], iter); } } } } }; template<typename TensorT> class ModelReplicatorExt : public ModelReplicator<TensorT> {}; template<typename TensorT> class PopulationTrainerExt : public PopulationTrainerDefaultDevice<TensorT> {}; /** @brief Pixel by pixel MNIST example whereby each pixel is read into the model one by one and a classification is given after reading in all pixels Data processing: - whole image pixels (linearized) 28x28 normalized to 0 to 1 - classifier (1 hot vector from 0 to 9) */ void main_MNIST(const std::string& data_dir, const bool& make_model, const bool& train_model) { const int n_hard_threads = std::thread::hardware_concurrency(); const int n_threads = 1; // define the populatin trainer PopulationTrainerExt<float> population_trainer; population_trainer.setNGenerations(1); population_trainer.setLogging(false); // define the population logger PopulationLogger<float> population_logger(true, true); // define the model logger ModelLogger<float> model_logger(true, true, false, false, false, false, false, false); // define the data simulator const std::size_t input_size = 784; const std::size_t n_input_nodes = 28; // per column) const std::size_t memory_size = input_size / n_input_nodes; const std::size_t n_tbptt = (memory_size > 256) ? 256 : memory_size; const std::size_t n_labels = 10; const std::size_t training_data_size = 60000; //60000; const std::size_t validation_data_size = 10000; //10000; DataSimulatorExt<float> data_simulator; data_simulator.memory_size_ = memory_size; data_simulator.n_input_nodes_ = n_input_nodes; // Model architecture config 0 const std::size_t n_blocks_1 = 128; const std::size_t n_cells_1 = 1; const std::size_t n_blocks_2 = 0; const std::size_t n_cells_2 = 1; const bool add_forget_gate = true; const std::size_t n_hidden = 0; //// Model architecture config 1 //const std::size_t n_blocks_1 = 128; //const std::size_t n_cells_1 = 1; //const std::size_t n_blocks_2 = 0; //const std::size_t n_cells_2 = 1; //const bool add_forget_gate = true; //const std::size_t n_hidden = 64; //// Model architecture config 2 //const std::size_t n_blocks_1 = 128; //const std::size_t n_cells_1 = 1; //const std::size_t n_blocks_2 = 128; //const std::size_t n_cells_2 = 1; //const bool add_forget_gate = true; //const std::size_t n_hidden = 0; // read in the training data std::string training_data_filename = data_dir + "train-images.idx3-ubyte"; std::string training_labels_filename = data_dir + "train-labels.idx1-ubyte"; data_simulator.readData(training_data_filename, training_labels_filename, true, training_data_size, input_size); // read in the validation data std::string validation_data_filename = data_dir + "t10k-images.idx3-ubyte"; std::string validation_labels_filename = data_dir + "t10k-labels.idx1-ubyte"; data_simulator.readData(validation_data_filename, validation_labels_filename, false, validation_data_size, input_size); data_simulator.unitScaleData(); // Make the input nodes std::vector<std::string> input_nodes; for (int i = 0; i < n_input_nodes; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } // Make the output nodes std::vector<std::string> output_nodes; for (int i = 0; i < data_simulator.mnist_labels.size(); ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes.push_back(name); } // define the model trainers and resources for the trainers std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; for (size_t i = 0; i < n_threads; ++i) { ModelResources model_resources = { ModelDevice(2, 1) }; ModelInterpreterDefaultDevice<float> model_interpreter(model_resources); model_interpreters.push_back(model_interpreter); } ModelTrainerExt<float> model_trainer; model_trainer.setBatchSize(32); model_trainer.setMemorySize(memory_size); model_trainer.setNEpochsTraining(100001); model_trainer.setNEpochsValidation(25); model_trainer.setVerbosityLevel(1); model_trainer.setLogging(true, false, false); model_trainer.setNTETTSteps(1); model_trainer.setNTBPTTSteps(n_tbptt); model_trainer.setPreserveOoO(true); model_trainer.setFindCycles(false); model_trainer.setFastInterpreter(true); model_trainer.setLossFunctions({ std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-24, 0.0)), std::make_shared<CrossEntropyWithLogitsLossOp<float>>(CrossEntropyWithLogitsLossOp<float>(1e-24, 1.0)) }); model_trainer.setLossFunctionGrads({ std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-24, 0.0)), std::make_shared<CrossEntropyWithLogitsLossGradOp<float>>(CrossEntropyWithLogitsLossGradOp<float>(1e-24, 1.0)) }); model_trainer.setLossOutputNodes({ output_nodes, output_nodes }); model_trainer.setMetricFunctions({ std::make_shared<PrecisionMCMicroOp<float>>(PrecisionMCMicroOp<float>()) }); model_trainer.setMetricOutputNodes({ output_nodes }); model_trainer.setMetricNames({ "PrecisionMCMicro" }); // define the model replicator ModelReplicatorExt<float> model_replicator; // define the initial population std::cout << "Initializing the population..." << std::endl; Model<float> model; if (make_model) { model_trainer.makeRNN(model, input_nodes.size(), output_nodes.size(), 128, 0, false, false, true); model_trainer.setFindCycles(true); //model_trainer.makeLSTM(model, input_nodes.size(), output_nodes.size(), n_blocks_1, n_cells_1, n_blocks_2, n_cells_2, n_hidden, add_forget_gate, false, true, true); } else { // read in the trained model std::cout << "Reading in the model..." << std::endl; const std::string model_filename = data_dir + "LSTM_model.binary"; const std::string interpreter_filename = data_dir + "LSTM_interpreter.binary"; ModelFile<float> model_file; model_file.loadModelBinary(model_filename, model); model.setId(1); model.setName("LSTM1"); ModelInterpreterFileDefaultDevice<float> model_interpreter_file; model_interpreter_file.loadModelInterpreterBinary(interpreter_filename, model_interpreters[0]); } //std::vector<Model<float>> population = { model }; if (train_model) { // Train the model std::pair<std::vector<float>, std::vector<float>> model_errors = model_trainer.trainModel(model, data_simulator, input_nodes, model_logger, model_interpreters.front()); //// Evolve the population //std::vector<std::vector<std::tuple<int, std::string, float>>> models_validation_errors_per_generation = population_trainer.evolveModels( // population, model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, population_logger, input_nodes); //PopulationTrainerFile<float> population_trainer_file; //population_trainer_file.storeModels(population, "MNIST"); //population_trainer_file.storeModelValidations("MNISTErrors.csv", models_validation_errors_per_generation); //ModelFile<float> data; //data.storeModelCsv(population.front().getName() + "_nodes.csv", // population.front().getName() + "_links.csv", // population.front().getName() + "_weights.csv", // population.front(), true, true, true); } else { //// Evaluate the population //population_trainer.evaluateModels( // population, model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, input_nodes); } }; int main(int argc, char** argv) { // Parse the user commands std::string data_dir = "C:/Users/dmccloskey/Documents/GitHub/mnist/"; //std::string data_dir = "/home/user/data/"; //std::string data_dir = "C:/Users/domccl/GitHub/mnist/"; bool make_model = true, train_model = true; if (argc >= 2) { data_dir = argv[1]; } if (argc >= 3) { make_model = (argv[2] == std::string("true")) ? true : false; } if (argc >= 4) { train_model = (argv[3] == std::string("true")) ? true : false; } // run the application main_MNIST(data_dir, make_model, train_model); return 0; }<file_sep>Contributing to EvoNet ============================================================================= Would you like to enhance EvoNet? If you are here to do so, please read the following sections. Get in touch ------------ - Ask questions on GitHub repository. - Report bugs, suggest features, tests and changes in source code `on GitHub <https://github.com/dmccloskey/EvoNet>`_. - You are also welcome to reach out through an e-mail to one of our developers. <file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE Link test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/Link.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(link1) BOOST_AUTO_TEST_CASE(constructor) { Link* ptr = nullptr; Link* nullPointer = nullptr; ptr = new Link(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { Link* ptr = nullptr; ptr = new Link(); delete ptr; } BOOST_AUTO_TEST_CASE(constructor2) { Link link; std::string node_source = "1"; std::string node_sink = "2"; std::string weight = "1"; link = Link("1", node_source, node_sink, weight); BOOST_CHECK_EQUAL(link.getName(), "1"); BOOST_CHECK_EQUAL(link.getSourceNodeName(), node_source); BOOST_CHECK_EQUAL(link.getSinkNodeName(), node_sink); BOOST_CHECK_EQUAL(link.getWeightName(), "1"); // test same sink and source nodes link = Link("1", node_source, node_source, weight); BOOST_CHECK_EQUAL(link.getName(), "1"); BOOST_CHECK_EQUAL(link.getSourceNodeName(), node_source); BOOST_CHECK_NE(link.getSinkNodeName(),node_sink); BOOST_CHECK_EQUAL(link.getWeightName(), "1"); // test overload constructor link = Link("1", node_source, node_sink, weight); BOOST_CHECK_EQUAL(link.getName(), "1"); BOOST_CHECK_EQUAL(link.getSourceNodeName(), node_source); BOOST_CHECK_EQUAL(link.getSinkNodeName(), node_sink); BOOST_CHECK_EQUAL(link.getWeightName(), "1"); } BOOST_AUTO_TEST_CASE(comparison) { std::string source, sink, weight; source = "1"; sink = "2"; weight = "3"; Link link, link_test; link = Link("1", source, sink, weight); link_test = Link("1", source, sink, weight); BOOST_CHECK(link == link_test); link = Link("2", source, sink, weight); BOOST_CHECK(link != link_test); link = Link("1", source, source, weight); BOOST_CHECK(link != link_test); link = Link("1", sink, sink, weight); BOOST_CHECK(link != link_test); link = Link("1", sink, sink, "4"); BOOST_CHECK(link != link_test); } BOOST_AUTO_TEST_CASE(gettersAndSetters) { std::string node_source = "1"; std::string node_sink = "2"; Link link; link.setId(1); link.setName("Link1"); link.setSourceNodeName(node_source); link.setSinkNodeName(node_sink); link.setWeightName("3"); link.setModuleId(4); link.setModuleName("Module1"); BOOST_CHECK_EQUAL(link.getId(), 1); BOOST_CHECK_EQUAL(link.getName(), "Link1"); BOOST_CHECK_EQUAL(link.getSourceNodeName(), node_source); BOOST_CHECK_EQUAL(link.getSinkNodeName(), node_sink); BOOST_CHECK_EQUAL(link.getWeightName(), "3"); BOOST_CHECK_EQUAL(link.getModuleId(), 4); BOOST_CHECK_EQUAL(link.getModuleName(), "Module1"); // test same sink and source nodes link.setSourceNodeName(node_sink); BOOST_CHECK_EQUAL(link.getSourceNodeName(), node_sink); BOOST_CHECK_EQUAL(link.getSinkNodeName(), node_sink); // test same sink and source nodes link.setSourceNodeName(node_source); link.setSinkNodeName(node_source); BOOST_CHECK_EQUAL(link.getSourceNodeName(), node_source); BOOST_CHECK_EQUAL(link.getSinkNodeName(), node_source); } BOOST_AUTO_TEST_SUITE_END()<file_sep>set(ml_executables_list MNIST_Bayes_example MNIST_CovNet_example MNIST_CVAE_example #MNIST_DenoisingAE_example MNIST_DotProdAtten_example MNIST_EvoNet_example MNIST_LSTM_example MNIST_VAE_example AddProbAtt_example AddProbRec_example HarmonicOscillator_example KineticModel_example KineticModel2_example Metabolomics_BatchCorrection Metabolomics_Classification Metabolomics_LatentArithmetic Metabolomics_LatentTraversal Metabolomics_LatentUnsClass Metabolomics_VAE #Metabolomics_Stats_RBC #Metabolomics_Stats_KALE PeakIntegrator_app ) set(cuda_executables_list HarmonicOscillator_Gpu_example KineticModel_Gpu_example KineticModel2_Gpu_example MNIST_Bayes_Gpu_example MNIST_CovNet_Gpu_example MNIST_CVAE_Gpu_example #MNIST_DenoisingAE_Gpu_example MNIST_DotProdAtten_Gpu_example MNIST_LSTM_Gpu_example MNIST_VAE_Gpu_example AddProbAtt_Gpu_example AddProbRec_Gpu_example Metabolomics_BatchCorrection_Gpu Metabolomics_Classification_Gpu Metabolomics_LatentTraversal_Gpu Metabolomics_LatentUnsClass_Gpu Metabolomics_VAE_Gpu PeakIntegrator_Gpu_app ) ### collect example executables set(EXAMPLE_executables ${ml_executables_list} ${cuda_executables_list} ) <file_sep># Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os, subprocess from distutils.dir_util import copy_tree project = 'EvoNet' copyright = '2022, EvoNet Team' author = 'EvoNet Team' extensions = [ #"breathe", #"exhale", "sphinx.ext.todo", "sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.viewcode", "sphinx.ext.autosectionlabel" ] todo_include_todos = True todo_link_only = True breathe_default_project = "EvoNet" exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '*.csv'] master_doc = 'index' # html_theme = 'sphinx_rtd_theme' def configureDoxyfile(input_dir, output_dir): with open('Doxyfile.in', 'r') as file : filedata = file.read() filedata = filedata.replace('@DOXYGEN_INPUT_DIR@', input_dir) filedata = filedata.replace('@DOXYGEN_OUTPUT_DIR@', output_dir) with open('Doxyfile', 'w') as file: file.write(filedata) docs_build_on_RtD = os.environ.get('READTHEDOCS', None) == 'True' breathe_projects = {"EvoNet" : "docs/xml"} if docs_build_on_RtD: input_dir = '../src/evonet/' output_dir = 'build' copy_tree('../images', 'images') configureDoxyfile(input_dir, output_dir) subprocess.call('doxygen', shell=True) breathe_projects['EvoNet'] = output_dir + '/xml' exhale_args = { # These arguments are required "containmentFolder": "./api", "rootFileName": "library_root.rst", "rootFileTitle": "Library API", "doxygenStripFromPath": "..", # Suggested optional arguments "createTreeView": True, # TIP: if using the sphinx-bootstrap-theme, you need # "treeViewIsBootstrap": True, "exhaleExecutesDoxygen": True, "exhaleDoxygenStdin": "INPUT = ../src/evonet/include/EvoNet" } primary_domain = 'cpp' highlight_language = 'cpp'<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE CVAEFullyConnDefaultDevice test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/models/CVAEFullyConnDefaultDevice.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(CVAEFullyConnDefaultDevice1) BOOST_AUTO_TEST_CASE(constructorDefaultDevice) { CVAEFullyConnDefaultDevice<float>* ptr = nullptr; CVAEFullyConnDefaultDevice<float>* nullPointer = nullptr; ptr = new CVAEFullyConnDefaultDevice<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructorDefaultDevice) { CVAEFullyConnDefaultDevice<float>* ptr = nullptr; ptr = new CVAEFullyConnDefaultDevice<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(makeCVAEDefaultDevice) { CVAEFullyConnDefaultDevice<float> model_trainer; Model<float> model; // prepare the parameters EvoNetParameters::ModelTrainer::NHidden0 n_hidden_0("n_hidden_0", 4); EvoNetParameters::ModelTrainer::NHidden1 n_hidden_1("n_hidden_1", 4); EvoNetParameters::ModelTrainer::NHidden2 n_hidden_2("n_hidden_2", 0); EvoNetParameters::ModelTrainer::NEncodingsContinuous n_encodings_continuous("n_encodings_continuous", 2); EvoNetParameters::ModelTrainer::NEncodingsCategorical n_encodings_categorical("n_encodings_categorical", 2); auto parameters = std::make_tuple(n_hidden_0, n_hidden_1, n_hidden_2, n_encodings_continuous, n_encodings_categorical); // make the model int n_input = 8; model_trainer.makeCVAE(model, n_input, std::get<EvoNetParameters::ModelTrainer::NEncodingsContinuous>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden2>(parameters).get(), false, true); BOOST_CHECK(model.checkCompleteInputToOutput()); // Check the input nodes std::vector<std::string> input_nodes; makeInputNodes(input_nodes, n_input); for (std::string node : input_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::input); } // Check the encoding nodes input_nodes.clear(); EvoNet::apply([&input_nodes](auto&& ...args) { makeGaussianEncodingSamplerNodes(input_nodes, args ...); }, parameters); for (std::string node : input_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::input); } input_nodes.clear(); EvoNet::apply([&input_nodes](auto&& ...args) { makeCategoricalEncodingSamplerNodes(input_nodes, args ...); }, parameters); for (std::string node : input_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::input); } input_nodes.clear(); EvoNet::apply([&input_nodes](auto&& ...args) { makeCategoricalEncodingTauNodes(input_nodes, args ...); }, parameters); for (std::string node : input_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::input); } input_nodes.clear(); EvoNet::apply([&input_nodes](auto&& ...args) { makeGaussianEncodingSamplerNodes(input_nodes, args ...); }, parameters); for (std::string node : input_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::input); } // Check the output nodes std::vector<std::string> output_nodes = makeOutputNodes(n_input); for (std::string node : output_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::output); } output_nodes.clear(); EvoNet::apply([&output_nodes](auto&& ...args) { output_nodes = makeMuEncodingNodes(args ...); }, parameters); for (std::string node : output_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::output); } output_nodes.clear(); EvoNet::apply([&output_nodes](auto&& ...args) { output_nodes = makeLogVarEncodingNodes(args ...); }, parameters); for (std::string node : output_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::output); } output_nodes.clear(); EvoNet::apply([&output_nodes](auto&& ...args) { output_nodes = makeAlphaEncodingNodes(args ...); }, parameters); for (std::string node : output_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::output); } output_nodes.clear(); EvoNet::apply([&output_nodes](auto&& ...args) { output_nodes = makeCategoricalSoftmaxNodes(args ...); }, parameters); for (std::string node : output_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::output); } output_nodes.clear(); } BOOST_AUTO_TEST_CASE(makeCVAEEncoderDefaultDevice) { CVAEFullyConnDefaultDevice<float> model_trainer; Model<float> model; // prepare the parameters EvoNetParameters::ModelTrainer::NHidden0 n_hidden_0("n_hidden_0", 4); EvoNetParameters::ModelTrainer::NHidden1 n_hidden_1("n_hidden_1", 4); EvoNetParameters::ModelTrainer::NHidden2 n_hidden_2("n_hidden_2", 0); EvoNetParameters::ModelTrainer::NEncodingsContinuous n_encodings_continuous("n_encodings_continuous", 2); EvoNetParameters::ModelTrainer::NEncodingsCategorical n_encodings_categorical("n_encodings_categorical", 2); auto parameters = std::make_tuple(n_hidden_0, n_hidden_1, n_hidden_2, n_encodings_continuous, n_encodings_categorical); // make the model int n_input = 8; model_trainer.makeCVAEEncoder(model, n_input, std::get<EvoNetParameters::ModelTrainer::NEncodingsContinuous>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden2>(parameters).get(), false, true); BOOST_CHECK(model.checkCompleteInputToOutput()); // Check the input nodes std::vector<std::string> input_nodes; makeInputNodes(input_nodes, n_input); for (std::string node : input_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::input); } // Check the output nodes std::vector<std::string> output_nodes; EvoNet::apply([&output_nodes](auto&& ...args) { output_nodes = makeMuEncodingNodes(args ...); }, parameters); for (std::string node : output_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::output); } output_nodes.clear(); EvoNet::apply([&output_nodes](auto&& ...args) { output_nodes = makeLogVarEncodingNodes(args ...); }, parameters); for (std::string node : output_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::output); } output_nodes.clear(); EvoNet::apply([&output_nodes](auto&& ...args) { output_nodes = makeAlphaEncodingNodes(args ...); }, parameters); for (std::string node : output_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::output); } output_nodes.clear(); } BOOST_AUTO_TEST_CASE(makeCVAEClassifierDefaultDevice) { CVAEFullyConnDefaultDevice<float> model_trainer; Model<float> model; // prepare the parameters EvoNetParameters::ModelTrainer::NHidden0 n_hidden_0("n_hidden_0", 4); EvoNetParameters::ModelTrainer::NHidden1 n_hidden_1("n_hidden_1", 4); EvoNetParameters::ModelTrainer::NHidden2 n_hidden_2("n_hidden_2", 0); EvoNetParameters::ModelTrainer::NEncodingsCategorical n_encodings_categorical("n_encodings_categorical", 2); auto parameters = std::make_tuple(n_hidden_0, n_hidden_1, n_hidden_2, n_encodings_categorical); // make the model int n_input = 8; model_trainer.makeCVAEClassifier(model, n_input, std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden2>(parameters).get(), false, true); BOOST_CHECK(model.checkCompleteInputToOutput()); // Check the input nodes std::vector<std::string> input_nodes; makeInputNodes(input_nodes, n_input); for (std::string node : input_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::input); } // Check the output nodes std::vector<std::string> output_nodes; output_nodes.clear(); EvoNet::apply([&output_nodes](auto&& ...args) { output_nodes = makeAlphaEncodingNodes(args ...); }, parameters); for (std::string node : output_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::output); } output_nodes.clear(); } BOOST_AUTO_TEST_CASE(makeCVAEDecoderDefaultDevice) { CVAEFullyConnDefaultDevice<float> model_trainer; Model<float> model; // prepare the parameters EvoNetParameters::ModelTrainer::NHidden0 n_hidden_0("n_hidden_0", 4); EvoNetParameters::ModelTrainer::NHidden1 n_hidden_1("n_hidden_1", 4); EvoNetParameters::ModelTrainer::NHidden2 n_hidden_2("n_hidden_2", 0); EvoNetParameters::ModelTrainer::NEncodingsContinuous n_encodings_continuous("n_encodings_continuous", 2); EvoNetParameters::ModelTrainer::NEncodingsCategorical n_encodings_categorical("n_encodings_categorical", 2); auto parameters = std::make_tuple(n_hidden_0, n_hidden_1, n_hidden_2, n_encodings_continuous, n_encodings_categorical); // make the model int n_input = 8; model_trainer.makeCVAEDecoder(model, n_input, std::get<EvoNetParameters::ModelTrainer::NEncodingsContinuous>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden2>(parameters).get(), false, true); BOOST_CHECK(model.checkCompleteInputToOutput()); // Check the input nodes std::vector<std::string> input_nodes; // Check the encoding nodes EvoNet::apply([&input_nodes](auto&& ...args) { makeMuEncodingNodes(input_nodes, args ...); }, parameters); for (std::string node : input_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::input); } input_nodes.clear(); EvoNet::apply([&input_nodes](auto&& ...args) { makeAlphaEncodingNodes(input_nodes, args ...); }, parameters); for (std::string node : input_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::input); } input_nodes.clear(); // Check the output nodes std::vector<std::string> output_nodes = makeOutputNodes(n_input); for (std::string node : output_nodes) { BOOST_CHECK(model.nodes_.count(node) > 0); BOOST_CHECK(model.nodes_.at(node)->getType() == NodeType::output); } } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELTRAINEREXPERIMENTALGPU_H #define EVONET_MODELTRAINEREXPERIMENTALGPU_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> // .h #include <EvoNet/ml/ModelTrainerExperimental.h> #include <EvoNet/ml/ModelInterpreterGpu.h> // .cpp #include <EvoNet/io/ModelInterpreterFileGpu.h> #include <EvoNet/io/ModelFile.h> namespace EvoNet { /** @brief Class to train a network model */ template<typename TensorT> class ModelTrainerExperimentalGpu : public ModelTrainerExperimental<TensorT, ModelInterpreterGpu<TensorT>> { public: ModelTrainerExperimentalGpu() = default; ///< Default constructor ~ModelTrainerExperimentalGpu() = default; ///< Default destructor /// Overrides used in all examples void adaptiveTrainerScheduler(const int& n_generations, const int& n_epochs, Model<TensorT>& model, ModelInterpreterGpu<TensorT>& model_interpreter, const std::vector<TensorT>& model_errors) override; }; template<typename TensorT> inline void ModelTrainerExperimentalGpu<TensorT>::adaptiveTrainerScheduler(const int& n_generations, const int& n_epochs, Model<TensorT>& model, ModelInterpreterGpu<TensorT>& model_interpreter, const std::vector<TensorT>& model_errors) { if (n_epochs % 1000 == 0 && n_epochs != 0) { // save the model every 1000 epochs model_interpreter.getModelResults(model, false, true, false, false); ModelFile<TensorT> data; data.storeModelBinary(model.getName() + "_" + std::to_string(n_epochs) + "_model.binary", model); ModelInterpreterFileGpu<TensorT> interpreter_data; interpreter_data.storeModelInterpreterBinary(model.getName() + "_" + std::to_string(n_epochs) + "_interpreter.binary", model_interpreter); } } } #endif #endif //EVONET_MODELTRAINEREXPERIMENTALGPU_H<file_sep>#ifndef EVONET_MODELKERNAL_H #define EVONET_MODELKERNAL_H #define EIGEN_USE_THREADS #include <unsupported/Eigen/CXX11/Tensor> #include <EvoNet/ml/ActivationFunctionTensor.h> #include <EvoNet/ml/IntegrationFunctionTensor.h> #include <EvoNet/ml/LossFunctionTensor.h> #include <EvoNet/ml/SolverTensor.h> #include <EvoNet/ml/MetricFunctionTensor.h> namespace EvoNet { /* @brief Class for all main Model kernals. A single kernal is generated per method (i.e., the device is called only once per kernal method). The only except is executeModelErrors where a kernal is generated for both the calculation of the model error and node errors in order to use only a host to device memcopy of the predicted node values. */ template <typename TensorT, typename DeviceT> class ModelKernal { public: ModelKernal() = default; ~ModelKernal() = default; virtual bool executeNodeActivation( TensorT* h_node_inputs, TensorT* d_node_inputs, TensorT* h_node_outputs, TensorT* d_node_outputs, TensorT* h_sink_dt, TensorT* d_sink_dt, std::shared_ptr<ActivationTensorOp<TensorT,DeviceT>>& activation_function, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) = 0; virtual bool executeNodeDerivative( TensorT* h_node_outputs, TensorT* d_node_outputs, TensorT* h_node_derivative, TensorT* d_node_derivative, std::shared_ptr<ActivationTensorOp<TensorT,DeviceT>>& activation_grad_function, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) = 0; virtual bool executeForwardPropogation( TensorT* h_source_outputs, TensorT* d_source_outputs, TensorT* h_weights, TensorT* d_weights, TensorT* h_sink_inputs, TensorT* d_sink_inputs, std::shared_ptr<IntegrationTensorOp<TensorT, DeviceT>>& sink_integration_function, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) = 0; virtual bool executeBackwardPropogation( TensorT* h_source_errors, TensorT* d_source_errors, TensorT* h_source_inputs, TensorT* d_source_inputs, TensorT* h_sink_output, TensorT* d_sink_output, TensorT* h_weights, TensorT* d_weights, TensorT* h_sink_error, TensorT* d_sink_error, TensorT* h_sink_derivative, TensorT* d_sink_derivative, const int& n_input_nodes, std::shared_ptr<IntegrationErrorTensorOp<TensorT, DeviceT>>& source_integration_functions, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) = 0; virtual bool executeModelErrors( Eigen::Tensor<TensorT, 2>& expected, TensorT* h_node_output, TensorT* d_node_output, TensorT* h_model_error, TensorT* d_model_error, TensorT* h_node_errors, TensorT* d_node_errors, std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>>& loss_function, std::shared_ptr < LossFunctionGradTensorOp<TensorT, DeviceT>>& loss_grad_function, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) = 0; virtual bool executeModelMetric( Eigen::Tensor<TensorT, 2>& expected, TensorT* h_node_output, TensorT* d_node_output, TensorT* h_model_metric, TensorT* d_model_metric, std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>>& metric_function, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) = 0; virtual bool executeWeightErrors( TensorT* h_sink_errors, TensorT* d_sink_errors, TensorT* h_source_outputs, TensorT* d_source_outputs, TensorT* h_source_inputs, TensorT* d_source_inputs, const int& n_input_nodes, std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, DeviceT>>& sink_integration_function, TensorT* h_weight, TensorT* d_weight, TensorT* h_weight_error, TensorT* d_weight_error, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) = 0; virtual bool executeSharedWeightErrors( TensorT* h_weight_error, TensorT* d_weight_error, TensorT* h_shared_weights, TensorT* d_shared_weights, const int& source_layer_size, const int& sink_layer_size, const int& n_shared_layers, DeviceT& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) = 0; virtual bool executeWeightUpdate( TensorT* h_weight, TensorT* d_weight, TensorT* h_solver_params, TensorT* d_solver_params, TensorT* h_weight_error, TensorT* d_weight_error, std::shared_ptr<SolverTensorOp<TensorT, DeviceT>>& solver_function, const int& source_layer_size, const int& sink_layer_size, const int& iter, DeviceT& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) = 0; void combineSharedWeightErrors( TensorT* weight_error, TensorT* shared_weights, const int& source_layer_size, const int& sink_layer_size, const int& n_shared_layers, DeviceT& device) { // TODO: this hangs on both the CPU and GPU with tensors of any appreciable size Eigen::TensorMap<Eigen::Tensor<TensorT, 5>> weight_error_tensor(weight_error, 1, 1, source_layer_size, sink_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 5>> shared_weight_tensor(shared_weights, 1, 1, source_layer_size, sink_layer_size, n_shared_layers); // Step 1: multiply the weight tensor by the shared weight tensor mask; sum all shared weights auto weight_error_sum = (weight_error_tensor.broadcast(Eigen::array<int, 5>({ 1,1,1,1,n_shared_layers })) * shared_weight_tensor ).sum(Eigen::array<int, 2>({ 2, 3 })).eval().broadcast(Eigen::array<int, 3>({ source_layer_size, sink_layer_size, 1 })).eval(); // dims 3 // Step 2: multiply the weight error sum tensor by the shared weight tensor mask and subtract out the error tensor // TensorT* tmp_data; // if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { // tmp_data = new TensorT[source_layer_size * sink_layer_size]; // } //#if COMPILE_WITH_CUDA // else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { // size_t bytes = source_layer_size * sink_layer_size * sizeof(TensorT); // assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); // } //#endif // Eigen::Tensor<TensorT, 2> weight_error_diff(source_layer_size, sink_layer_size); auto weight_error_diff/*.device(device)*/ = (weight_error_sum * shared_weight_tensor.chip(0, 1).chip(0, 0) - weight_error_tensor.chip(0, 1).chip(0, 0).broadcast(Eigen::array<int, 3>({ 1,1,n_shared_layers })) * shared_weight_tensor.chip(0, 1).chip(0, 0) ).eval().sum(Eigen::array<int, 1>({ 2 })).eval(); //dims 2 // Step 3: add the weight_error_diff weight_error_tensor.chip(0, 4).chip(0, 1).chip(0, 0).device(device) += weight_error_diff; //weight_error_tensor.chip(0, 4).chip(0, 1).chip(0, 0).device(device) += (weight_error_diff == weight_error_diff).select(weight_error_diff.clip(TensorT(-1e24), TensorT(1e24)), weight_error_diff.constant(TensorT(0))); // // Deallocate temporary memory // if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { // delete[] tmp_data; // } //#if COMPILE_WITH_CUDA // else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { // assert(cudaFree(tmp_data) == cudaSuccess); // } //#endif } }; template <typename TensorT> class ModelKernalDefaultDevice : ModelKernal<TensorT, Eigen::DefaultDevice> { public: using ModelKernal<TensorT, Eigen::DefaultDevice>::ModelKernal; bool executeNodeActivation( TensorT* h_node_inputs, TensorT* d_node_inputs, TensorT* h_node_outputs, TensorT* d_node_outputs, TensorT* h_sink_dt, TensorT* d_sink_dt, std::shared_ptr<ActivationTensorOp<TensorT, Eigen::DefaultDevice>>& activation_function, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, Eigen::DefaultDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // Activate the node net input activation_function->operator()(h_node_inputs, h_node_outputs, batch_size, memory_size, layer_size, time_step, device); return true; } bool executeNodeDerivative( TensorT* h_node_outputs, TensorT* d_node_outputs, TensorT* h_node_derivative, TensorT* d_node_derivative, std::shared_ptr<ActivationTensorOp<TensorT, Eigen::DefaultDevice>>& activation_grad_function, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, Eigen::DefaultDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // Calculate the derivative of the sink node activation activation_grad_function->operator()(h_node_outputs, h_node_derivative, batch_size, memory_size, layer_size, time_step, device); return true; } bool executeForwardPropogation( TensorT* h_source_outputs, TensorT* d_source_outputs, TensorT* h_weights, TensorT* d_weights, TensorT* h_sink_inputs, TensorT* d_sink_inputs, std::shared_ptr<IntegrationTensorOp<TensorT, Eigen::DefaultDevice>>& sink_integration_function, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, Eigen::DefaultDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // Integrate sink node input sink_integration_function->operator()(h_source_outputs, h_weights, h_sink_inputs, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); return true; }; bool executeBackwardPropogation( TensorT* h_source_errors, TensorT* d_source_errors, TensorT* h_source_inputs, TensorT* d_source_inputs, TensorT* h_sink_output, TensorT* d_sink_output, TensorT* h_weights, TensorT* d_weights, TensorT* h_sink_error, TensorT* d_sink_error, TensorT* h_sink_derivative, TensorT* d_sink_derivative, const int& n_input_nodes, std::shared_ptr<IntegrationErrorTensorOp<TensorT, Eigen::DefaultDevice>>& source_integration_functions, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, Eigen::DefaultDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // Integrate sink node error source_integration_functions->operator()( h_source_errors, h_source_inputs, h_weights, h_sink_output, h_sink_error, h_sink_derivative, n_input_nodes, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); return true; }; bool executeModelErrors( Eigen::Tensor<TensorT, 2>& expected, TensorT* h_node_outputs, TensorT* d_node_outputs, TensorT* h_model_error, TensorT* d_model_error, TensorT* h_node_errors, TensorT* d_node_errors, std::shared_ptr<LossFunctionTensorOp<TensorT, Eigen::DefaultDevice>>& loss_function, std::shared_ptr<LossFunctionGradTensorOp<TensorT, Eigen::DefaultDevice>>& loss_grad_function, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, Eigen::DefaultDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // Calculate the model error loss_function->operator()(h_node_outputs, expected.data(), h_model_error, batch_size, memory_size, layer_size, time_step, device); // Calculate the node errors loss_grad_function->operator()(h_node_outputs, expected.data(), h_node_errors, batch_size, memory_size, layer_size, time_step, device); return true; }; bool executeModelMetric( Eigen::Tensor<TensorT, 2>& expected, TensorT* h_node_output, TensorT* d_node_output, TensorT* h_model_metric, TensorT* d_model_metric, std::shared_ptr<MetricFunctionTensorOp<TensorT, Eigen::DefaultDevice>>& metric_function, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, Eigen::DefaultDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // Calculate the model metric metric_function->operator()(h_node_output, expected.data(), h_model_metric, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); return true; }; bool executeWeightErrors( TensorT* h_sink_errors, TensorT* d_sink_errors, TensorT* h_source_outputs, TensorT* d_source_outputs, TensorT* h_source_inputs, TensorT* d_source_inputs, const int& n_input_nodes, std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, Eigen::DefaultDevice>>& sink_integration_function, TensorT* h_weight, TensorT* d_weight, TensorT* h_weight_error, TensorT* d_weight_error, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, Eigen::DefaultDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // Accumulate the error for all links involving the same weight sink_integration_function->operator()(h_sink_errors, h_source_outputs, h_weight, h_source_inputs, h_weight_error, n_input_nodes, batch_size, memory_size, source_layer_size, sink_layer_size, device); return true; }; bool executeSharedWeightErrors( TensorT* h_weight_error, TensorT* d_weight_error, TensorT* h_shared_weights, TensorT* d_shared_weights, const int& source_layer_size, const int& sink_layer_size, const int& n_shared_layers, Eigen::DefaultDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { if (n_shared_layers == 0) return true; // Pool the shared weights erros this->combineSharedWeightErrors(h_weight_error, h_shared_weights, source_layer_size, sink_layer_size, n_shared_layers, device); return true; }; virtual bool executeWeightUpdate( TensorT* h_weight, TensorT* d_weight, TensorT* h_solver_params, TensorT* d_solver_params, TensorT* h_weight_error, TensorT* d_weight_error, std::shared_ptr<SolverTensorOp<TensorT, Eigen::DefaultDevice>>& solver_function, const int& source_layer_size, const int& sink_layer_size, const int& iter, Eigen::DefaultDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // Update the weights solver_function->operator()(h_weight, h_weight_error, h_solver_params, source_layer_size, sink_layer_size, iter, device);//getDrop()*error); return true; } }; } #endif //EVONET_MODELKERNAL_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE PopulationLogger<float> test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/PopulationLogger.h> #include <EvoNet/ml/Model.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(PopulationLogger1) BOOST_AUTO_TEST_CASE(constructor) { PopulationLogger<float>* ptr = nullptr; PopulationLogger<float>* nullPointer = nullptr; ptr = new PopulationLogger<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { PopulationLogger<float>* ptr = nullptr; ptr = new PopulationLogger<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(gettersAndSetters1) { PopulationLogger<float> population_logger(true, true); BOOST_CHECK(population_logger.getLogTimeGeneration()); BOOST_CHECK(population_logger.getLogTrainValErrorsGeneration()); } BOOST_AUTO_TEST_CASE(initLogs) { PopulationLogger<float> population_logger(true, true); population_logger.initLogs("Population1"); BOOST_CHECK_EQUAL(population_logger.getLogTimeGenerationCSVWriter().getFilename(), "Population1_TimePerGeneration.csv"); BOOST_CHECK_EQUAL(population_logger.getLogTimeGenerationCSVWriter().getLineCount(), 0); BOOST_CHECK_EQUAL(population_logger.getLogTrainValErrorsGenerationCSVWriter().getFilename(), "Population1_TrainValErrorsPerGeneration.csv"); BOOST_CHECK_EQUAL(population_logger.getLogTrainValErrorsGenerationCSVWriter().getLineCount(), 0); } BOOST_AUTO_TEST_CASE(logTimePerGeneration) { Model<float> model; model.setName("Model1"); PopulationLogger<float> population_logger(true, false); population_logger.initLogs("Population1"); population_logger.logTimePerGeneration(0); population_logger.logTimePerGeneration(1); // [TODO: read in and check] } BOOST_AUTO_TEST_CASE(logTrainValErrorsPerGeneration) { PopulationLogger<float> population_logger(false, true); population_logger.initLogs("Population1"); // make toy data std::vector<std::tuple<int, std::string, float>> model_validation_errors; for (int i = 0; i < 4; ++i) { model_validation_errors.push_back(std::make_tuple(i, std::to_string(i), float(i))); } population_logger.logTrainValErrorsPerGeneration(0, model_validation_errors); population_logger.logTrainValErrorsPerGeneration(1, model_validation_errors); // [TODO: read in and check] } BOOST_AUTO_TEST_CASE(writeLogs) { PopulationLogger<float> population_logger(true, true); population_logger.initLogs("Population1"); // make toy data std::vector<std::tuple<int, std::string, float>> model_validation_errors; for (int i = 0; i < 4; ++i) { model_validation_errors.push_back(std::make_tuple(i, std::to_string(i), float(i))); } population_logger.writeLogs(0, model_validation_errors); population_logger.writeLogs(1, model_validation_errors); // [TODO: read in and check] } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE StringParsing test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/core/StringParsing.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(stringParsing) BOOST_AUTO_TEST_CASE(SP_ReplaceTokens) { std::string test = ReplaceTokens("{postgres list}", { "[\{\}]" }, ""); BOOST_CHECK_EQUAL(test, "postgres list"); } BOOST_AUTO_TEST_CASE(SP_SplitString) { std::vector<std::string> test = SplitString("a,b,c,d,e", ","); std::vector<std::string> check = { "a","b","c","d","e" }; for (int i=0; i<check.size(); ++i) BOOST_CHECK_EQUAL(test[i], check[i]); } BOOST_AUTO_TEST_CASE(SP_RemoveWhiteSpaces) { std::string test = RemoveWhiteSpaces("A string with \t\t\t a lot of \n\n whitespace\n"); BOOST_CHECK_EQUAL(test, "Astringwithalotofwhitespace"); } BOOST_AUTO_TEST_SUITE_END()<file_sep>#ifndef EVONET_HELLOWORLD_H #define EVONET_HELLOWORLD_H namespace EvoNet { class Helloworld { public: /// Default constructor Helloworld(); /// Destructor ~Helloworld(); double addNumbers(const double& x, const double& y) const; }; } #endif //EVONET_HELLOWORLD_H<file_sep>/**TODO: Add copyright*/ /**TODO: Add copyright*/ #define BOOST_TEST_MODULE PopulationTrainer test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/PopulationTrainerDefaultDevice.h> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/io/PopulationTrainerFile.h> #include <EvoNet/ml/ModelTrainerDefaultDevice.h> using namespace EvoNet; using namespace std; // Extended classes used for testing template<typename TensorT> class ModelTrainerExt : public ModelTrainerDefaultDevice<TensorT> {}; template<typename TensorT> class ModelReplicatorExt : public ModelReplicator<TensorT> { public: void adaptiveReplicatorScheduler( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, float>>>& models_errors_per_generations) override { if (n_generations >= 0) { setRandomModifications( std::make_pair(0, 0), // addNodeDown std::make_pair(0, 0), // addNodeRight std::make_pair(0, 0), // copyNodeDown std::make_pair(0, 0), // copyNodeRight std::make_pair(1, 1), // addLink std::make_pair(0, 0), // copyLink std::make_pair(0, 0), // deleteNode std::make_pair(1, 1), // deleteLink std::make_pair(0, 0), // changeActivation std::make_pair(0, 0), // changeIntegration std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); } } }; template<typename TensorT> class PopulationTrainerExt : public PopulationTrainerDefaultDevice<TensorT> { public: void adaptivePopulationScheduler( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, float>>>& models_errors_per_generations) override { if (n_generations == getNGenerations() - 1) { setNTop(1); setNRandom(1); setNReplicatesPerModel(0); } else { setNTop(3); setNRandom(3); setNReplicatesPerModel(3); } } }; template<typename TensorT> class DataSimulatorExt : public DataSimulator<TensorT> { public: void simulateEvaluationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 3>& time_steps) override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_epochs = input_data.dimension(3); Eigen::Tensor<TensorT, 3> input_tmp(batch_size, memory_size, n_input_nodes); input_tmp.setValues( { {{8}, {7}, {6}, {5}, {4}, {3}, {2}, {1}}, {{9}, {8}, {7}, {6}, {5}, {4}, {3}, {2}}, {{10}, {9}, {8}, {7}, {6}, {5}, {4}, {3}}, {{11}, {10}, {9}, {8}, {7}, {6}, {5}, {4}}, {{12}, {11}, {10}, {9}, {8}, {7}, {6}, {5}} } ); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) for (int epochs_iter = 0; epochs_iter < n_epochs; ++epochs_iter) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = input_tmp(batch_iter, memory_iter, nodes_iter); // update the time_steps time_steps.setConstant(1.0f); } void simulateData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); Eigen::Tensor<TensorT, 3> input_tmp(batch_size, memory_size, n_input_nodes); input_tmp.setValues( { {{8}, {7}, {6}, {5}, {4}, {3}, {2}, {1}}, {{9}, {8}, {7}, {6}, {5}, {4}, {3}, {2}}, {{10}, {9}, {8}, {7}, {6}, {5}, {4}, {3}}, {{11}, {10}, {9}, {8}, {7}, {6}, {5}, {4}}, {{12}, {11}, {10}, {9}, {8}, {7}, {6}, {5}} } ); for (int batch_iter = 0; batch_iter<batch_size; ++batch_iter) for (int memory_iter = 0; memory_iter<memory_size; ++memory_iter) for (int nodes_iter = 0; nodes_iter<n_input_nodes; ++nodes_iter) for (int epochs_iter = 0; epochs_iter<n_epochs; ++epochs_iter) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = input_tmp(batch_iter, memory_iter, nodes_iter); Eigen::Tensor<TensorT, 3> output_tmp(batch_size, memory_size, n_output_nodes); output_tmp.setValues( { { { 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 },{ 1 } }, { { 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 } }, { { 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 } }, { { 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 } }, { { 6 },{ 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 } } }); for (int batch_iter = 0; batch_iter<batch_size; ++batch_iter) for (int memory_iter = 0; memory_iter<memory_size; ++memory_iter) for (int nodes_iter = 0; nodes_iter<n_output_nodes; ++nodes_iter) for (int epochs_iter = 0; epochs_iter<n_epochs; ++epochs_iter) output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = output_tmp(batch_iter, memory_iter, nodes_iter); // update the time_steps time_steps.setConstant(1.0f); } void simulateTrainingData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) override { simulateData(input_data, output_data, time_steps); } void simulateValidationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) override { simulateData(input_data, output_data, time_steps); } }; BOOST_AUTO_TEST_SUITE(populationTrainer) BOOST_AUTO_TEST_CASE(constructor) { PopulationTrainerExt<float>* ptr = nullptr; PopulationTrainerExt<float>* nullPointer = nullptr; ptr = new PopulationTrainerExt<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { PopulationTrainerExt<float>* ptr = nullptr; ptr = new PopulationTrainerExt<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(gettersAndSetters) { PopulationTrainerExt<float> population_trainer; population_trainer.setNTop(4); population_trainer.setNRandom(1); population_trainer.setNReplicatesPerModel(2); population_trainer.setNGenerations(10); population_trainer.setLogging(true); population_trainer.setRemoveIsolatedNodes(false); population_trainer.setPruneModelNum(12); population_trainer.setCheckCompleteModelInputToOutput(false); population_trainer.setSelectModels(false); population_trainer.setResetModelCopyWeights(true); population_trainer.setResetModelTemplateWeights(true); population_trainer.setPopulationSize(256); BOOST_CHECK_EQUAL(population_trainer.getNTop(), 4); BOOST_CHECK_EQUAL(population_trainer.getNRandom(), 1); BOOST_CHECK_EQUAL(population_trainer.getNReplicatesPerModel(), 2); BOOST_CHECK_EQUAL(population_trainer.getNGenerations(), 10); BOOST_CHECK(population_trainer.getLogTraining()); BOOST_CHECK(!population_trainer.getRemoveIsolatedNodes()); BOOST_CHECK_EQUAL(population_trainer.getPruneModelNum(), 12); BOOST_CHECK(!population_trainer.getCheckCompleteModelInputToOutput()); BOOST_CHECK(!population_trainer.getSelectModels()); BOOST_CHECK(population_trainer.getResetModelCopyWeights()); BOOST_CHECK(population_trainer.getResetModelTemplateWeights()); BOOST_CHECK_EQUAL(population_trainer.getPopulationSize(), 256); } BOOST_AUTO_TEST_CASE(setNEpochsTraining) { PopulationTrainerExt<float> population_trainer; population_trainer.setNEpochsTraining(101); BOOST_CHECK_EQUAL(population_trainer.getNEpochsTraining(), 101); ModelTrainerExt<float> model_trainer; BOOST_CHECK_NE(model_trainer.getNEpochsTraining(), 101); population_trainer.updateNEpochsTraining(model_trainer); BOOST_CHECK_EQUAL(model_trainer.getNEpochsTraining(), 101); population_trainer.setNEpochsTraining(-1); population_trainer.updateNEpochsTraining(model_trainer); BOOST_CHECK_EQUAL(model_trainer.getNEpochsTraining(), 101); } BOOST_AUTO_TEST_CASE(removeDuplicateModels) { PopulationTrainerExt<float> population_trainer; // make a vector of models to use for testing std::vector<Model<float>> models; for (int i=0; i<2; ++i) { for (int j=0; j<4; ++j) { Model<float> model; model.setName(std::to_string(j)); model.setId(i*j+j); models.push_back(model); } } population_trainer.removeDuplicateModels(models); BOOST_CHECK_EQUAL(models.size(), 4); for (int i=0; i<4; ++i) BOOST_CHECK_EQUAL(models[i].getName(), std::to_string(i)); } BOOST_AUTO_TEST_CASE(getTopNModels_) { PopulationTrainerExt<float> population_trainer; // make dummy data std::vector<std::tuple<int, std::string, float>> models_validation_errors; const int n_models = 4; for (int i=0; i<n_models; ++i) models_validation_errors.push_back(std::make_tuple(i+1, std::to_string(i+1), (float)(n_models-i))); const int n_top_models = 2; std::vector<std::tuple<int, std::string, float>> top_n_models = population_trainer.getTopNModels_( models_validation_errors, n_top_models); for (int i=0; i<n_top_models; ++i) { BOOST_CHECK_EQUAL(std::get<0>(top_n_models[i]), n_models-i); BOOST_CHECK_EQUAL(std::get<1>(top_n_models[i]), std::to_string(n_models - i)); BOOST_CHECK_EQUAL(std::get<2>(top_n_models[i]), (float)(i+1)); } } BOOST_AUTO_TEST_CASE(getRandomNModels_) { PopulationTrainerExt<float> population_trainer; // make dummy data std::vector<std::tuple<int, std::string, float>> models_validation_errors; const int n_models = 4; for (int i=0; i<n_models; ++i) models_validation_errors.push_back(std::make_tuple(i + 1, std::to_string(i + 1), (float)(n_models - i))); const int n_random_models = 2; std::vector<std::tuple<int, std::string, float>> random_n_models = population_trainer.getRandomNModels_( models_validation_errors, n_random_models); BOOST_CHECK_EQUAL(random_n_models.size(), 2); // for (int i=0; i<n_random_models; ++i) // { // printf("model name %s error %.2f", random_n_models[i].first.data(), random_n_models[i].second); // } } BOOST_AUTO_TEST_CASE(validateModels_) { // PopulationTrainerExt<float> population_trainer; // model_trainer_validateModels_.setBatchSize(5); // model_trainer_validateModels_.setMemorySize(8); // model_trainer_validateModels_.setNEpochs(100); // // make a vector of models to use for testing // std::vector<Model<float>> models; // Eigen::Tensor<float, 1> model_error(model_trainer_validateModels_.setBatchSize(5)); // for (int i=0; i<4; ++i) // { // Model<float> model; // model.setName(std::to_string(i)); // float values = (float)(4-i); // model_error.setValues({values, values, values, values, values}); // model.setError(model_error); // } // [TODO: complete] } BOOST_AUTO_TEST_CASE(selectModels) { PopulationTrainerExt<float> population_trainer; // [TODO: add tests] } BOOST_AUTO_TEST_CASE(replicateModels) { PopulationTrainerExt<float> population_trainer; population_trainer.setNReplicatesPerModel(2); ModelReplicatorExt<float> model_replicator; ModelBuilder<float> model_builder; // create an initial population std::vector<Model<float>> population1, population2, population3, population4, population5, population6, population7, population8; for (int i = 0; i < 2; ++i) { Model<float> model; model.setId(0); // make the baseline model std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", 1); node_names = model_builder.addFullyConnected(model, "Hidden1", "Mod1", node_names, 1, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.01, 0.9, 0.999, 1e-8)), 0, 0); node_names = model_builder.addFullyConnected(model, "Output", "Mod2", node_names, 1, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.01, 0.9, 0.999, 1e-8)), 0, 0); for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); //model.initNodes(4, 4); //model.initError(4, 4); model.findCycles(); Model<float> model1(model), model2(model), model3(model), // copy the models model4(model), model5(model), model6(model), model7(model), model8(model); population1.push_back(model1); // push the copies to the different test populations population2.push_back(model2); population3.push_back(model3); population4.push_back(model4); population5.push_back(model5); population6.push_back(model6); population7.push_back(model7); population8.push_back(model8); } // control (no modifications) model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(0, 0), // addNodeRight std::make_pair(0, 0), // copyNodeDown std::make_pair(0, 0), // copyNodeRight std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); population_trainer.replicateModels(population1, model_replicator); BOOST_CHECK_EQUAL(population1.size(), 6); // check for the expected size for (auto& model : population1) for (const auto& weight_map : model.getWeightsMap()) BOOST_CHECK(weight_map.second->getInitWeight()); // control (additions only) model_replicator.setRandomModifications( std::make_pair(1, 1), std::make_pair(1, 1), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(1, 1), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); population_trainer.replicateModels(population2, model_replicator); BOOST_CHECK_EQUAL(population2.size(), 6); // check for the expected size // break the new replicates (deletions only) model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(1, 1), std::make_pair(1, 1), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); population_trainer.replicateModels(population3, model_replicator); BOOST_CHECK_EQUAL(population3.size(), 6); // check for the expected size and # of new modified models (i.e., 0) for (int i = 0; i < population3.size(); ++i) { if (i < 2) BOOST_CHECK_EQUAL(population3.at(i).links_.size(), 4); else BOOST_CHECK_EQUAL(population3.at(i).links_.size(), 0); } // reset_model_copy_weights = true population_trainer.setResetModelCopyWeights(true); population_trainer.setResetModelTemplateWeights(false); for (auto& model : population4) for (auto& weight_map : model.getWeightsMap()) weight_map.second->setInitWeight(false); model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(0, 0), // addNodeRight std::make_pair(0, 0), // copyNodeDown std::make_pair(0, 0), // copyNodeRight std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); population_trainer.replicateModels(population4, model_replicator); BOOST_CHECK_EQUAL(population4.size(), 6); // check for the expected size for (auto& model : population4) { for (const auto& weight_map : model.getWeightsMap()) { if (model.getId() == 0) { BOOST_CHECK(!weight_map.second->getInitWeight()); } else { BOOST_CHECK(weight_map.second->getInitWeight()); } } } // reset_model_template_weights = true population_trainer.setResetModelCopyWeights(false); population_trainer.setResetModelTemplateWeights(true); for (auto& model : population5) for (auto& weight_map : model.getWeightsMap()) weight_map.second->setInitWeight(false); population_trainer.replicateModels(population5, model_replicator); BOOST_CHECK_EQUAL(population5.size(), 6); // check for the expected size for (auto& model : population5) { for (const auto& weight_map : model.getWeightsMap()) { if (model.getId() == 0) { BOOST_CHECK(weight_map.second->getInitWeight()); } else { BOOST_CHECK(!weight_map.second->getInitWeight()); } } } // remove_isolated_nodes = false population_trainer.setResetModelCopyWeights(false); population_trainer.setResetModelTemplateWeights(false); population_trainer.setRemoveIsolatedNodes(false); population_trainer.replicateModels(population6, model_replicator); BOOST_CHECK_EQUAL(population6.size(), 6); // check for the expected size // TODO: implement test // prune_model_num > 10 population_trainer.setRemoveIsolatedNodes(true); population_trainer.setPruneModelNum(100); population_trainer.replicateModels(population7, model_replicator); BOOST_CHECK_EQUAL(population7.size(), 6); // check for the expected size // TODO: implement test // check_complete_input_to_output = false population_trainer.setPruneModelNum(10); population_trainer.setCheckCompleteModelInputToOutput(false); population_trainer.replicateModels(population8, model_replicator); BOOST_CHECK_EQUAL(population8.size(), 6); // check for the expected size // TODO: implement test // // check for the expected tags // int cnt = 0; // for (const Model& model: population) // { // std::regex re("@replicateModel:"); // std::vector<std::string> str_tokens; // std::copy( // std::sregex_token_iterator(model.getName().begin(), model.getName().end(), re, -1), // std::sregex_token_iterator(), // std::back_inserter(str_tokens)); // if (cnt < 2) // BOOST_CHECK_EQUAL(str_tokens.size(), 1); // original model, no tag // else // BOOST_CHECK_EQUAL(str_tokens.size(), 2); // replicaed moel, tag // cnt += 1; // } } BOOST_AUTO_TEST_CASE(trainModels) { const std::vector<std::string> input_nodes = { "Input_000000000000" }; // true inputs + biases const std::vector<std::string> output_nodes = { "Output_000000000000" }; const int batch_size = 5; const int memory_size = 8; const int n_epochs_training = 5; const int n_epochs_validation = 5; const int n_epochs_evaluation = 5; PopulationTrainerExt<float> population_trainer; std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; for (size_t i = 0; i < 2; ++i) { ModelResources model_resources = { ModelDevice(0, 1) }; model_interpreters.push_back(ModelInterpreterDefaultDevice<float>(model_resources)); } ModelTrainerExt<float> model_trainer; model_trainer.setBatchSize(batch_size); model_trainer.setMemorySize(memory_size); model_trainer.setNEpochsTraining(n_epochs_training); model_trainer.setNEpochsValidation(n_epochs_validation); model_trainer.setNEpochsEvaluation(n_epochs_evaluation); std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper1; loss_function_helper1.output_nodes_ = output_nodes; loss_function_helper1.loss_functions_ = { std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-6, 1.0)) }; loss_function_helper1.loss_function_grads_ = { std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-6, 1.0)) }; loss_function_helpers.push_back(loss_function_helper1); model_trainer.setLossFunctionHelpers(loss_function_helpers); ModelReplicatorExt<float> model_replicator; model_replicator.setNNodeDownAdditions(1); model_replicator.setNLinkAdditions(1); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); ModelBuilder<float> model_builder; // create an initial population std::vector<Model<float>> population; for (int i=0; i<4; ++i) { Model<float> model; // make the baseline model std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", 1); node_names = model_builder.addFullyConnected(model, "Hidden1", "Mod1", node_names, 1, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.01, 0.9, 0.999, 1e-8)), 0, 0); node_names = model_builder.addFullyConnected(model, "Output", "Mod2", node_names, 1, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.01, 0.9, 0.999, 1e-8)), 0, 0); for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setId(i); model.setName(std::to_string(i)); population.push_back(model); } // Break two of the models for (int i=0; i<2; ++i) { model_replicator.deleteLink(population[i], 1e6); model_replicator.deleteLink(population[i], 1e6); model_replicator.deleteLink(population[i], 1e6); } // Toy data set used for all tests // Make the input data Eigen::Tensor<float, 4> input_data(batch_size, memory_size, (int)input_nodes.size(), n_epochs_training); Eigen::Tensor<float, 3> input_tmp(batch_size, memory_size, (int)input_nodes.size()); input_tmp.setValues( { {{8}, {7}, {6}, {5}, {4}, {3}, {2}, {1}}, {{9}, {8}, {7}, {6}, {5}, {4}, {3}, {2}}, {{10}, {9}, {8}, {7}, {6}, {5}, {4}, {3}}, {{11}, {10}, {9}, {8}, {7}, {6}, {5}, {4}}, {{12}, {11}, {10}, {9}, {8}, {7}, {6}, {5}} } ); for (int batch_iter=0; batch_iter<batch_size; ++batch_iter) for (int memory_iter=0; memory_iter<memory_size; ++memory_iter) for (int nodes_iter=0; nodes_iter<(int)input_nodes.size(); ++nodes_iter) for (int epochs_iter=0; epochs_iter<n_epochs_training; ++epochs_iter) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = input_tmp(batch_iter, memory_iter, nodes_iter); // Make the output data Eigen::Tensor<float, 4> output_data(batch_size, memory_size, (int)output_nodes.size(), n_epochs_training); Eigen::Tensor<float, 3> output_tmp(batch_size, memory_size, (int)output_nodes.size()); output_tmp.setValues( { { { 1 },{ 1 },{ 2 },{ 2 },{ 3 },{ 3 },{ 4 },{ 4 } }, { { 1 },{ 2 },{ 2 },{ 3 },{ 3 },{ 4 },{ 4 },{ 5 } }, { { 2 },{ 2 },{ 3 },{ 3 },{ 4 },{ 4 },{ 5 },{ 5 } }, { { 2 },{ 3 },{ 3 },{ 4 },{ 4 },{ 5 },{ 5 },{ 6 } }, { { 3 },{ 3 },{ 4 },{ 4 },{ 5 },{ 5 },{ 6 },{ 6 } } }); for (int batch_iter = 0; batch_iter<batch_size; ++batch_iter) for (int memory_iter = 0; memory_iter<memory_size; ++memory_iter) for (int nodes_iter = 0; nodes_iter<(int)output_nodes.size(); ++nodes_iter) for (int epochs_iter = 0; epochs_iter<n_epochs_training; ++epochs_iter) output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = output_tmp(batch_iter, memory_iter, nodes_iter); // Make the simulation time_steps Eigen::Tensor<float, 3> time_steps(batch_size, memory_size, n_epochs_training); Eigen::Tensor<float, 2> time_steps_tmp(batch_size, memory_size); time_steps_tmp.setValues({ {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1}} ); for (int batch_iter=0; batch_iter<batch_size; ++batch_iter) for (int memory_iter=0; memory_iter<memory_size; ++memory_iter) for (int epochs_iter=0; epochs_iter<n_epochs_training; ++epochs_iter) time_steps(batch_iter, memory_iter, epochs_iter) = time_steps_tmp(batch_iter, memory_iter); population_trainer.trainModels(population, model_trainer, model_interpreters,ModelLogger<float>(), input_data, output_data, time_steps, input_nodes); BOOST_CHECK_EQUAL(population.size(), 4); // broken models should still be there // TODO implement a better test... for (int i=0; i<population.size(); ++i) { std::cout << population[i].getError().size() << std::endl; if (i<2) BOOST_CHECK_EQUAL(population[i].getError().size(), 0); // error has not been calculated else BOOST_CHECK_EQUAL(population[i].getError().size(), batch_size*memory_size); // error has been calculated } } BOOST_AUTO_TEST_CASE(evalModels) { const std::vector<std::string> input_nodes = { "Input_000000000000" }; // true inputs + biases const std::vector<std::string> output_nodes = { "Output_000000000000" }; const int batch_size = 5; const int memory_size = 8; const int n_epochs_training = 5; const int n_epochs_validation = 5; const int n_epochs_evaluation = 5; PopulationTrainerExt<float> population_trainer; std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; for (size_t i = 0; i < 2; ++i) { ModelResources model_resources = { ModelDevice(0, 1) }; model_interpreters.push_back(ModelInterpreterDefaultDevice<float>(model_resources)); } ModelTrainerExt<float> model_trainer; model_trainer.setBatchSize(batch_size); model_trainer.setMemorySize(memory_size); model_trainer.setNEpochsTraining(n_epochs_training); model_trainer.setNEpochsValidation(n_epochs_validation); model_trainer.setNEpochsEvaluation(n_epochs_evaluation); std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper1; loss_function_helper1.output_nodes_ = output_nodes; loss_function_helper1.loss_functions_ = { std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-6, 1.0)) }; loss_function_helper1.loss_function_grads_ = { std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-6, 1.0)) }; loss_function_helpers.push_back(loss_function_helper1); model_trainer.setLossFunctionHelpers(loss_function_helpers); ModelReplicatorExt<float> model_replicator; model_replicator.setNNodeDownAdditions(1); model_replicator.setNLinkAdditions(1); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); ModelBuilder<float> model_builder; // create an initial population std::vector<Model<float>> population; for (int i = 0; i < 4; ++i) { Model<float> model; // make the baseline model std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", 1); node_names = model_builder.addFullyConnected(model, "Hidden1", "Mod1", node_names, 1, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.01, 0.9, 0.999, 1e-8)), 0, 0); node_names = model_builder.addFullyConnected(model, "Output", "Mod2", node_names, 1, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.01, 0.9, 0.999, 1e-8)), 0, 0); for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setId(i); model.setName(std::to_string(i)); population.push_back(model); } // Break two of the models for (int i = 0; i < 2; ++i) { model_replicator.deleteLink(population[i], 1e6); model_replicator.deleteLink(population[i], 1e6); model_replicator.deleteLink(population[i], 1e6); } // Toy data set used for all tests // Make the input data Eigen::Tensor<float, 4> input_data(batch_size, memory_size, (int)input_nodes.size(), n_epochs_training); Eigen::Tensor<float, 3> input_tmp(batch_size, memory_size, (int)input_nodes.size()); input_tmp.setValues( { {{8}, {7}, {6}, {5}, {4}, {3}, {2}, {1}}, {{9}, {8}, {7}, {6}, {5}, {4}, {3}, {2}}, {{10}, {9}, {8}, {7}, {6}, {5}, {4}, {3}}, {{11}, {10}, {9}, {8}, {7}, {6}, {5}, {4}}, {{12}, {11}, {10}, {9}, {8}, {7}, {6}, {5}} } ); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) for (int nodes_iter = 0; nodes_iter < (int)input_nodes.size(); ++nodes_iter) for (int epochs_iter = 0; epochs_iter < n_epochs_training; ++epochs_iter) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = input_tmp(batch_iter, memory_iter, nodes_iter); // Make the simulation time_steps Eigen::Tensor<float, 3> time_steps(batch_size, memory_size, n_epochs_training); Eigen::Tensor<float, 2> time_steps_tmp(batch_size, memory_size); time_steps_tmp.setValues({ {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1} } ); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) for (int epochs_iter = 0; epochs_iter < n_epochs_training; ++epochs_iter) time_steps(batch_iter, memory_iter, epochs_iter) = time_steps_tmp(batch_iter, memory_iter); population_trainer.evalModels(population, model_trainer, model_interpreters,ModelLogger<float>(), input_data, time_steps, input_nodes); BOOST_CHECK_EQUAL(population.size(), 4); // broken models should still be there for (int i = 0; i < population.size(); ++i) { Eigen::Tensor<float, 0> total_output = population[i].getNodesMap().at(output_nodes[0])->getOutput().sum(); if (i < 2) { BOOST_CHECK_EQUAL(population[i].getError().size(), 0); // error has not been calculated BOOST_CHECK_EQUAL(total_output(0), 0); BOOST_CHECK_EQUAL(population[i].getNodesMap().at(output_nodes[0])->getOutput().size(), 0); } else { BOOST_CHECK_EQUAL(population[i].getError().size(), 0); // error has not been calculated BOOST_CHECK_EQUAL(total_output(0), 260); BOOST_CHECK_EQUAL(population[i].getNodesMap().at(output_nodes[0])->getOutput().size(), batch_size*(memory_size + 1)); } } } BOOST_AUTO_TEST_CASE(exampleUsage) { PopulationTrainerExt<float> population_trainer; population_trainer.setNTop(2); population_trainer.setNRandom(2); population_trainer.setNReplicatesPerModel(3); population_trainer.setNGenerations(5); population_trainer.setLogging(true); // define the model logger ModelLogger<float> model_logger; // define the population logger PopulationLogger<float> population_logger(true, true); // Toy data set used for all tests DataSimulatorExt<float> data_simulator; const std::vector<std::string> input_nodes = {"Input_000000000000"}; // true inputs + biases const std::vector<std::string> output_nodes = {"Output_000000000000"}; const int batch_size = 5; const int memory_size = 8; const int n_epochs_training = 5; const int n_epochs_validation = 5; const int n_epochs_evaluation = 5; // define the model trainers and resources for the trainers std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; for (size_t i = 0; i < 2; ++i) { ModelResources model_resources = { ModelDevice(0, 1) }; model_interpreters.push_back(ModelInterpreterDefaultDevice<float>(model_resources)); } ModelTrainerExt<float> model_trainer; model_trainer.setBatchSize(batch_size); model_trainer.setMemorySize(memory_size); model_trainer.setNEpochsTraining(n_epochs_training); model_trainer.setNEpochsValidation(n_epochs_validation); model_trainer.setNEpochsEvaluation(n_epochs_evaluation); std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper1; loss_function_helper1.output_nodes_ = output_nodes; loss_function_helper1.loss_functions_ = { std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-6, 1.0)) }; loss_function_helper1.loss_function_grads_ = { std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-6, 1.0)) }; loss_function_helpers.push_back(loss_function_helper1); model_trainer.setLossFunctionHelpers(loss_function_helpers); // define the model replicator for growth mode ModelReplicatorExt<float> model_replicator; model_replicator.setNNodeDownAdditions(1); model_replicator.setNLinkAdditions(1); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); // define the initial population of 10 baseline models std::cout << "Making the initial population..." << std::endl; ModelBuilder<float> model_builder; std::vector<Model<float>> population; const int population_size = 8; for (int i = 0; i<population_size; ++i) { Model<float> model; // make the baseline model std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", 1); node_names = model_builder.addFullyConnected(model, "Hidden1", "Mod1", node_names, 1, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.01, 0.9, 0.999, 1e-8)), 0, 0); node_names = model_builder.addFullyConnected(model, "Output", "Mod2", node_names, 1, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.01, 0.9, 0.999, 1e-8)), 0, 0); for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); population.push_back(model); } // Evolve the population std::vector<std::vector<std::tuple<int, std::string, float>>> models_validation_errors_per_generation = population_trainer.evolveModels( population, "Test_population", model_trainer, model_interpreters,model_replicator, data_simulator, model_logger, population_logger, input_nodes); PopulationTrainerFile<float> population_trainer_file; population_trainer_file.storeModels(population, "populationTrainer"); population_trainer_file.storeModelValidations("populationTrainerValidationErrors.csv", models_validation_errors_per_generation); // [TODO: check that one of the models has a 0.0 error // i.e., correct structure and weights] } // [TODO: test for evaluatePopulation] BOOST_AUTO_TEST_SUITE_END()<file_sep> include(CppcheckTargets) ## we use only source files for cppcheck set(SOURCE_FILE_REGEX "\\.cpp$") # -------------------------------------------------------------------------- # add_cpp_check_tests : This macro generates cppcheck tests for files in the # given directory. # # The function searches for all sources files in the given directory and # and generates a cppcheck tests for each individual file. macro(add_cpp_check_tests _directory) # find files in _directory file(GLOB_RECURSE _source_files RELATIVE ${SMARTPEAK_HOST_DIRECTORY}/src/${_directory}/ ${SMARTPEAK_HOST_DIRECTORY}/src/${_directory}/*.cpp) # add tests foreach(_file_to_test ${_source_files}) string( REGEX MATCH ${SOURCE_FILE_REGEX} _is_source_file ${_file_to_test} ) string( REGEX MATCH ${DO_NOT_TEST_THESE_FILES_REGEX} _do_not_test ${_file_to_test} ) if(_is_source_file AND NOT _do_not_test) set(_test_name "src/${_directory}/${_file_to_test}") add_cppcheck_sources(${_test_name} ${SMARTPEAK_HOST_DIRECTORY}/src/${_directory}/${_file_to_test} STYLE PERFORMANCE INLINE_SUPPRESSION FAIL_ON_WARNINGS) endif() endforeach() endmacro() # -------------------------------------------------------------------------- add_cpp_check_tests("smartpeak") <file_sep>/**TODO: Add copyright*/ #ifndef EVONET_ADDPROBSIMULATOR_H #define EVONET_ADDPROBSIMULATOR_H #include <EvoNet/simulator/DataSimulator.h> #include <EvoNet/core/Preprocessing.h> namespace EvoNet { /** @brief implementation of the add problem that has been used to test sequence prediction in RNNS References: [TODO] */ template<typename TensorT> class AddProbSimulator: public DataSimulator<TensorT> { public: /* @brief implementation of the add problem that has been used to test sequence prediction in RNNS @param[in, out] random_sequence @param[in, out] mask_sequence @param[in] n_masks The number of random additions @returns the result of the two random numbers in the sequence **/ static TensorT AddProb( Eigen::Tensor<TensorT, 1>& random_sequence, Eigen::Tensor<TensorT, 1>& mask_sequence, const int& n_masks) { TensorT result = 0.0; const int sequence_length = random_sequence.size(); std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<> zero_to_one(0.0, 1.0); // in the range of abs(min/max(+/-0.5)) + abs(min/max(+/-0.5)) for TanH std::uniform_int_distribution<> zero_to_length(0, sequence_length - 1); // generate 2 random and unique indexes between // [0, sequence_length) for the mask std::vector<int> mask_indices = { zero_to_length(gen) }; for (int i = 0; i < n_masks - 1; ++i) { int mask_index = 0; do { mask_index = zero_to_length(gen); } while (std::count(mask_indices.begin(), mask_indices.end(), mask_index) != 0); mask_indices.push_back(mask_index); } // generate the random sequence // and the mask sequence for (int i = 0; i < sequence_length; ++i) { // the random sequence random_sequence(i) = zero_to_one(gen); // the mask if (std::count(mask_indices.begin(), mask_indices.end(), i) != 0) mask_sequence(i) = 1.0; else mask_sequence(i) = 0.0; // result update result += mask_sequence(i) * random_sequence(i); } //std::cout<<"mask sequence: "<<mask_sequence<<std::endl; [TESTS:convert to a test!] //std::cout<<"random sequence: "<<random_sequence<<std::endl; [TESTS:convert to a test!] //std::cout<<"result: "<<result<<std::endl; [TESTS:convert to a test!] return result; } int n_mask_ = 5; int sequence_length_ = 25; }; } #endif //EVONET_ADDPROBSIMULATOR_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ModelReplicator test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/ModelReplicator.h> #include <iostream> #include <algorithm> // tokenizing #include <regex> // tokenizing using namespace EvoNet; using namespace std; template<typename TensorT> class ModelReplicatorExt : public ModelReplicator<TensorT> { public: void adaptiveReplicatorScheduler( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) override { } }; BOOST_AUTO_TEST_SUITE(ModelReplicator1) BOOST_AUTO_TEST_CASE(constructor) { ModelReplicatorExt<float>* ptr = nullptr; ModelReplicatorExt<float>* nullPointer = nullptr; ptr = new ModelReplicatorExt<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { ModelReplicatorExt<float>* ptr = nullptr; ptr = new ModelReplicatorExt<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(gettersAndSetters) { ModelReplicatorExt<float> model_replicator; model_replicator.setNNodeDownAdditions(1); model_replicator.setNNodeRightAdditions(8); model_replicator.setNNodeDownCopies(9); model_replicator.setNNodeRightCopies(10); model_replicator.setNLinkAdditions(2); model_replicator.setNLinkCopies(11); model_replicator.setNNodeDeletions(3); model_replicator.setNLinkDeletions(4); model_replicator.setNWeightChanges(5); model_replicator.setWeightChangeStDev(6.0f); model_replicator.setNNodeActivationChanges(6); model_replicator.setNNodeIntegrationChanges(7); std::vector<std::pair<std::shared_ptr<ActivationOp<float>>, std::shared_ptr<ActivationOp<float>>>> activations = { std::make_pair(std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>())) }; model_replicator.setNodeActivations(activations); std::vector<std::tuple<std::shared_ptr<IntegrationOp<float>>, std::shared_ptr<IntegrationErrorOp<float>>, std::shared_ptr<IntegrationWeightGradOp<float>>>> integrations = { std::make_tuple(std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())) }; model_replicator.setNodeIntegrations(integrations); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownAdditions(), 1); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightAdditions(), 8); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownCopies(), 9); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightCopies(), 10); BOOST_CHECK_EQUAL(model_replicator.getNLinkAdditions(), 2); BOOST_CHECK_EQUAL(model_replicator.getNLinkCopies(), 11); BOOST_CHECK_EQUAL(model_replicator.getNNodeDeletions(), 3); BOOST_CHECK_EQUAL(model_replicator.getNLinkDeletions(), 4); BOOST_CHECK_EQUAL(model_replicator.getNWeightChanges(), 5); BOOST_CHECK_EQUAL(model_replicator.getWeightChangeStDev(), 6.0f); BOOST_CHECK_EQUAL(model_replicator.getNNodeActivationChanges(), 6); BOOST_CHECK_EQUAL(model_replicator.getNNodeIntegrationChanges(), 7); BOOST_CHECK(model_replicator.getNodeActivations()[0] == activations[0]); BOOST_CHECK(model_replicator.getNodeIntegrations()[0] == integrations[0]); model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(1, 1), std::make_pair(2, 2), std::make_pair(3, 3), std::make_pair(4, 4), std::make_pair(5, 5), std::make_pair(6, 6), std::make_pair(7, 7), std::make_pair(8, 8), std::make_pair(9, 9), std::make_pair(10, 10), std::make_pair(11, 11), std::make_pair(12, 12)); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[0].first, 0); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[0].second, 0); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[1].first, 1); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[1].second, 1); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[2].first, 2); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[2].second, 2); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[3].first, 3); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[3].second, 3); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[4].first, 4); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[4].second, 4); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[5].first, 5); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[5].second, 5); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[6].first, 6); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[6].second, 6); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[7].first, 7); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[7].second, 7); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[8].first, 8); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[8].second, 8); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[9].first, 9); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[9].second, 9); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[10].first, 10); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[10].second, 10); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[11].first, 11); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[11].second, 11); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[12].first, 12); BOOST_CHECK_EQUAL(model_replicator.getRandomModifications()[12].second, 12); } BOOST_AUTO_TEST_CASE(setAndMakeRandomModifications) { ModelReplicatorExt<float> model_replicator; // node additions down model_replicator.setRandomModifications( std::make_pair(1, 2), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); model_replicator.makeRandomModifications(); BOOST_CHECK_NE(model_replicator.getNNodeDownAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeActivationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeIntegrationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleDeletions(), 0); // node additions right model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(1, 2), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); model_replicator.makeRandomModifications(); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownAdditions(), 0); BOOST_CHECK_NE(model_replicator.getNNodeRightAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeActivationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeIntegrationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleDeletions(), 0); // copy additions down model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(1, 2), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); model_replicator.makeRandomModifications(); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightAdditions(), 0); BOOST_CHECK_NE(model_replicator.getNNodeDownCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeActivationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeIntegrationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleDeletions(), 0); // copy additions right model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(1, 2), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); model_replicator.makeRandomModifications(); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownCopies(), 0); BOOST_CHECK_NE(model_replicator.getNNodeRightCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeActivationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeIntegrationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleDeletions(), 0); // link additions model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(1, 2), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); model_replicator.makeRandomModifications(); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightCopies(), 0); BOOST_CHECK_NE(model_replicator.getNLinkAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeActivationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeIntegrationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleDeletions(), 0); // link copies model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(1, 2), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); model_replicator.makeRandomModifications(); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkAdditions(), 0); BOOST_CHECK_NE(model_replicator.getNLinkCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeActivationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeIntegrationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleDeletions(), 0); // node deletions model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(1, 2), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); model_replicator.makeRandomModifications(); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkCopies(), 0); BOOST_CHECK_NE(model_replicator.getNNodeDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeActivationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeIntegrationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleDeletions(), 0); // link deletions model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(1, 2), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); model_replicator.makeRandomModifications(); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDeletions(), 0); BOOST_CHECK_NE(model_replicator.getNLinkDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeActivationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeIntegrationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleDeletions(), 0); // node activation changes model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(1, 2), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); model_replicator.makeRandomModifications(); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkDeletions(), 0); BOOST_CHECK_NE(model_replicator.getNNodeActivationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeIntegrationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleDeletions(), 0); // node integration changes model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(1, 2), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); model_replicator.makeRandomModifications(); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeActivationChanges(), 0); BOOST_CHECK_NE(model_replicator.getNNodeIntegrationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleDeletions(), 0); // module additions changes model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(1, 2), std::make_pair(0, 0), std::make_pair(0, 0)); model_replicator.makeRandomModifications(); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeActivationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeIntegrationChanges(), 0); BOOST_CHECK_NE(model_replicator.getNModuleAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleDeletions(), 0); // module additions changes model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(1, 2), std::make_pair(0, 0)); model_replicator.makeRandomModifications(); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeActivationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeIntegrationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleAdditions(), 0); BOOST_CHECK_NE(model_replicator.getNModuleCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleDeletions(), 0); // module deletions changes model_replicator.setRandomModifications( std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(1, 2)); model_replicator.makeRandomModifications(); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDownCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeRightCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkCopies(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNLinkDeletions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeActivationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNNodeIntegrationChanges(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleAdditions(), 0); BOOST_CHECK_EQUAL(model_replicator.getNModuleCopies(), 0); BOOST_CHECK_NE(model_replicator.getNModuleDeletions(), 0); } BOOST_AUTO_TEST_CASE(makeUniqueHash) { ModelReplicatorExt<float> model_replicator; std::string unique_str, left_str, right_str; left_str = "hello"; bool left_str_found, right_str_found; for (int i=0; i<5; ++i) { right_str = std::to_string(i); unique_str = model_replicator.makeUniqueHash(left_str, right_str); std::regex re("_"); std::vector<std::string> unique_str_tokens; std::copy( std::sregex_token_iterator(unique_str.begin(), unique_str.end(), re, -1), std::sregex_token_iterator(), std::back_inserter(unique_str_tokens)); left_str_found = false; if (unique_str_tokens.size() > 1 && left_str == unique_str_tokens[0]) left_str_found = true; BOOST_CHECK(left_str_found); right_str_found = false; if (unique_str_tokens.size() > 2 && right_str == unique_str_tokens[1]) right_str_found = true; BOOST_CHECK(right_str_found); } } BOOST_AUTO_TEST_CASE(updateName) { ModelReplicatorExt<float> model_replicator; std::string new_node_name, node_prefix; // control model_replicator.updateName("Node1", "%s", "", node_prefix, new_node_name); BOOST_CHECK_EQUAL(node_prefix, "Node1"); BOOST_CHECK_NE(new_node_name, "Node1"); // test model_replicator.updateName("Node1@2018", "%s", "", node_prefix, new_node_name); BOOST_CHECK_EQUAL(node_prefix, "Node1"); BOOST_CHECK_NE(new_node_name, "Node1"); } Model<float> makeModel1() { /** * Directed Acyclic Graph Toy Network Model */ Node<float> i1, i2, h1, h2, o1, o2, b1, b2; Link l1, l2, l3, l4, lb1, lb2, l5, l6, l7, l8, lb3, lb4; Weight<float> w1, w2, w3, w4, wb1, wb2, w5, w6, w7, w8, wb3, wb4; Model<float> model1; // Toy network: 1 hidden layer, fully connected, DAG i1 = Node<float>("0", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); i2 = Node<float>("1", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h1 = Node<float>("2", NodeType::hidden, NodeStatus::deactivated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h2 = Node<float>("3", NodeType::hidden, NodeStatus::deactivated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o1 = Node<float>("4", NodeType::output, NodeStatus::deactivated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o2 = Node<float>("5", NodeType::output, NodeStatus::deactivated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b1 = Node<float>("6", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b2 = Node<float>("7", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); // weights std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; // weight_init.reset(new RandWeightInitOp(1.0)); // No random init for testing weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w1 = Weight<float>("0", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w2 = Weight<float>("1", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w3 = Weight<float>("2", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w4 = Weight<float>("3", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb1 = Weight<float>("4", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb2 = Weight<float>("5", weight_init, solver); // input layer + bias l1 = Link("0_to_2", "0", "2", "0"); l2 = Link("0_to_3", "0", "3", "1"); l3 = Link("1_to_2", "1", "2", "2"); l4 = Link("1_to_3", "1", "3", "3"); lb1 = Link("6_to_2", "6", "2", "4"); lb2 = Link("6_to_3", "6", "3", "5"); // weights weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w5 = Weight<float>("6", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w6 = Weight<float>("7", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w7 = Weight<float>("8", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w8 = Weight<float>("9", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb3 = Weight<float>("10", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb4 = Weight<float>("11", weight_init, solver); // hidden layer + bias l5 = Link("2_to_4", "2", "4", "6"); l6 = Link("2_to_5", "2", "5", "7"); l7 = Link("3_to_4", "3", "4", "8"); l8 = Link("3_to_5", "3", "5", "9"); lb3 = Link("7_to_4", "7", "4", "10"); lb4 = Link("7_to_5", "7", "5", "11"); // define a module lb1.setModuleName("Module1"); lb2.setModuleName("Module1"); wb1.setModuleName("Module1"); wb2.setModuleName("Module1"); h1.setModuleName("Module1"); h2.setModuleName("Module1"); b1.setModuleName("Module1"); model1.setId(1); model1.addNodes({i1, i2, h1, h2, o1, o2, b1, b2}); model1.addWeights({w1, w2, w3, w4, wb1, wb2, w5, w6, w7, w8, wb3, wb4}); model1.addLinks({l1, l2, l3, l4, lb1, lb2, l5, l6, l7, l8, lb3, lb4}); return model1; } BOOST_AUTO_TEST_CASE(selectNodes) { // [TODO: make test; currenlty, combined with selectRandomNode1] } Model<float> model_selectRandomNode1 = makeModel1(); BOOST_AUTO_TEST_CASE(selectRandomNode1) { ModelReplicatorExt<float> model_replicator; std::vector<NodeType> exclusion_list, inclusion_list; std::string random_node; bool test_passed; // [TODO: add loop here with iter = 100] exclusion_list = {NodeType::bias, NodeType::input}; inclusion_list = {}; std::vector<std::string> node_names = {"2", "3", "4", "5"}; random_node = model_replicator.selectRandomNode(model_selectRandomNode1, exclusion_list, inclusion_list); test_passed = false; if (std::count(node_names.begin(), node_names.end(), random_node) != 0) test_passed = true; BOOST_CHECK(test_passed); exclusion_list = {}; inclusion_list = {NodeType::hidden, NodeType::output}; random_node = model_replicator.selectRandomNode(model_selectRandomNode1, exclusion_list, inclusion_list); test_passed = false; if (std::count(node_names.begin(), node_names.end(), random_node) != 0) test_passed = true; BOOST_CHECK(test_passed); } Model<float> model_selectRandomLink1 = makeModel1(); BOOST_AUTO_TEST_CASE(selectRandomLink1) { ModelReplicatorExt<float> model_replicator; std::vector<NodeType> source_exclusion_list, source_inclusion_list, sink_exclusion_list, sink_inclusion_list; std::string random_link; bool test_passed; std::vector<std::string> link_names = {"2_to_4", "3_to_4", "2_to_5", "3_to_5"}; // [TODO: add loop here with iter = 100] source_exclusion_list = {NodeType::bias, NodeType::input}; source_inclusion_list = {}; sink_exclusion_list = {NodeType::bias, NodeType::input}; sink_inclusion_list = {}; random_link = model_replicator.selectRandomLink( model_selectRandomLink1, source_exclusion_list, source_inclusion_list, sink_exclusion_list, sink_inclusion_list); test_passed = false; if (std::count(link_names.begin(), link_names.end(), random_link) != 0) test_passed = true; BOOST_CHECK(test_passed); source_exclusion_list = {NodeType::bias, NodeType::input}; source_inclusion_list = {NodeType::hidden, NodeType::output}; sink_exclusion_list = {}; sink_inclusion_list = {}; random_link = model_replicator.selectRandomLink( model_selectRandomLink1, source_exclusion_list, source_inclusion_list, sink_exclusion_list, sink_inclusion_list); test_passed = false; if (std::count(link_names.begin(), link_names.end(), random_link) != 0) test_passed = true; BOOST_CHECK(test_passed); } Model<float> model_selectModules1 = makeModel1(); BOOST_AUTO_TEST_CASE(selectModules) { ModelReplicatorExt<float> model_replicator; std::vector<std::string> test1 = model_replicator.selectModules(model_selectModules1, {}, {}); BOOST_CHECK_EQUAL(test1[0], "Module1"); std::vector<std::string> test2 = model_replicator.selectModules(model_selectModules1, {NodeType::hidden}, {}); BOOST_CHECK_EQUAL(test2[0], "Module1"); std::vector<std::string> test3 = model_replicator.selectModules(model_selectModules1, { NodeType::hidden, NodeType::bias }, {}); BOOST_CHECK_EQUAL(test3.size(), 0); std::vector<std::string> test4 = model_replicator.selectModules(model_selectModules1, {}, { NodeType::hidden }); BOOST_CHECK_EQUAL(test4[0], "Module1"); } Model<float> model_selectRandomModule1 = makeModel1(); BOOST_AUTO_TEST_CASE(selectRandomModule1) { ModelReplicatorExt<float> model_replicator; std::vector<NodeType> exclusion_list, inclusion_list; std::string random_module; exclusion_list = {}; inclusion_list = {}; random_module = model_replicator.selectRandomModule(model_selectRandomNode1, exclusion_list, inclusion_list); BOOST_CHECK_EQUAL(random_module, "Module1"); } Model<float> model_addLink = makeModel1(); BOOST_AUTO_TEST_CASE(addLink) { ModelReplicatorExt<float> model_replicator; model_replicator.addLink(model_addLink); std::vector<std::string> link_names = { "Link_0_to_2", "Link_0_to_3", "Link_1_to_2", "Link_1_to_3", // existing links "Link_2_to_4", "Link_2_to_5", "Link_3_to_4", "Link_3_to_5", // existing links "Link_0_to_4", "Link_0_to_5", "Link_1_to_4", "Link_1_to_5", // new links "Link_2_to_3", "Link_3_to_2", "Link_4_to_5", "Link_5_to_4", // new links "Link_4_to_2", "Link_5_to_2", "Link_4_to_3", "Link_5_to_3", // new cyclic links "Link_2_to_2", "Link_5_to_5", "Link_4_to_4", "Link_3_to_3", // new cyclic links }; std::vector<std::string> weight_names = { "Weight_0_to_2", "Weight_0_to_3", "Weight_1_to_2", "Weight_1_to_3", // existing weights "Weight_2_to_4", "Weight_2_to_5", "Weight_3_to_4", "Weight_3_to_5", // existing weights "Weight_0_to_4", "Weight_0_to_5", "Weight_1_to_4", "Weight_1_to_5", // new weights "Weight_2_to_3", "Weight_3_to_2", "Weight_4_to_5", "Weight_5_to_4", // new weights "Weight_4_to_2", "Weight_5_to_2", "Weight_4_to_3", "Weight_5_to_3", // new cyclic weights "Weight_2_to_2", "Weight_5_to_5", "Weight_4_to_4", "Weight_3_to_3", // new cyclic weights }; // [TODO: add loop here with iter = 100] std::regex re("@"); bool link_found = false; std::string link_name = model_addLink.getLinks().rbegin()->getName(); std::vector<std::string> link_name_tokens; std::copy( std::sregex_token_iterator(link_name.begin(), link_name.end(), re, -1), std::sregex_token_iterator(), std::back_inserter(link_name_tokens)); if (std::count(link_names.begin(), link_names.end(), link_name_tokens[0]) != 0) link_found = true; // [TODO: add tests for the correct tokens after @] // std::regex re(":"); to split the "addLinks" from the timestamp BOOST_CHECK(link_found); bool weight_found = false; std::string weight_name = model_addLink.getWeights().rbegin()->getName(); std::vector<std::string> weight_name_tokens; std::copy( std::sregex_token_iterator(weight_name.begin(), weight_name.end(), re, -1), std::sregex_token_iterator(), std::back_inserter(weight_name_tokens)); if (std::count(weight_names.begin(), weight_names.end(), weight_name_tokens[0]) != 0) // [TODO: implement getWeights] weight_found = true; // [TODO: add tests for the correct tokens after @] // std::regex re(":"); to split the "addLinks" from the timestamp BOOST_CHECK(weight_found); } Model<float> model_copyLink = makeModel1(); BOOST_AUTO_TEST_CASE(copyLink) { ModelReplicatorExt<float> model_replicator; model_replicator.copyLink(model_copyLink); std::vector<std::string> link_names = { "Link_0_to_2", "Link_0_to_3", "Link_1_to_2", "Link_1_to_3", // existing links "Link_2_to_4", "Link_2_to_5", "Link_3_to_4", "Link_3_to_5", // existing links "Link_0_to_4", "Link_0_to_5", "Link_1_to_4", "Link_1_to_5", // new links "Link_2_to_3", "Link_3_to_2", "Link_4_to_5", "Link_5_to_4", // new links "Link_4_to_2", "Link_5_to_2", "Link_4_to_3", "Link_5_to_3", // new cyclic links "Link_2_to_2", "Link_5_to_5", "Link_4_to_4", "Link_3_to_3", // new cyclic links }; std::vector<std::string> weight_names = { "0", "1", "2", "3", "4", "5", "6","7","8","9","10","11" }; // [TODO: add loop here with iter = 100] std::regex re("@"); bool link_found = false; std::string link_name = model_copyLink.getLinks().rbegin()->getName(); std::vector<std::string> link_name_tokens; std::copy( std::sregex_token_iterator(link_name.begin(), link_name.end(), re, -1), std::sregex_token_iterator(), std::back_inserter(link_name_tokens)); if (std::count(link_names.begin(), link_names.end(), link_name_tokens[0]) != 0) link_found = true; // [TODO: add tests for the correct tokens after @] // std::regex re(":"); to split the "copyLinks" from the timestamp BOOST_CHECK(link_found); bool weight_found = true; for (const auto& weight_map: model_copyLink.weights_) if (std::count(weight_names.begin(), weight_names.end(), weight_map.second->getName()) == 0) // [TODO: implement getWeights] weight_found = false; // [TODO: add tests for the correct tokens after @] // std::regex re(":"); to split the "copyLinks" from the timestamp BOOST_CHECK(weight_found); } Model<float> model_addNodeDown = makeModel1(); BOOST_AUTO_TEST_CASE(addNodeDown) { ModelReplicatorExt<float> model_replicator; model_replicator.addNodeDown(model_addNodeDown); std::vector<std::string> node_names = { "2", "3", "4", "5" // existing nodes }; // [TODO: add loop here with iter = 100] std::regex re("@"); // check that the node was found bool node_found = false; std::string node_name = ""; for (const Node<float>& node: model_addNodeDown.getNodes()) { node_name = node.getName(); std::vector<std::string> node_name_tokens; std::copy( std::sregex_token_iterator(node_name.begin(), node_name.end(), re, -1), std::sregex_token_iterator(), std::back_inserter(node_name_tokens)); if (node_name_tokens.size() > 1 && std::count(node_names.begin(), node_names.end(), node_name_tokens[0]) != 0) { node_found = true; break; } } BOOST_CHECK(node_found); // check the correct text after @ bool add_node_marker_found = false; std::regex re_addNodes("@|#"); std::vector<std::string> node_text_tokens; std::copy( std::sregex_token_iterator(node_name.begin(), node_name.end(), re_addNodes, -1), std::sregex_token_iterator(), std::back_inserter(node_text_tokens)); if (node_text_tokens.size() > 1 && node_text_tokens[1] == "addNodeDown") add_node_marker_found = true; BOOST_CHECK(add_node_marker_found); // [TODO: check that the node is of the correct type] // [TODO: check that the modified link was found] // [TODO: check that the modified link weight name was not changed] // [TODO: check that the new link was found] // [TODO: check that the new weight was found] } Model<float> model_addNodeRight = makeModel1(); BOOST_AUTO_TEST_CASE(addNodeRight) { ModelReplicatorExt<float> model_replicator; model_replicator.addNodeRight(model_addNodeRight); std::vector<std::string> node_names = { "2", "3", "4", "5" // existing nodes }; // [TODO: add loop here with iter = 100] std::regex re("@"); // check that the node was found bool node_found = false; std::string node_name = ""; for (const Node<float>& node : model_addNodeRight.getNodes()) { node_name = node.getName(); std::vector<std::string> node_name_tokens; std::copy( std::sregex_token_iterator(node_name.begin(), node_name.end(), re, -1), std::sregex_token_iterator(), std::back_inserter(node_name_tokens)); if (node_name_tokens.size() > 1 && std::count(node_names.begin(), node_names.end(), node_name_tokens[0]) != 0) { node_found = true; break; } } BOOST_CHECK(node_found); // check the correct text after @ bool add_node_marker_found = false; std::regex re_addNodes("@|#"); std::vector<std::string> node_text_tokens; std::copy( std::sregex_token_iterator(node_name.begin(), node_name.end(), re_addNodes, -1), std::sregex_token_iterator(), std::back_inserter(node_text_tokens)); if (node_text_tokens.size() > 1 && node_text_tokens[1] == "addNodeRight") add_node_marker_found = true; BOOST_CHECK(add_node_marker_found); // [TODO: check that the node is of the correct type] // [TODO: check that the new modified links were found] // [TODO: check that the new modified weighs were found] // [TODO: check that the new link/weight/node bias were found] } Model<float> model_copyNodeDown = makeModel1(); BOOST_AUTO_TEST_CASE(copyNodeDown) { ModelReplicatorExt<float> model_replicator; model_replicator.copyNodeDown(model_copyNodeDown); std::vector<std::string> node_names = { "2", "3", "4", "5" // existing nodes }; // [TODO: add loop here with iter = 100] std::regex re("@"); // check that the node was found bool node_found = false; std::string node_name = ""; for (const Node<float>& node : model_copyNodeDown.getNodes()) { node_name = node.getName(); std::vector<std::string> node_name_tokens; std::copy( std::sregex_token_iterator(node_name.begin(), node_name.end(), re, -1), std::sregex_token_iterator(), std::back_inserter(node_name_tokens)); if (node_name_tokens.size() > 1 && std::count(node_names.begin(), node_names.end(), node_name_tokens[0]) != 0) { node_found = true; break; } } BOOST_CHECK(node_found); // check the correct text after @ bool add_node_marker_found = false; std::regex re_copyNodes("@|#"); std::vector<std::string> node_text_tokens; std::copy( std::sregex_token_iterator(node_name.begin(), node_name.end(), re_copyNodes, -1), std::sregex_token_iterator(), std::back_inserter(node_text_tokens)); if (node_text_tokens.size() > 1 && node_text_tokens[1] == "copyNodeDown") add_node_marker_found = true; BOOST_CHECK(add_node_marker_found); // [TODO: check that the node is of the correct type] // [TODO: check that the modified link was found] // [TODO: check that the modified link weight name was not changed] // [TODO: check that the new link was found] // [TODO: check that the new weight was found] } Model<float> model_copyNodeRight = makeModel1(); BOOST_AUTO_TEST_CASE(copyNodeRight) { ModelReplicatorExt<float> model_replicator; model_replicator.copyNodeRight(model_copyNodeRight); std::vector<std::string> node_names = { "2", "3", "4", "5" // existing nodes }; // [TODO: add loop here with iter = 100] std::regex re("@"); // check that the node was found bool node_found = false; std::string node_name = ""; for (const Node<float>& node : model_copyNodeRight.getNodes()) { node_name = node.getName(); std::vector<std::string> node_name_tokens; std::copy( std::sregex_token_iterator(node_name.begin(), node_name.end(), re, -1), std::sregex_token_iterator(), std::back_inserter(node_name_tokens)); if (node_name_tokens.size() > 1 && std::count(node_names.begin(), node_names.end(), node_name_tokens[0]) != 0) { node_found = true; break; } } BOOST_CHECK(node_found); // check the correct text after @ bool add_node_marker_found = false; std::regex re_copyNodes("@|#"); std::vector<std::string> node_text_tokens; std::copy( std::sregex_token_iterator(node_name.begin(), node_name.end(), re_copyNodes, -1), std::sregex_token_iterator(), std::back_inserter(node_text_tokens)); if (node_text_tokens.size() > 1 && node_text_tokens[1] == "copyNodeRight") add_node_marker_found = true; BOOST_CHECK(add_node_marker_found); // [TODO: check that the node is of the correct type] // [TODO: check that the new modified links were found] // [TODO: check that the new modified weighs were found] // [TODO: check that the new link/weight/node bias were found] } Model<float> model_deleteNode = makeModel1(); BOOST_AUTO_TEST_CASE(deleteNode) { ModelReplicatorExt<float> model_replicator; model_replicator.deleteNode(model_deleteNode, 10); BOOST_CHECK_EQUAL(model_deleteNode.getNodes().size(), 7); BOOST_CHECK_EQUAL(model_deleteNode.getLinks().size(), 7); BOOST_CHECK_EQUAL(model_deleteNode.getWeights().size(), 7); model_replicator.deleteNode(model_deleteNode, 10); BOOST_CHECK_EQUAL(model_deleteNode.getNodes().size(),3); BOOST_CHECK_EQUAL(model_deleteNode.getLinks().size(), 2); BOOST_CHECK_EQUAL(model_deleteNode.getWeights().size(), 2); model_replicator.deleteNode(model_deleteNode, 10); BOOST_CHECK_EQUAL(model_deleteNode.getNodes().size(), 3); BOOST_CHECK_EQUAL(model_deleteNode.getLinks().size(), 2); BOOST_CHECK_EQUAL(model_deleteNode.getWeights().size(), 2); } Model<float> model_deleteLink = makeModel1(); BOOST_AUTO_TEST_CASE(deleteLink) { ModelReplicatorExt<float> model_replicator; model_replicator.deleteLink(model_deleteLink, 10); BOOST_CHECK_EQUAL(model_deleteLink.getNodes().size(), 8); BOOST_CHECK_EQUAL(model_deleteLink.getLinks().size(), 11); // [TODO: additional tests needed?] } Model<float> model_changeNodeActivation = makeModel1(); BOOST_AUTO_TEST_CASE(changeNodeActivation) { ModelReplicatorExt<float> model_replicator; model_replicator.setNodeActivations({ std::make_pair(std::make_shared<ELUOp<float>>(ELUOp<float>()), std::make_shared<ELUGradOp<float>>(ELUGradOp<float>()))}); std::vector<std::string> node_names = { "0", "1", "2", "3", "4", "5", "6", "7" }; model_replicator.changeNodeActivation(model_changeNodeActivation); // [TODO: add loop here with iter = 100] int linear_cnt = 0; int relu_cnt = 0; int elu_cnt = 0; for (const std::string& node_name : node_names) { const Node<float> node = model_changeNodeActivation.getNode(node_name); if (node.getActivation()->getName() == "LinearOp") ++linear_cnt; else if (node.getActivation()->getName() == "ReLUOp") ++relu_cnt; else if (node.getActivation()->getName() == "ELUOp") ++elu_cnt; } BOOST_CHECK_EQUAL(linear_cnt, 4); BOOST_CHECK_EQUAL(relu_cnt, 3); BOOST_CHECK_EQUAL(elu_cnt, 1); } Model<float> model_changeNodeIntegration = makeModel1(); BOOST_AUTO_TEST_CASE(changeNodeIntegration) { ModelReplicatorExt<float> model_replicator; model_replicator.setNodeIntegrations({ std::make_tuple(std::make_shared<ProdOp<float>>(ProdOp<float>()), std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()), std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>())) }); std::vector<std::string> node_names = { "0", "1", "2", "3", "4", "5", "6", "7" }; model_replicator.changeNodeIntegration(model_changeNodeIntegration); // [TODO: add loop here with iter = 100] int sum_cnt = 0; int product_cnt = 0; for (const std::string& node_name : node_names) { const Node<float> node = model_changeNodeIntegration.getNode(node_name); if (node.getIntegration()->getName() == "SumOp") ++sum_cnt; else if (node.getIntegration()->getName() == "ProdOp") ++product_cnt; } BOOST_CHECK_EQUAL(sum_cnt, 7); BOOST_CHECK_EQUAL(product_cnt, 1); } Model<float> model_addModule = makeModel1(); BOOST_AUTO_TEST_CASE(addModule) { ModelReplicatorExt<float> model_replicator; model_replicator.addModule(model_addModule); // new module components std::vector<std::string> node_names_prefix = {"2", "3", "6"}; std::vector<std::string> link_names_prefix = { "6_to_2", "6_to_3", // new module "0_to_2", "0_to_3", "1_to_2", "1_to_3", "2_to_4", "2_to_5", "3_to_4", "3_to_5" }; // new connections std::vector<std::string> weight_names_prefix = { "4", "5", // new module "0", "1", "2", "3", "6", "7", "8", "9" }; // new connections // check for the expected model size BOOST_CHECK_EQUAL(model_addModule.getNodes().size(), 11); // 8 existing + 3 new BOOST_CHECK_EQUAL(model_addModule.getLinks().size(), 22); // 12 existing + 2 new + 8 new connecting BOOST_CHECK_EQUAL(model_addModule.getWeights().size(), 22); // 12 existing + 2 new + 8 new connecting // check that the expected nodes/links/weights exist std::map<std::string, int> node_names_map, link_names_map, weight_names_map; for (const std::string& name : node_names_prefix) node_names_map.emplace(name, 0); for (const std::string& name : link_names_prefix) link_names_map.emplace(name, 0); for (const std::string& name : weight_names_prefix) weight_names_map.emplace(name, 0); for (const Node<float>& node : model_addModule.getNodes()) { std::string name_prefix, new_name; model_replicator.updateName(node.getName(), "", "", name_prefix, new_name); if (std::count(node_names_prefix.begin(), node_names_prefix.end(), name_prefix) > 0) node_names_map.at(name_prefix) += 1; } for (const Link& link : model_addModule.getLinks()) { std::string name_prefix, new_name; model_replicator.updateName(link.getName(), "", "", name_prefix, new_name); if (std::count(link_names_prefix.begin(), link_names_prefix.end(), name_prefix) > 0) link_names_map.at(name_prefix) += 1; } for (const Weight<float>& weight : model_addModule.getWeights()) { std::string name_prefix, new_name; model_replicator.updateName(weight.getName(), "", "", name_prefix, new_name); if (std::count(weight_names_prefix.begin(), weight_names_prefix.end(), name_prefix) > 0) weight_names_map.at(name_prefix) += 1; } for (const auto& name_count : node_names_map) BOOST_CHECK_EQUAL(name_count.second, 2); for (const auto& name_count : link_names_map) BOOST_CHECK_EQUAL(name_count.second, 2); for (const auto& name_count : weight_names_map) BOOST_CHECK_EQUAL(name_count.second, 2); // check the correct text after @ // [TODO: check that the node is of the correct type] // [TODO: check that the modified link was found] // [TODO: check that the modified link weight name was not changed] // [TODO: check that the new link was found] // [TODO: check that the new weight was found] } Model<float> model_copyModule = makeModel1(); BOOST_AUTO_TEST_CASE(copyModule) { ModelReplicatorExt<float> model_replicator; model_replicator.copyModule(model_copyModule); // new module components std::vector<std::string> node_names_prefix = { "2", "3", "6" }; std::vector<std::string> link_names_prefix = { "6_to_2", "6_to_3", // new module "0_to_2", "0_to_3", "1_to_2", "1_to_3", "2_to_4", "2_to_5", "3_to_4", "3_to_5" }; // new connections std::vector<std::string> weight_names_prefix = { "4", "5" }; // new module // check for the expected model size BOOST_CHECK_EQUAL(model_copyModule.getNodes().size(), 11); // 8 existing + 3 new BOOST_CHECK_EQUAL(model_copyModule.getLinks().size(), 22); // 12 existing + 2 new + 8 new connecting BOOST_CHECK_EQUAL(model_copyModule.getWeights().size(), 12); // 12 existing // check that the expected nodes/links/weights exist std::map<std::string, int> node_names_map, link_names_map, weight_names_map; for (const std::string& name : node_names_prefix) node_names_map.emplace(name, 0); for (const std::string& name : link_names_prefix) link_names_map.emplace(name, 0); for (const std::string& name : weight_names_prefix) weight_names_map.emplace(name, 0); for (const Node<float>& node : model_copyModule.getNodes()) { std::string name_prefix, new_name; model_replicator.updateName(node.getName(), "", "", name_prefix, new_name); if (std::count(node_names_prefix.begin(), node_names_prefix.end(), name_prefix) > 0) node_names_map.at(name_prefix) += 1; } for (const Link& link : model_copyModule.getLinks()) { std::string name_prefix, new_name; model_replicator.updateName(link.getName(), "", "", name_prefix, new_name); if (std::count(link_names_prefix.begin(), link_names_prefix.end(), name_prefix) > 0) link_names_map.at(name_prefix) += 1; } for (const Weight<float>& weight : model_copyModule.getWeights()) { std::string name_prefix, new_name; model_replicator.updateName(weight.getName(), "", "", name_prefix, new_name); if (std::count(weight_names_prefix.begin(), weight_names_prefix.end(), name_prefix) > 0) weight_names_map.at(name_prefix) += 1; } for (const auto& name_count : node_names_map) BOOST_CHECK_EQUAL(name_count.second, 2); for (const auto& name_count : link_names_map) BOOST_CHECK_EQUAL(name_count.second, 2); for (const auto& name_count : weight_names_map) BOOST_CHECK_EQUAL(name_count.second, 1); // check the correct text after @ // [TODO: check that the node is of the correct type] // [TODO: check that the modified link was found] // [TODO: check that the modified link weight name was not changed] // [TODO: check that the new link was found] // [TODO: check that the new weight was found] } Model<float> model_deleteModule = makeModel1(); BOOST_AUTO_TEST_CASE(deleteModule) { ModelReplicatorExt<float> model_replicator; model_replicator.deleteModule(model_deleteModule, 0); // remaining std::vector<std::string> node_names = { "0", "1", "4", "5", "7" }; std::vector<std::string> link_names = { "7_to_4", "7_to_5" }; std::vector<std::string> weight_names = { "10", "11" }; // check for the expected model size BOOST_CHECK_EQUAL(model_deleteModule.getNodes().size(), 5); // 8 existing - 3 BOOST_CHECK_EQUAL(model_deleteModule.getLinks().size(), 2); // 12 existing - 10 BOOST_CHECK_EQUAL(model_deleteModule.getWeights().size(), 2); // 12 existing - 10 // check for the expected nodes/links/weights int nodes_cnt = 0; for (const Node<float>& node : model_deleteModule.getNodes()) if (std::count(node_names.begin(), node_names.end(), node.getName()) > 0) ++nodes_cnt; BOOST_CHECK_EQUAL(nodes_cnt, 5); int links_cnt = 0; for (const Link& link : model_deleteModule.getLinks()) if (std::count(link_names.begin(), link_names.end(), link.getName()) > 0) ++links_cnt; BOOST_CHECK_EQUAL(links_cnt, 2); int weights_cnt = 0; for (const Weight<float>& weight : model_deleteModule.getWeights()) if (std::count(weight_names.begin(), weight_names.end(), weight.getName()) > 0) ++weights_cnt; BOOST_CHECK_EQUAL(weights_cnt, 2); } BOOST_AUTO_TEST_CASE(makeRandomModificationOrder) { ModelReplicatorExt<float> model_replicator; model_replicator.setNNodeDownAdditions(1); model_replicator.setNNodeRightAdditions(0); model_replicator.setNNodeDownCopies(0); model_replicator.setNNodeRightCopies(0); model_replicator.setNLinkAdditions(0); model_replicator.setNLinkCopies(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(0); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNModuleAdditions(0); model_replicator.setNModuleCopies(0); model_replicator.setNModuleDeletions(0); BOOST_CHECK_EQUAL(model_replicator.makeRandomModificationOrder()[0], "add_node_down"); model_replicator.setNNodeDownAdditions(0); model_replicator.setNNodeRightAdditions(1); model_replicator.setNNodeDownCopies(0); model_replicator.setNNodeRightCopies(0); model_replicator.setNLinkAdditions(0); model_replicator.setNLinkCopies(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(0); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNModuleAdditions(0); model_replicator.setNModuleCopies(0); model_replicator.setNModuleDeletions(0); BOOST_CHECK_EQUAL(model_replicator.makeRandomModificationOrder()[0], "add_node_right"); model_replicator.setNNodeDownAdditions(0); model_replicator.setNNodeRightAdditions(0); model_replicator.setNNodeDownCopies(1); model_replicator.setNNodeRightCopies(0); model_replicator.setNLinkAdditions(0); model_replicator.setNLinkCopies(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(0); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNModuleAdditions(0); model_replicator.setNModuleCopies(0); model_replicator.setNModuleDeletions(0); BOOST_CHECK_EQUAL(model_replicator.makeRandomModificationOrder()[0], "copy_node_down"); model_replicator.setNNodeDownAdditions(0); model_replicator.setNNodeRightAdditions(0); model_replicator.setNNodeDownCopies(0); model_replicator.setNNodeRightCopies(1); model_replicator.setNLinkAdditions(0); model_replicator.setNLinkCopies(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(0); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNModuleAdditions(0); model_replicator.setNModuleCopies(0); model_replicator.setNModuleDeletions(0); BOOST_CHECK_EQUAL(model_replicator.makeRandomModificationOrder()[0], "copy_node_right"); model_replicator.setNNodeDownAdditions(0); model_replicator.setNNodeRightAdditions(0); model_replicator.setNNodeDownCopies(0); model_replicator.setNNodeRightCopies(0); model_replicator.setNLinkAdditions(1); model_replicator.setNLinkCopies(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(0); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNModuleAdditions(0); model_replicator.setNModuleCopies(0); model_replicator.setNModuleDeletions(0); BOOST_CHECK_EQUAL(model_replicator.makeRandomModificationOrder()[0], "add_link"); model_replicator.setNNodeDownAdditions(0); model_replicator.setNNodeRightAdditions(0); model_replicator.setNNodeDownCopies(0); model_replicator.setNNodeRightCopies(0); model_replicator.setNLinkAdditions(0); model_replicator.setNLinkCopies(1); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(0); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNModuleAdditions(0); model_replicator.setNModuleCopies(0); model_replicator.setNModuleDeletions(0); BOOST_CHECK_EQUAL(model_replicator.makeRandomModificationOrder()[0], "copy_link"); model_replicator.setNNodeDownAdditions(0); model_replicator.setNNodeRightAdditions(0); model_replicator.setNNodeDownCopies(0); model_replicator.setNNodeRightCopies(0); model_replicator.setNLinkAdditions(0); model_replicator.setNLinkCopies(0); model_replicator.setNNodeDeletions(1); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(0); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNModuleAdditions(0); model_replicator.setNModuleCopies(0); model_replicator.setNModuleDeletions(0); BOOST_CHECK_EQUAL(model_replicator.makeRandomModificationOrder()[0], "delete_node"); model_replicator.setNNodeDownAdditions(0); model_replicator.setNNodeRightAdditions(0); model_replicator.setNNodeDownCopies(0); model_replicator.setNNodeRightCopies(0); model_replicator.setNLinkAdditions(0); model_replicator.setNLinkCopies(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(1); model_replicator.setNNodeActivationChanges(0); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNModuleAdditions(0); model_replicator.setNModuleCopies(0); model_replicator.setNModuleDeletions(0); BOOST_CHECK_EQUAL(model_replicator.makeRandomModificationOrder()[0], "delete_link"); model_replicator.setNNodeDownAdditions(0); model_replicator.setNNodeRightAdditions(0); model_replicator.setNNodeDownCopies(0); model_replicator.setNNodeRightCopies(0); model_replicator.setNLinkAdditions(0); model_replicator.setNLinkCopies(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(1); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNModuleAdditions(0); model_replicator.setNModuleCopies(0); model_replicator.setNModuleDeletions(0); BOOST_CHECK_EQUAL(model_replicator.makeRandomModificationOrder()[0], "change_node_activation"); model_replicator.setNNodeDownAdditions(0); model_replicator.setNNodeRightAdditions(0); model_replicator.setNNodeDownCopies(0); model_replicator.setNNodeRightCopies(0); model_replicator.setNLinkAdditions(0); model_replicator.setNLinkCopies(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(0); model_replicator.setNNodeIntegrationChanges(1); model_replicator.setNModuleAdditions(0); model_replicator.setNModuleCopies(0); model_replicator.setNModuleDeletions(0); BOOST_CHECK_EQUAL(model_replicator.makeRandomModificationOrder()[0], "change_node_integration"); model_replicator.setNNodeDownAdditions(0); model_replicator.setNNodeRightAdditions(0); model_replicator.setNNodeDownCopies(0); model_replicator.setNNodeRightCopies(0); model_replicator.setNLinkAdditions(0); model_replicator.setNLinkCopies(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(0); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNModuleAdditions(1); model_replicator.setNModuleCopies(0); model_replicator.setNModuleDeletions(0); BOOST_CHECK_EQUAL(model_replicator.makeRandomModificationOrder()[0], "add_module"); model_replicator.setNNodeDownAdditions(0); model_replicator.setNNodeRightAdditions(0); model_replicator.setNNodeDownCopies(0); model_replicator.setNNodeRightCopies(0); model_replicator.setNLinkAdditions(0); model_replicator.setNLinkCopies(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(0); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNModuleAdditions(0); model_replicator.setNModuleCopies(1); model_replicator.setNModuleDeletions(0); BOOST_CHECK_EQUAL(model_replicator.makeRandomModificationOrder()[0], "copy_module"); model_replicator.setNNodeDownAdditions(0); model_replicator.setNNodeRightAdditions(0); model_replicator.setNNodeDownCopies(0); model_replicator.setNNodeRightCopies(0); model_replicator.setNLinkAdditions(0); model_replicator.setNLinkCopies(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(0); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNModuleAdditions(0); model_replicator.setNModuleCopies(0); model_replicator.setNModuleDeletions(1); BOOST_CHECK_EQUAL(model_replicator.makeRandomModificationOrder()[0], "delete_module"); // [TODO: update?] bool add_node_found = false; bool add_link_found = false; bool delete_node_found = false; bool delete_link_found = false; bool change_node_activation_found = false; bool change_node_integration_found = false; model_replicator.setNNodeDownAdditions(2); model_replicator.setNLinkAdditions(2); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(2); model_replicator.setNNodeActivationChanges(2); model_replicator.setNNodeIntegrationChanges(2); for (const std::string& modification: model_replicator.makeRandomModificationOrder()) { if (modification == "add_node_down") add_node_found = true; else if (modification == "add_link") add_link_found = true; else if (modification == "delete_node") delete_node_found = true; else if (modification == "delete_link") delete_link_found = true; else if (modification == "change_node_activation") change_node_activation_found = true; else if (modification == "change_node_integration") change_node_integration_found = true; } BOOST_CHECK(add_node_found); BOOST_CHECK(add_link_found); BOOST_CHECK(!delete_node_found); BOOST_CHECK(delete_link_found); BOOST_CHECK(change_node_activation_found); BOOST_CHECK(change_node_integration_found); } // [TODO: update for new ModelReplicator methods] Model<float> model_modifyModel1 = makeModel1(); Model<float> model_modifyModel2 = makeModel1(); Model<float> model_modifyModel3 = makeModel1(); Model<float> model_modifyModel4 = makeModel1(); Model<float> model_modifyModel5 = makeModel1(); Model<float> model_modifyModel6 = makeModel1(); Model<float> model_modifyModel7 = makeModel1(); BOOST_AUTO_TEST_CASE(modifyModel) { ModelReplicatorExt<float> model_replicator; // No change with defaults model_replicator.modifyModel(model_modifyModel1); BOOST_CHECK_EQUAL(model_modifyModel1.getNodes().size(), 8); int node_activation_changes = 0; int node_integration_changes = 0; for (const Node<float>& node : model_modifyModel1.getNodes()) { if (node.getActivation()->getName() == "ELUOp") ++node_activation_changes; if (node.getIntegration()->getName() == "ProdOp") ++node_integration_changes; } BOOST_CHECK_EQUAL(node_activation_changes, 0); BOOST_CHECK_EQUAL(node_integration_changes, 0); BOOST_CHECK_EQUAL(model_modifyModel1.getLinks().size(), 12); BOOST_CHECK_EQUAL(model_modifyModel1.getWeights().size(), 12); model_replicator.setNNodeDownAdditions(1); model_replicator.setNLinkAdditions(1); model_replicator.modifyModel(model_modifyModel1); BOOST_CHECK_EQUAL(model_modifyModel1.getNodes().size(), 10); BOOST_CHECK_EQUAL(model_modifyModel1.getLinks().size(), 15); BOOST_CHECK_EQUAL(model_modifyModel1.getWeights().size(), 15); model_replicator.setNNodeDownAdditions(0); model_replicator.setNLinkAdditions(0); model_replicator.setNNodeDeletions(1); model_replicator.modifyModel(model_modifyModel2); BOOST_CHECK_EQUAL(model_modifyModel2.getNodes().size(), 7); BOOST_CHECK_EQUAL(model_modifyModel2.getLinks().size(), 7); BOOST_CHECK_EQUAL(model_modifyModel2.getWeights().size(), 7); model_replicator.setNNodeDownAdditions(0); model_replicator.setNLinkAdditions(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(1); model_replicator.modifyModel(model_modifyModel3); BOOST_CHECK_EQUAL(model_modifyModel3.getNodes().size(), 8); BOOST_CHECK_EQUAL(model_modifyModel3.getLinks().size(), 11); BOOST_CHECK_EQUAL(model_modifyModel3.getWeights().size(), 11); model_replicator.setNNodeDownAdditions(0); model_replicator.setNLinkAdditions(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(1); model_replicator.setNodeActivations({std::make_pair(std::make_shared<ELUOp<float>>(ELUOp<float>()), std::make_shared<ELUGradOp<float>>(ELUGradOp<float>()))}); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNodeIntegrations({std::make_tuple(std::make_shared<ProdOp<float>>(ProdOp<float>()), std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()), std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>()))}); model_replicator.modifyModel(model_modifyModel4); BOOST_CHECK_EQUAL(model_modifyModel4.getNodes().size(), 8); node_activation_changes = 0; node_integration_changes = 0; for (const Node<float>& node : model_modifyModel4.getNodes()) { if (node.getActivation()->getName() == "ELUOp") ++node_activation_changes; if (node.getIntegration()->getName() == "ProdOp") ++node_integration_changes; } BOOST_CHECK_EQUAL(node_activation_changes, 1); BOOST_CHECK_EQUAL(node_integration_changes, 0); BOOST_CHECK_EQUAL(model_modifyModel4.getLinks().size(), 12); BOOST_CHECK_EQUAL(model_modifyModel4.getWeights().size(), 12); model_replicator.setNNodeDownAdditions(0); model_replicator.setNLinkAdditions(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(0); model_replicator.setNodeActivations({ std::make_pair(std::make_shared<ELUOp<float>>(ELUOp<float>()), std::make_shared<ELUGradOp<float>>(ELUGradOp<float>())) }); model_replicator.setNNodeIntegrationChanges(1); model_replicator.setNodeIntegrations({ std::make_tuple(std::make_shared<ProdOp<float>>(ProdOp<float>()), std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()), std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>())) }); model_replicator.modifyModel(model_modifyModel5); BOOST_CHECK_EQUAL(model_modifyModel5.getNodes().size(), 8); node_activation_changes = 0; node_integration_changes = 0; for (const Node<float>& node : model_modifyModel5.getNodes()) { if (node.getActivation()->getName() == "ELUOp") ++node_activation_changes; if (node.getIntegration()->getName() == "ProdOp") ++node_integration_changes; } BOOST_CHECK_EQUAL(node_activation_changes, 0); BOOST_CHECK_EQUAL(node_integration_changes, 1); BOOST_CHECK_EQUAL(model_modifyModel5.getLinks().size(), 12); BOOST_CHECK_EQUAL(model_modifyModel5.getWeights().size(), 12); model_replicator.setNNodeDownAdditions(0); model_replicator.setNLinkAdditions(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(0); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNModuleAdditions(1); model_replicator.modifyModel(model_modifyModel6); BOOST_CHECK_EQUAL(model_modifyModel6.getNodes().size(), 11); BOOST_CHECK_EQUAL(model_modifyModel6.getLinks().size(), 22); BOOST_CHECK_EQUAL(model_modifyModel6.getWeights().size(), 22); model_replicator.setNNodeDownAdditions(0); model_replicator.setNLinkAdditions(0); model_replicator.setNNodeDeletions(0); model_replicator.setNLinkDeletions(0); model_replicator.setNNodeActivationChanges(0); model_replicator.setNNodeIntegrationChanges(0); model_replicator.setNModuleAdditions(0); model_replicator.setNModuleDeletions(1); model_replicator.modifyModel(model_modifyModel7); BOOST_CHECK_EQUAL(model_modifyModel7.getNodes().size(), 3); BOOST_CHECK_EQUAL(model_modifyModel7.getLinks().size(), 2); BOOST_CHECK_EQUAL(model_modifyModel7.getWeights().size(), 2); } BOOST_AUTO_TEST_SUITE_END()<file_sep>### the directory name set(directory include/EvoNet/core) ### list all header files of the directory here set(sources_list_h half.hpp Helloworld.h Preprocessing.h Statistics.h StringParsing.h ) ### add path to the filenames set(sources_h) foreach(i ${sources_list_h}) list(APPEND sources_h ${directory}/${i}) endforeach(i) ### source group definition source_group("Header Files\\EvoNet\\core" FILES ${sources_h}) set(EvoNet_sources_h ${EvoNet_sources_h} ${sources_h}) <file_sep># Copyright (c) 2012 - 2015, <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # # # 2012-01-31, <NAME> # - Enable Code Coverage # # 2013-09-17, <NAME> # - Added support for Clang. # - Some additional usage instructions. # # 2016-10-11, <NAME> # - Adaption and restructuring of dependencies to fit OpenMS # # USAGE: # 0. (Mac only) If you use Xcode 5.1 make sure to patch geninfo as described here: # http://stackoverflow.com/a/22404544/80480 # # 1. Copy this file into your cmake modules path. # # 2. Add the following line to your CMakeLists.txt: # INCLUDE(CodeCoverage) # # 3. Set compiler flags to turn off optimization and enable coverage # (also use Debug build type to ensure the first two arguments): # # SET(CMAKE_CXX_FLAGS "-g -O0 -fprofile-arcs -ftest-coverage") # SET(CMAKE_C_FLAGS "-g -O0 -fprofile-arcs -ftest-coverage") # # 3. Use the function SETUP_TARGET_FOR_COVERAGE to create a custom make target # which runs your test executable and produces a lcov code coverage report: # Example: # SETUP_TARGET_FOR_COVERAGE( # my_coverage_target # Name for custom target. # coverage # Name of output directory. # /home/my_external_libs # Semicolon seperated paths to exclude external sources from report. # ) # # 4. Build a Debug build and run tests: # cmake -DCMAKE_BUILD_TYPE=Debug .. # make # make test # make my_coverage_target # # # Check prereqs FIND_PROGRAM( GCOV_PATH gcov ) FIND_PROGRAM( LCOV_PATH lcov ) FIND_PROGRAM( GENHTML_PATH genhtml ) IF(NOT GCOV_PATH) MESSAGE(FATAL_ERROR "gcov not found! Aborting...") ENDIF() # NOT GCOV_PATH IF(NOT CMAKE_COMPILER_IS_GNUCXX) IF(NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS 3.0.0) IF(NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang" OR "${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS 5.1.0) MESSAGE(FATAL_ERROR "Compiler ${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION} is neither GNU gcc nor Clang Version > 3.0.0. No support for gcov Coverage analysis.") ENDIF() ENDIF() ENDIF() # NOT CMAKE_COMPILER_IS_GNUCXX SET(CMAKE_CXX_FLAGS_COVERAGE "-g -O0 --coverage -fprofile-arcs -ftest-coverage" CACHE STRING "Flags used by the C++ compiler during coverage builds." FORCE ) SET(CMAKE_C_FLAGS_COVERAGE "-g -O0 --coverage -fprofile-arcs -ftest-coverage" CACHE STRING "Flags used by the C compiler during coverage builds." FORCE ) SET(CMAKE_EXE_LINKER_FLAGS_COVERAGE "" CACHE STRING "Flags used for linking binaries during coverage builds." FORCE ) SET(CMAKE_SHARED_LINKER_FLAGS_COVERAGE "" CACHE STRING "Flags used by the shared libraries linker during coverage builds." FORCE ) MARK_AS_ADVANCED( CMAKE_CXX_FLAGS_COVERAGE CMAKE_C_FLAGS_COVERAGE CMAKE_EXE_LINKER_FLAGS_COVERAGE CMAKE_SHARED_LINKER_FLAGS_COVERAGE ) IF ( NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR CMAKE_BUILD_TYPE STREQUAL "Coverage")) MESSAGE( WARNING "Code coverage results with an optimized (non-Debug) build may be misleading" ) ENDIF() # NOT CMAKE_BUILD_TYPE STREQUAL "Debug" # Param _targetname The name of new the custom make target # Param _outputname lcov output is generated as _outputname.info # HTML report is generated in _outputname/index.html # Param _addignorelibpaths Adds these paths to the --remove option of lcov to e.g. exclude external sources FUNCTION(SETUP_TARGET_FOR_COVERAGE _targetname _outputname _addignorelibpaths) IF(NOT LCOV_PATH) MESSAGE(FATAL_ERROR "lcov not found! Aborting...") ENDIF() # NOT LCOV_PATH IF(NOT GENHTML_PATH) MESSAGE(FATAL_ERROR "genhtml not found! Aborting...") ENDIF() # NOT GENHTML_PATH SET(coverage_info "${CMAKE_BINARY_DIR}/${_outputname}.info") SET(coverage_cleaned "${coverage_info}.cleaned") SET(ignorelibpaths \"tests/*\" \"/usr/*\" \"/Applications/*\") foreach(libpath ${_addignorelibpaths}) list(APPEND ignorelibpaths \"${libpath}/*\") endforeach() ## Workaround that CMake does not complain during configure, that the log is missing. SET_SOURCE_FILES_PROPERTIES( ${CMAKE_CURRENT_BINARY_DIR}/Testing/Temporary/LastTest.log PROPERTIES GENERATED TRUE ) ADD_CUSTOM_COMMAND( OUTPUT ${_outputname}/index.html # Capturing lcov counters and generating report COMMAND ${LCOV_PATH} --directory . --capture --output-file ${coverage_info} # Removing external sources COMMAND ${LCOV_PATH} --remove ${coverage_info} ${ignorelibpaths} --output-file ${coverage_cleaned} # Generating html COMMAND ${GENHTML_PATH} -o ${_outputname} ${coverage_cleaned} # Remove temporaries COMMAND ${CMAKE_COMMAND} -E remove ${coverage_info} ${coverage_cleaned} # Compares the timestamps of the last tests with the index.html of the generated report and # only rebuilds if tests were performed after last report generation. MAIN_DEPENDENCY ${CMAKE_CURRENT_BINARY_DIR}/Testing/Temporary/LastTest.log VERBATIM COMMENT "Coverage data outdated. Processing code coverage counters and generating report." ) # Setup target ADD_CUSTOM_TARGET(${_targetname} # Depends on the output of the previous command. Always checks if this custom_command needs to be re-executed. DEPENDS testsExecuted ${_outputname}/index.html WORKING_DIRECTORY ${CMAKE_BINARY_DIR} COMMENT "Coverage report up-to-date. Re-run tests if you need a new report." ) # This target is basically there for a better error message. # Otherwise you get "No target for Testing/Temporary/LastTest.log" # Workaround because you can not depend on internal targets like "test" (https://gitlab.kitware.com/cmake/cmake/issues/8438) # Alternative: Always auto-invoke "make test" before. But in some scenarios you already have them built already. ADD_CUSTOM_TARGET(testsExecuted COMMAND ${CMAKE_COMMAND} -E md5sum "${CMAKE_CURRENT_BINARY_DIR}/Testing/Temporary/LastTest.log" COMMENT "Checking existence of test timestamp. If this step fails, please run 'make test' (again)." ) # Show info where to find the report and clean up. ADD_CUSTOM_COMMAND(TARGET ${_targetname} POST_BUILD COMMAND ${LCOV_PATH} --directory . --zerocounters COMMENT "Cleaning up counters.. for another fresh coverage scan please execute make test again.\nOpen ./${_outputname}/index.html in your browser to view the coverage report." ) ENDFUNCTION() # SETUP_TARGET_FOR_COVERAGE <file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/PopulationTrainerDefaultDevice.h> #include <EvoNet/ml/ModelTrainerDefaultDevice.h> #include <EvoNet/ml/ModelReplicator.h> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/io/PopulationTrainerFile.h> #include <EvoNet/io/ModelInterpreterFileDefaultDevice.h> #include <EvoNet/simulator/BiochemicalReaction.h> #include <unsupported/Eigen/CXX11/Tensor> using namespace EvoNet; // Other extended classes template<typename TensorT> class ModelReplicatorExt : public ModelReplicator<TensorT> {}; template<typename TensorT> class PopulationTrainerExt : public PopulationTrainerDefaultDevice<TensorT> {}; template<typename TensorT> class MetDataSimClassification : public DataSimulator<TensorT> { public: void simulateDataClassMARs(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps, const bool& train) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_loss_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); if (train) assert(n_input_nodes == this->model_training_.reaction_ids_.size()); else assert(n_input_nodes == this->model_validation_.reaction_ids_.size()); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { // pick a random sample group name std::string sample_group_name; if (train) sample_group_name = selectRandomElement(this->model_training_.sample_group_names_); else sample_group_name = selectRandomElement(this->model_validation_.sample_group_names_); // assign the input data for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { //input_data(batch_iter, memory_iter, nodes_iter) = conc_data(nodes_iter); TensorT value; if (train) value = this->model_training_.calculateMAR( this->model_training_.metabolomicsData_.at(sample_group_name), this->model_training_.biochemicalReactions_.at(this->model_training_.reaction_ids_.at(nodes_iter))); else value = this->model_validation_.calculateMAR( this->model_validation_.metabolomicsData_.at(sample_group_name), this->model_validation_.biochemicalReactions_.at(this->model_validation_.reaction_ids_.at(nodes_iter))); input_data(batch_iter, memory_iter, nodes_iter) = value; } // convert the label to a one hot vector Eigen::Tensor<TensorT, 1> one_hot_vec((int)this->model_training_.labels_.size()); if (train) one_hot_vec = OneHotEncoder<std::string, TensorT>(this->model_training_.metaData_.at(sample_group_name).condition, this->model_training_.labels_); else one_hot_vec = OneHotEncoder<std::string, TensorT>(this->model_validation_.metaData_.at(sample_group_name).condition, this->model_validation_.labels_); Eigen::Tensor<TensorT, 1> one_hot_vec_smoothed = one_hot_vec.unaryExpr(LabelSmoother<TensorT>(0.01, 0.01)); // MSE or LogLoss only size_t n_labels; if (train) n_labels = this->model_training_.labels_.size(); else n_labels = this->model_validation_.labels_.size(); for (int nodes_iter = 0; nodes_iter < n_labels; ++nodes_iter) { loss_output_data(batch_iter, memory_iter, nodes_iter) = one_hot_vec(nodes_iter); loss_output_data(batch_iter, memory_iter, nodes_iter + (int)n_labels) = one_hot_vec(nodes_iter); metric_output_data(batch_iter, memory_iter, nodes_iter) = one_hot_vec(nodes_iter); metric_output_data(batch_iter, memory_iter, nodes_iter + (int)n_labels) = one_hot_vec(nodes_iter); } } } // update the time_steps time_steps.setConstant(1.0f); } void simulateDataClassSampleConcs(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps, const bool& train) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_loss_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); if (train) assert(n_input_nodes == this->model_training_.component_group_names_.size()); else assert(n_input_nodes == this->model_validation_.component_group_names_.size()); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { // pick a random sample group name std::string sample_group_name; if (train) sample_group_name = selectRandomElement(this->model_training_.sample_group_names_); else sample_group_name = selectRandomElement(this->model_validation_.sample_group_names_); // assign the input data for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { TensorT value; if (train) value = this->model_training_.getRandomConcentration( this->model_training_.metabolomicsData_.at(sample_group_name), this->model_training_.component_group_names_.at(nodes_iter)); else value = this->model_validation_.getRandomConcentration( this->model_validation_.metabolomicsData_.at(sample_group_name), this->model_validation_.component_group_names_.at(nodes_iter)); input_data(batch_iter, memory_iter, nodes_iter) = value; } // convert the label to a one hot vector Eigen::Tensor<TensorT, 1> one_hot_vec((int)this->model_training_.labels_.size()); if (train) one_hot_vec = OneHotEncoder<std::string, TensorT>(this->model_training_.metaData_.at(sample_group_name).condition, this->model_training_.labels_); else one_hot_vec = OneHotEncoder<std::string, TensorT>(this->model_validation_.metaData_.at(sample_group_name).condition, this->model_validation_.labels_); Eigen::Tensor<TensorT, 1> one_hot_vec_smoothed = one_hot_vec.unaryExpr(LabelSmoother<TensorT>(0.01, 0.01)); // MSE or LogLoss only size_t n_labels; if (train) n_labels = this->model_training_.labels_.size(); else n_labels = this->model_validation_.labels_.size(); for (int nodes_iter = 0; nodes_iter < n_labels; ++nodes_iter) { loss_output_data(batch_iter, memory_iter, nodes_iter) = one_hot_vec_smoothed(nodes_iter); loss_output_data(batch_iter, memory_iter, nodes_iter + (int)n_labels) = one_hot_vec(nodes_iter); metric_output_data(batch_iter, memory_iter, nodes_iter) = one_hot_vec_smoothed(nodes_iter); metric_output_data(batch_iter, memory_iter, nodes_iter + (int)n_labels) = one_hot_vec_smoothed(nodes_iter); } } } // update the time_steps time_steps.setConstant(1.0f); } void simulateDataClassConcs(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps, const bool& train) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_loss_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); if (train) assert(n_input_nodes == this->model_training_.component_group_names_.size()); else assert(n_input_nodes == this->model_validation_.component_group_names_.size()); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { // pick a random sample group name std::string sample_group_name; int max_replicates = 0; if (train) { sample_group_name = selectRandomElement(this->model_training_.sample_group_names_); max_replicates = this->model_training_.metabolomicsData_.at(sample_group_name).at(this->model_training_.component_group_names_.at(0)).size(); } else { sample_group_name = selectRandomElement(this->model_validation_.sample_group_names_); max_replicates = this->model_validation_.metabolomicsData_.at(sample_group_name).at(this->model_validation_.component_group_names_.at(0)).size(); } // pick a random replicate std::vector<int> replicates; for (int i = 0; i < max_replicates; ++i) { replicates.push_back(i); } const int replicate = selectRandomElement(replicates); // assign the input data for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { TensorT value; if (train) value = this->model_training_.metabolomicsData_.at(sample_group_name).at(this->model_training_.component_group_names_.at(nodes_iter)).at(replicate).calculated_concentration; else value = this->model_validation_.metabolomicsData_.at(sample_group_name).at(this->model_validation_.component_group_names_.at(nodes_iter)).at(replicate).calculated_concentration; input_data(batch_iter, memory_iter, nodes_iter) = value; } // convert the label to a one hot vector Eigen::Tensor<TensorT, 1> one_hot_vec((int)this->model_training_.labels_.size()); if (train) one_hot_vec = OneHotEncoder<std::string, TensorT>(this->model_training_.metaData_.at(sample_group_name).condition, this->model_training_.labels_); else one_hot_vec = OneHotEncoder<std::string, TensorT>(this->model_validation_.metaData_.at(sample_group_name).condition, this->model_validation_.labels_); Eigen::Tensor<TensorT, 1> one_hot_vec_smoothed = one_hot_vec.unaryExpr(LabelSmoother<TensorT>(0.01, 0.01)); // MSE or LogLoss only size_t n_labels; if (train) n_labels = this->model_training_.labels_.size(); else n_labels = this->model_validation_.labels_.size(); for (int nodes_iter = 0; nodes_iter < n_labels; ++nodes_iter) { loss_output_data(batch_iter, memory_iter, nodes_iter) = one_hot_vec_smoothed(nodes_iter); loss_output_data(batch_iter, memory_iter, nodes_iter + (int)n_labels) = one_hot_vec(nodes_iter); metric_output_data(batch_iter, memory_iter, nodes_iter) = one_hot_vec_smoothed(nodes_iter); metric_output_data(batch_iter, memory_iter, nodes_iter + (int)n_labels) = one_hot_vec_smoothed(nodes_iter); } } } // update the time_steps time_steps.setConstant(1.0f); } void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { if (simulate_MARs_) simulateDataClassMARs(input_data, loss_output_data, metric_output_data, time_steps, true); else if (sample_concs_) simulateDataClassSampleConcs(input_data, loss_output_data, metric_output_data, time_steps, true); else simulateDataClassConcs(input_data, loss_output_data, metric_output_data, time_steps, true); } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { if (simulate_MARs_) simulateDataClassMARs(input_data, loss_output_data, metric_output_data, time_steps, false); else if (sample_concs_) simulateDataClassSampleConcs(input_data, loss_output_data, metric_output_data, time_steps, false); else simulateDataClassConcs(input_data, loss_output_data, metric_output_data, time_steps, false); } BiochemicalReactionModel<TensorT> model_training_; BiochemicalReactionModel<TensorT> model_validation_; bool sample_concs_ = false; bool simulate_MARs_ = true; }; template<typename TensorT> class MetDataSimBatchCorrection : public DataSimulator<TensorT> { public: void simulateDataReconMARs(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps, const bool& train) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_loss_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); int n_input_pixels; if (train) n_input_pixels = this->model_training_batch_1_.reaction_ids_.size(); else n_input_pixels = this->model_validation_batch_1_.reaction_ids_.size(); assert(n_loss_output_nodes == n_input_pixels); assert(n_metric_output_nodes % n_input_pixels == 0); assert(n_input_nodes == 2 * n_input_pixels); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { // pick a random sample group name std::string sample_group_name; if (train) sample_group_name = selectRandomElement(this->model_training_batch_1_.sample_group_names_); else sample_group_name = selectRandomElement(this->model_validation_batch_2_.sample_group_names_); for (int nodes_iter = 0; nodes_iter < n_input_pixels; ++nodes_iter) { TensorT value_batch_1, value_batch_2; if (train) { value_batch_1 = this->model_training_batch_1_.calculateMAR( this->model_training_batch_1_.metabolomicsData_.at(sample_group_name), this->model_training_batch_1_.biochemicalReactions_.at(this->model_training_batch_1_.reaction_ids_.at(nodes_iter))); value_batch_2 = this->model_training_batch_2_.calculateMAR( this->model_training_batch_2_.metabolomicsData_.at(sample_group_name), this->model_training_batch_2_.biochemicalReactions_.at(this->model_training_batch_2_.reaction_ids_.at(nodes_iter))); } else { value_batch_1 = this->model_validation_batch_1_.calculateMAR( this->model_validation_batch_1_.metabolomicsData_.at(sample_group_name), this->model_validation_batch_1_.biochemicalReactions_.at(this->model_validation_batch_1_.reaction_ids_.at(nodes_iter))); value_batch_2 = this->model_validation_batch_2_.calculateMAR( this->model_validation_batch_2_.metabolomicsData_.at(sample_group_name), this->model_validation_batch_2_.biochemicalReactions_.at(this->model_validation_batch_2_.reaction_ids_.at(nodes_iter))); } input_data(batch_iter, memory_iter, nodes_iter) = value_batch_1; input_data(batch_iter, memory_iter, nodes_iter + n_input_pixels) = value_batch_2; loss_output_data(batch_iter, memory_iter, nodes_iter) = 0; metric_output_data(batch_iter, memory_iter, nodes_iter) = 0; } } } } void simulateDataReconSampleConcs(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps, const bool& train) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_loss_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); int n_input_pixels; if (train) n_input_pixels = this->model_training_batch_1_.component_group_names_.size(); else n_input_pixels = this->model_validation_batch_1_.component_group_names_.size(); assert(n_loss_output_nodes == n_input_pixels); assert(n_metric_output_nodes % n_input_pixels == 0); assert(n_input_nodes == 2 * n_input_pixels); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { // pick a random sample group name std::string sample_group_name; if (train) sample_group_name = selectRandomElement(this->model_training_batch_1_.sample_group_names_); else sample_group_name = selectRandomElement(this->model_validation_batch_1_.sample_group_names_); for (int nodes_iter = 0; nodes_iter < n_input_pixels; ++nodes_iter) { TensorT value_batch_1, value_batch_2; if (train) { value_batch_1 = this->model_training_batch_1_.getRandomConcentration( this->model_training_batch_1_.metabolomicsData_.at(sample_group_name), this->model_training_batch_1_.component_group_names_.at(nodes_iter)); value_batch_2 = this->model_training_batch_2_.getRandomConcentration( this->model_training_batch_2_.metabolomicsData_.at(sample_group_name), this->model_training_batch_2_.component_group_names_.at(nodes_iter)); } else { value_batch_1 = this->model_validation_batch_1_.getRandomConcentration( this->model_validation_batch_1_.metabolomicsData_.at(sample_group_name), this->model_validation_batch_1_.component_group_names_.at(nodes_iter)); value_batch_2 = this->model_validation_batch_2_.getRandomConcentration( this->model_validation_batch_2_.metabolomicsData_.at(sample_group_name), this->model_validation_batch_2_.component_group_names_.at(nodes_iter)); } input_data(batch_iter, memory_iter, nodes_iter) = value_batch_1; input_data(batch_iter, memory_iter, nodes_iter + n_input_pixels) = value_batch_2; loss_output_data(batch_iter, memory_iter, nodes_iter) = 0; metric_output_data(batch_iter, memory_iter, nodes_iter) = 0; } } } } void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { if (simulate_MARs_) simulateDataReconMARs(input_data, loss_output_data, metric_output_data, time_steps, true); else simulateDataReconSampleConcs(input_data, loss_output_data, metric_output_data, time_steps, true); } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { if (simulate_MARs_) simulateDataReconMARs(input_data, loss_output_data, metric_output_data, time_steps, false); else simulateDataReconSampleConcs(input_data, loss_output_data, metric_output_data, time_steps, false); } BiochemicalReactionModel<TensorT> model_training_batch_1_; BiochemicalReactionModel<TensorT> model_training_batch_2_; BiochemicalReactionModel<TensorT> model_validation_batch_1_; BiochemicalReactionModel<TensorT> model_validation_batch_2_; bool sample_concs_ = false; bool simulate_MARs_ = true; }; template<typename TensorT> class ModelTrainerExt : public ModelTrainerDefaultDevice<TensorT> { public: /* @brief Fully connected auto-encoder model */ void makeModelBatchCorrectionAE(Model<TensorT>& model, const int& n_inputs, const int& n_encodings, const bool& linear_scale_input, const bool& log_transform_input, const bool& standardize_input, const bool& add_norm = true, const int& n_en_hidden_0 = 64, const int& n_en_hidden_1 = 0, const int& n_en_hidden_2 = 0, const int& n_de_hidden_0 = 64, const int& n_de_hidden_1 = 0, const int& n_de_hidden_2 = 0) { model.setId(0); model.setName("AE"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", n_inputs, true); // Data pre-processing steps this->addDataPreproccessingSteps(model, "Input", node_names_input, linear_scale_input, log_transform_input, standardize_input); // Add the encoding layers std::vector<std::string> node_names = node_names_input; if (n_en_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "EN0", "EN0", node_names, n_en_hidden_0, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_en_hidden_0) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "EN0-Norm", "EN0-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "EN0-Norm-gain", "EN0-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } if (n_en_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "EN1", "EN1", node_names, n_en_hidden_1, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_en_hidden_1) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "EN1-Norm", "EN1-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "EN1-Norm-gain", "EN1-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } if (n_en_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "EN2", "EN2", node_names, n_en_hidden_2, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_en_hidden_2) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "EN2-Norm", "EN2-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "EN2-Norm-gain", "EN2-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } // Add the mu and log var layers //std::vector<std::string> node_names_mu = model_builder.addFullyConnected(model, "Mu", "Mu", node_names, n_encodings, // FIXME std::vector<std::string> node_names_mu = model_builder.addSinglyConnected(model, "Mu", "Mu", node_names, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_encodings) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, true, true); // FIXME //std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); // Add a link between the mu and the encoding node_names = model_builder.addSinglyConnected(model, "Encoding", "Encoding", node_names_mu, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); // Add the decoding layers if (n_de_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names, n_de_hidden_0, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_de_hidden_0) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "DE0-Norm", "DE0-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "DE0-Norm-gain", "DE0-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } if (n_de_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names, n_de_hidden_1, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_de_hidden_1) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "DE1-Norm", "DE1-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "DE1-Norm-gain", "DE1-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } if (n_de_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "DE2", "DE2", node_names, n_de_hidden_2, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_de_hidden_2) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "DE2-Norm", "DE2-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "DE2-Norm-gain", "DE2-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } // Add the final output layer //node_names = model_builder.addFullyConnected(model, "Output-AE", "Output-AE", node_names, n_inputs, // FIXME node_names = model_builder.addSinglyConnected(model, "Output-AE", "Output-AE", node_names, n_inputs, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_inputs) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); // Add the inputs std::vector<std::string> node_names_expected = model_builder.addInputNodes(model, "Expected", "Expected", n_inputs, true); // Data pre-processing steps this->addDataPreproccessingSteps(model, "Expected", node_names_expected, linear_scale_input, log_transform_input, standardize_input); // Subtract out the pre-processed input data to test against all 0's model_builder.addSinglyConnected(model, "Output-AE", node_names_expected, node_names, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(-1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, true); // Specify the output node types manually for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } /* @brief Fully connected auto-encoder model */ void makeModelBatchCorrectionClassifier(Model<TensorT>& model, const int& n_inputs, const int& n_outputs, const int& n_encodings, const bool& linear_scale_input, const bool& log_transform_input, const bool& standardize_input, const bool& add_norm = true, const int& n_en_hidden_0 = 64, const int& n_en_hidden_1 = 0, const int& n_en_hidden_2 = 0, const int& n_de_hidden_0 = 64, const int& n_de_hidden_1 = 0, const int& n_de_hidden_2 = 0, const int& n_hidden_0 = 32, const int& n_hidden_1 = 0, const int& n_hidden_2 = 0) { model.setId(0); model.setName("AE"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", n_inputs, true); // Data pre-processing steps this->addDataPreproccessingSteps(model, "Input", node_names_input, linear_scale_input, log_transform_input, standardize_input); // Add the encoding layers std::vector<std::string> node_names = node_names_input; if (n_en_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "EN0", "EN0", node_names, n_en_hidden_0, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_en_hidden_0) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "EN0-Norm", "EN0-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "EN0-Norm-gain", "EN0-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } if (n_en_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "EN1", "EN1", node_names, n_en_hidden_1, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_en_hidden_1) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "EN1-Norm", "EN1-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "EN1-Norm-gain", "EN1-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } if (n_en_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "EN2", "EN2", node_names, n_en_hidden_2, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_en_hidden_2) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "EN2-Norm", "EN2-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "EN2-Norm-gain", "EN2-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } // Add the mu and log var layers //std::vector<std::string> node_names_mu = model_builder.addFullyConnected(model, "Mu", "Mu", node_names, n_encodings, //FIXME std::vector<std::string> node_names_mu = model_builder.addSinglyConnected(model, "Mu", "Mu", node_names, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_encodings) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, true, true); // FIXME //std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); // Add a link between the mu and the encoding node_names = model_builder.addSinglyConnected(model, "Encoding", "Encoding", node_names_mu, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); // Add the decoding layers if (n_de_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names, n_de_hidden_0, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_de_hidden_0) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "DE0-Norm", "DE0-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "DE0-Norm-gain", "DE0-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } if (n_de_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names, n_de_hidden_1, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_de_hidden_1) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "DE1-Norm", "DE1-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "DE1-Norm-gain", "DE1-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } if (n_de_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "DE2", "DE2", node_names, n_de_hidden_2, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_de_hidden_2) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "DE2-Norm", "DE2-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "DE2-Norm-gain", "DE2-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } // Add the AE Output layer //node_names = model_builder.addFullyConnected(model, "Output-AE", "Output-AE", node_names, n_inputs, // FIXME node_names = model_builder.addSinglyConnected(model, "Output-AE", "Output-AE", node_names, n_inputs, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_inputs) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); // Add the classifier hidden layers if (n_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "FC0", "FC0", node_names, n_hidden_0, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_hidden_0) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "FC0-Norm", "FC0-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "FC0-Norm-gain", "FC0-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } if (n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "FC1", "FC1", node_names, n_hidden_1, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_hidden_1) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "FC1-Norm", "FC1-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "FC1-Norm-gain", "FC1-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } if (n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "FC2", "FC2", node_names, n_hidden_2, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_hidden_2) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "FC2-Norm", "FC2-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "FC2-Norm-gain", "FC2-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } node_names = model_builder.addFullyConnected(model, "Output", "Output", node_names, n_outputs, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_outputs) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } /* @brief Fully connected classifier */ void makeModelFCClass(Model<TensorT>& model, const int& n_inputs, const int& n_outputs, const bool& linear_scale_input, const bool& log_transform_input, const bool& standardize_input, const bool& add_norm = true, const int& n_hidden_0 = 32, const int& n_hidden_1 = 0, const int& n_hidden_2 = 0) { model.setId(0); model.setName("Classifier"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", n_inputs, true); // Data pre-processing steps this->addDataPreproccessingSteps(model, "Input", node_names, linear_scale_input, log_transform_input, standardize_input); // Add the hidden layers if (n_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "FC0", "FC0", node_names, n_hidden_0, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_hidden_0) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "FC0-Norm", "FC0-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "FC0-Norm-gain", "FC0-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } if (n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "FC1", "FC1", node_names, n_hidden_1, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_hidden_1) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "FC1-Norm", "FC1-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "FC1-Norm-gain", "FC1-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } if (n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "FC2", "FC2", node_names, n_hidden_2, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), //std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), //std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_hidden_2) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); if (add_norm) { node_names = model_builder.addNormalization(model, "FC2-Norm", "FC2-Norm", node_names, true); node_names = model_builder.addSinglyConnected(model, "FC2-Norm-gain", "FC2-Norm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, true); } } node_names = model_builder.addFullyConnected(model, "Output", "Output", node_names, n_outputs, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_outputs) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } /* @brief Add data preprocessing steps */ void addDataPreproccessingSteps(Model<TensorT>& model, const std::string& module_name, std::vector<std::string>& node_names, const bool& linear_scale_input, const bool& log_transform_input, const bool& standardize_input) { ModelBuilder<TensorT> model_builder; // Data pre-processing steps if (log_transform_input) { std::string name = "LogScale" + module_name; node_names = model_builder.addSinglyConnected(model, name, name, node_names, node_names.size(), std::shared_ptr<ActivationOp<TensorT>>(new LogOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new LogGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, 0.0, false, true); } if (linear_scale_input) { std::string name = "LinearScaleFunctor" + module_name; node_names = model_builder.addLinearScale(model, name, name, node_names, 0, 1, true); } if (standardize_input) { std::string name = "Standardize" + module_name; node_names = model_builder.addNormalization(model, name, name, node_names, true); } } void adaptiveTrainerScheduler( const int& n_generations, const int& n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, const std::vector<float>& model_errors) { // Check point the model every 1000 epochs if (n_epochs % 500 == 0 && n_epochs != 0) { model_interpreter.getModelResults(model, false, true, false, false); // save the model weights WeightFile<float> weight_data; weight_data.storeWeightValuesCsv(model.getName() + "_" + std::to_string(n_epochs) + "_weights.csv", model.weights_); // save the model and tensors to binary ModelFile<TensorT> data; data.storeModelBinary(model.getName() + "_" + std::to_string(n_epochs) + "_model.binary", model); ModelInterpreterFileDefaultDevice<TensorT> interpreter_data; interpreter_data.storeModelInterpreterBinary(model.getName() + "_" + std::to_string(n_epochs) + "_interpreter.binary", model_interpreter); } } void trainingModelLogger(const int & n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const TensorT & model_error_train, const TensorT & model_error_test, const Eigen::Tensor<TensorT, 1> & model_metrics_train, const Eigen::Tensor<TensorT, 1> & model_metrics_test) { // Set the defaults model_logger.setLogTimeEpoch(true); model_logger.setLogTrainValMetricEpoch(true); model_logger.setLogExpectedEpoch(false); // initialize all logs if (n_epochs == 0) { model_logger.setLogExpectedEpoch(true); model_logger.initLogs(model); } // Per n epoch logging if (n_epochs % 1000 == 0) { model_logger.setLogExpectedEpoch(true); model_interpreter.getModelResults(model, true, false, false); } // Create the metric headers and data arrays std::vector<std::string> log_train_headers = { "Train_Error" }; std::vector<std::string> log_test_headers = { "Test_Error" }; std::vector<TensorT> log_train_values = { model_error_train }; std::vector<TensorT> log_test_values = { model_error_test }; int metric_iter = 0; for (const std::string& metric_name : this->metric_names_) { log_train_headers.push_back(metric_name); log_test_headers.push_back(metric_name); log_train_values.push_back(model_metrics_train(metric_iter)); log_test_values.push_back(model_metrics_test(metric_iter)); ++metric_iter; } model_logger.writeLogs(model, n_epochs, log_train_headers, log_test_headers, log_train_values, log_test_values, output_nodes, expected_values); } void validationModelLogger(const int & n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const TensorT & model_error_train, const TensorT & model_error_test, const Eigen::Tensor<TensorT, 1> & model_metrics_train, const Eigen::Tensor<TensorT, 1> & model_metrics_test) { // Set the defaults model_logger.setLogTimeEpoch(true); model_logger.setLogTrainValMetricEpoch(true); model_logger.setLogExpectedEpoch(false); // initialize all logs if (n_epochs == 0) { model_logger.setLogExpectedEpoch(true); model_logger.initLogs(model); } // Per n epoch logging if (n_epochs % 1000 == 0) { model_logger.setLogExpectedEpoch(true); model_interpreter.getModelResults(model, true, false, false); } // Create the metric headers and data arrays std::vector<std::string> log_train_headers = { "Train_Error" }; std::vector<std::string> log_test_headers = { "Test_Error" }; std::vector<TensorT> log_train_values = { model_error_train }; std::vector<TensorT> log_test_values = { model_error_test }; int metric_iter = 0; for (const std::string& metric_name : this->metric_names_) { log_train_headers.push_back(metric_name); log_test_headers.push_back(metric_name); log_train_values.push_back(model_metrics_train(metric_iter)); log_test_values.push_back(model_metrics_test(metric_iter)); ++metric_iter; } model_logger.writeLogs(model, n_epochs, log_train_headers, log_test_headers, log_train_values, log_test_values, output_nodes, expected_values); } }; /// Script to train the batch correction network void main_batchCorrectionAE(const std::string& biochem_rxns_filename, const std::string& metabo_data_filename_train_batch_1, const std::string& metabo_data_filename_test_batch_1, const std::string& meta_data_filename_train_batch_1, const std::string& meta_data_filename_test_batch_1, const std::string& metabo_data_filename_train_batch_2, const std::string& metabo_data_filename_test_batch_2, const std::string& meta_data_filename_train_batch_2, const std::string& meta_data_filename_test_batch_2, bool make_model = true, bool simulate_MARs = true, bool sample_concs = true) { // define the multithreading parameters const int n_threads = 1; // define the data simulator BiochemicalReactionModel<float> reaction_model; MetDataSimBatchCorrection<float> metabolomics_data; // Training data batch 1 reaction_model.clear(); reaction_model.readBiochemicalReactions(biochem_rxns_filename, true); reaction_model.readMetabolomicsData(metabo_data_filename_train_batch_1); reaction_model.readMetaData(meta_data_filename_train_batch_1); reaction_model.findComponentGroupNames(); if (simulate_MARs) { reaction_model.findMARs(); reaction_model.findMARs(true, false); reaction_model.findMARs(false, true); reaction_model.removeRedundantMARs(); } reaction_model.findLabels(); metabolomics_data.model_training_batch_1_ = reaction_model; // Training data batch 2 reaction_model.clear(); reaction_model.readBiochemicalReactions(biochem_rxns_filename, true); reaction_model.readMetabolomicsData(metabo_data_filename_train_batch_2); reaction_model.readMetaData(meta_data_filename_train_batch_2); reaction_model.findComponentGroupNames(); if (simulate_MARs) { reaction_model.findMARs(); reaction_model.findMARs(true, false); reaction_model.findMARs(false, true); reaction_model.removeRedundantMARs(); } reaction_model.findLabels(); metabolomics_data.model_training_batch_2_ = reaction_model; // Validation data batch 1 reaction_model.clear(); reaction_model.readBiochemicalReactions(biochem_rxns_filename, true); reaction_model.readMetabolomicsData(metabo_data_filename_test_batch_1); reaction_model.readMetaData(meta_data_filename_test_batch_1); reaction_model.findComponentGroupNames(); if (simulate_MARs) { reaction_model.findMARs(); reaction_model.findMARs(true, false); reaction_model.findMARs(false, true); reaction_model.removeRedundantMARs(); } reaction_model.findLabels(); metabolomics_data.model_validation_batch_1_ = reaction_model; // Validation data batch 1 reaction_model.clear(); reaction_model.readBiochemicalReactions(biochem_rxns_filename, true); reaction_model.readMetabolomicsData(metabo_data_filename_test_batch_2); reaction_model.readMetaData(meta_data_filename_test_batch_2); reaction_model.findComponentGroupNames(); if (simulate_MARs) { reaction_model.findMARs(); reaction_model.findMARs(true, false); reaction_model.findMARs(false, true); reaction_model.removeRedundantMARs(); } reaction_model.findLabels(); metabolomics_data.model_validation_batch_2_ = reaction_model; metabolomics_data.simulate_MARs_ = simulate_MARs; metabolomics_data.sample_concs_ = sample_concs; // Checks for the training and validation data assert(metabolomics_data.model_training_batch_1_.reaction_ids_.size() == metabolomics_data.model_training_batch_2_.reaction_ids_.size()); assert(metabolomics_data.model_validation_batch_1_.reaction_ids_.size() == metabolomics_data.model_validation_batch_2_.reaction_ids_.size()); assert(metabolomics_data.model_training_batch_1_.component_group_names_.size() == metabolomics_data.model_training_batch_2_.component_group_names_.size()); assert(metabolomics_data.model_validation_batch_1_.component_group_names_.size() == metabolomics_data.model_validation_batch_2_.component_group_names_.size()); // Define the model input/output nodes int n_input_nodes; if (simulate_MARs) n_input_nodes = reaction_model.reaction_ids_.size(); else n_input_nodes = reaction_model.component_group_names_.size(); const int n_output_nodes = n_input_nodes; const int encoding_size = 64; // Make the input nodes std::vector<std::string> input_nodes; for (int i = 0; i < n_input_nodes; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } for (int i = 0; i < n_input_nodes; ++i) { char name_char[512]; sprintf(name_char, "Expected_%012d", i); std::string name(name_char); input_nodes.push_back(name); } // Make the reconstruction nodes std::vector<std::string> output_nodes_ae; for (int i = 0; i < n_input_nodes; ++i) { char name_char[512]; sprintf(name_char, "Output-AE_%012d", i); std::string name(name_char); output_nodes_ae.push_back(name); } // define the model trainers and resources for the trainers std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; for (size_t i = 0; i < n_threads; ++i) { ModelResources model_resources = { ModelDevice(0, 1) }; ModelInterpreterDefaultDevice<float> model_interpreter(model_resources); model_interpreters.push_back(model_interpreter); } ModelTrainerExt<float> model_trainer; model_trainer.setBatchSize(64); model_trainer.setMemorySize(1); model_trainer.setNEpochsTraining(100000); model_trainer.setNEpochsValidation(0); model_trainer.setVerbosityLevel(1); model_trainer.setLogging(true, false, false); model_trainer.setFindCycles(false); model_trainer.setFastInterpreter(true); model_trainer.setPreserveOoO(true); model_trainer.setLossFunctions({ std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-6, 1.0)) }); model_trainer.setLossFunctionGrads({ std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-6, 1.0)) }); model_trainer.setLossOutputNodes({ output_nodes_ae }); model_trainer.setMetricFunctions({ std::make_shared<MAEOp<float>>(MAEOp<float>()) }); model_trainer.setMetricOutputNodes({ output_nodes_ae }); model_trainer.setMetricNames({ "MAE" }); // define the model logger ModelLogger<float> model_logger(true, true, false, false, false, false, false); // initialize the model replicator ModelReplicatorExt<float> model_replicator; // define the model Model<float> model; if (make_model) { model_trainer.makeModelBatchCorrectionAE(model, n_input_nodes, encoding_size, true, false, false, false, 0, 0, 0, 0, 0, 0); // normalization type 1 } else { // TODO: load in the trained model } // Train the model std::pair<std::vector<float>, std::vector<float>> model_errors = model_trainer.trainModel(model, metabolomics_data, input_nodes, model_logger, model_interpreters.front()); } /// Script to evaluate the batch correction AE + classifier networks void main_batchCorrectionClassification(const std::string& biochem_rxns_filename, const std::string& metabo_data_filename_train, const std::string& metabo_data_filename_test, const std::string& meta_data_filename_train, const std::string& meta_data_filename_test, const std::string& model_ae_weight_filename, const std::string& model_ae_classifier_weights_filename, const std::string& model_classifier_weight_filename, bool simulate_MARs = true, bool sample_concs = true) { // define the multithreading parameters const int n_threads = 1; // define the data simulator BiochemicalReactionModel<float> reaction_model; MetDataSimClassification<float> metabolomics_data; // Training data reaction_model.clear(); reaction_model.readBiochemicalReactions(biochem_rxns_filename, true); reaction_model.readMetabolomicsData(metabo_data_filename_train); reaction_model.readMetaData(meta_data_filename_train); reaction_model.findComponentGroupNames(); if (simulate_MARs) { reaction_model.findMARs(); reaction_model.findMARs(true, false); reaction_model.findMARs(false, true); reaction_model.removeRedundantMARs(); } reaction_model.findLabels(); metabolomics_data.model_training_ = reaction_model; // Validation data reaction_model.clear(); reaction_model.readBiochemicalReactions(biochem_rxns_filename, true); reaction_model.readMetabolomicsData(metabo_data_filename_test); reaction_model.readMetaData(meta_data_filename_test); reaction_model.findComponentGroupNames(); if (simulate_MARs) { reaction_model.findMARs(); reaction_model.findMARs(true, false); reaction_model.findMARs(false, true); reaction_model.removeRedundantMARs(); } reaction_model.findLabels(); metabolomics_data.model_validation_ = reaction_model; metabolomics_data.simulate_MARs_ = simulate_MARs; metabolomics_data.sample_concs_ = sample_concs; // Checks for the training and validation data assert(metabolomics_data.model_training_.reaction_ids_.size() == metabolomics_data.model_validation_.reaction_ids_.size()); assert(metabolomics_data.model_validation_.labels_.size() == metabolomics_data.model_training_.labels_.size()); assert(metabolomics_data.model_training_.component_group_names_.size() == metabolomics_data.model_validation_.component_group_names_.size()); // Define the model input/output nodes int n_input_nodes; if (simulate_MARs) n_input_nodes = reaction_model.reaction_ids_.size(); else n_input_nodes = reaction_model.component_group_names_.size(); const int n_output_nodes = reaction_model.labels_.size(); const int encoding_size = 88; // Make the input nodes std::vector<std::string> input_nodes; for (int i = 0; i < n_input_nodes; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } // Make the reconstruction nodes std::vector<std::string> output_nodes; for (int i = 0; i < n_output_nodes; ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes.push_back(name); } // define the model trainers and resources for the trainers std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; for (size_t i = 0; i < n_threads; ++i) { ModelResources model_resources = { ModelDevice(0, 1) }; ModelInterpreterDefaultDevice<float> model_interpreter(model_resources); model_interpreters.push_back(model_interpreter); } ModelTrainerExt<float> model_trainer; model_trainer.setBatchSize(64); model_trainer.setMemorySize(1); model_trainer.setNEpochsValidation(100); model_trainer.setVerbosityLevel(1); model_trainer.setLogging(true, true, false); model_trainer.setFindCycles(false); model_trainer.setFastInterpreter(true); model_trainer.setPreserveOoO(true); model_trainer.setLossFunctions({ std::make_shared<CrossEntropyWithLogitsLossOp<float>>(CrossEntropyWithLogitsLossOp<float>()), std::make_shared<MSELossOp<float>>(MSELossOp<float>()) }); model_trainer.setLossFunctionGrads({ std::make_shared<CrossEntropyWithLogitsLossGradOp<float>>(CrossEntropyWithLogitsLossGradOp<float>()), std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>()) }); model_trainer.setLossOutputNodes({ output_nodes, output_nodes }); model_trainer.setMetricFunctions({ std::shared_ptr<MetricFunctionOp<float>>(new AccuracyMCMicroOp<float>()), std::make_shared<PrecisionMCMicroOp<float>>(PrecisionMCMicroOp<float>()) }); model_trainer.setMetricOutputNodes({ output_nodes, output_nodes }); model_trainer.setMetricNames({ "AccuracyMCMicro", "PrecisionMCMicro" }); // define the model logger ModelLogger<float> model_logger(true, true, false, false, false, false, false); // define the models Model<float> model_batch_correction_classifier, model_classifier; model_trainer.makeModelBatchCorrectionClassifier(model_batch_correction_classifier, n_input_nodes, n_output_nodes, encoding_size, true, false, false, false, 0, 0, 0, 0, 0, 0, 32, 0, 0); // normalization type 1 model_trainer.makeModelFCClass(model_classifier, n_input_nodes, n_output_nodes, true, false, false, false, 32, 0, 0); // normalization type 1 // read in the BatchCorrection AE weights WeightFile<float> data; data.loadWeightValuesCsv(model_ae_weight_filename, model_batch_correction_classifier.getWeightsMap()); // read in the Classifier weights data.loadWeightValuesCsv(model_ae_classifier_weights_filename, model_batch_correction_classifier.getWeightsMap()); data.loadWeightValuesCsv(model_classifier_weight_filename, model_classifier.getWeightsMap()); // check that all weights were read in correctly for (auto& weight_map : model_batch_correction_classifier.getWeightsMap()) { if (weight_map.second->getInitWeight()) { std::cout << "Model " << model_batch_correction_classifier.getName() << " Weight " << weight_map.first << " has not be initialized." << std::endl;; } } for (auto& weight_map : model_classifier.getWeightsMap()) { if (weight_map.second->getInitWeight()) { std::cout << "Model " << model_classifier.getName() << " Weight " << weight_map.first << " has not be initialized." << std::endl;; } } // Validate the models std::pair<std::vector<float>, std::vector<float>> model_errors_BCClass = model_trainer.validateModel(model_batch_correction_classifier, metabolomics_data, input_nodes, model_logger, model_interpreters.front()); std::pair<std::vector<float>, std::vector<float>> model_errors_Class = model_trainer.validateModel(model_classifier, metabolomics_data, input_nodes, model_logger, model_interpreters.front()); } void main_loadBinaryModelAndStoreWeightsCsv(const std::string& model_filename) { // load the binarized model Model<float> model; ModelFile<float> model_file; model_file.loadModelBinary(model_filename, model); // save the model weights WeightFile<float> data; data.storeWeightValuesCsv(model.getName() + "_weights.csv", model.weights_); } // Main int main(int argc, char** argv) { // Set the data directories //const std::string data_dir = "C:/Users/dmccloskey/Dropbox (UCSD SBRG)/Metabolomics_KALE/"; const std::string data_dir = "C:/Users/domccl/Dropbox (UCSD SBRG)/Metabolomics_KALE/"; //const std::string data_dir = "/home/user/Data/"; // Make the filenames const std::string biochem_rxns_filename = data_dir + "iJO1366.csv"; // IndustrialStrains0103 Batch correction filenames const std::string metabo_data_filename_train_batch_1 = data_dir + "IndustrialStrains0103_Metabolomics_train_batch_1.csv"; const std::string metabo_data_filename_test_batch_1 = data_dir + "IndustrialStrains0103_Metabolomics_test_batch_1.csv"; const std::string metabo_data_filename_train_batch_2 = data_dir + "IndustrialStrains0103_Metabolomics_train_batch_2.csv"; const std::string metabo_data_filename_test_batch_2 = data_dir + "IndustrialStrains0103_Metabolomics_test_batch_2.csv"; const std::string meta_data_filename_train_batch_1 = data_dir + "IndustrialStrains0103_MetaData_train_batch_1.csv"; const std::string meta_data_filename_test_batch_1 = data_dir + "IndustrialStrains0103_MetaData_test_batch_1.csv"; const std::string meta_data_filename_train_batch_2 = data_dir + "IndustrialStrains0103_MetaData_train_batch_2.csv"; const std::string meta_data_filename_test_batch_2 = data_dir + "IndustrialStrains0103_MetaData_test_batch_2.csv"; // Run the batch correction main_batchCorrectionAE(biochem_rxns_filename, metabo_data_filename_train_batch_1, metabo_data_filename_test_batch_1, meta_data_filename_train_batch_1, meta_data_filename_test_batch_1, metabo_data_filename_train_batch_2, metabo_data_filename_test_batch_2, meta_data_filename_train_batch_2, meta_data_filename_test_batch_2, true, false, true); // IndustrialStrains0103 classification filenames const std::string metabo_data_filename_train = data_dir + "IndustrialStrains0103_Metabolomics_train.csv"; const std::string meta_data_filename_train = data_dir + "IndustrialStrains0103_MetaData_train.csv"; const std::string metabo_data_filename_test = data_dir + "IndustrialStrains0103_Metabolomics_test.csv"; const std::string meta_data_filename_test = data_dir + "IndustrialStrains0103_MetaData_test.csv"; // Model filenames const std::string model_ae_weights_filename = data_dir + "TrainTestData/BatchCorrection/AE_weights.csv"; const std::string model_ae_classifier_weights_filename = data_dir + "TrainTestData/BatchCorrection/AE_Classifier_weights.csv"; const std::string model_classifier_weights_filename = data_dir + "TrainTestData/BatchCorrection/Classifier_weights.csv"; // Run the classification main_batchCorrectionClassification(biochem_rxns_filename, metabo_data_filename_train, metabo_data_filename_test, meta_data_filename_train, meta_data_filename_test, model_ae_weights_filename, model_ae_classifier_weights_filename, model_classifier_weights_filename, false, true); return 0; }<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_SOLVER_H #define EVONET_SOLVER_H #include <unsupported/Eigen/CXX11/Tensor> #include <cmath> #include <random> #include <iostream> #include <cereal/access.hpp> // serialiation of private members #undef min // clashes with std::limit on windows in polymorphic.hpp #undef max // clashes with std::limit on windows in polymorphic.hpp #include <cereal/types/polymorphic.hpp> namespace EvoNet { /** @brief Base class for all solvers. Clipping reference: <NAME>, <NAME>, <NAME> (2013) On the difficulty of training Recurrent Neural Networks arXiv:1211.5063 [cs.LG] Gradient Noise with annealed variance reference: <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2015). Adding Gradient Noise Improves Learning for Very Deep Networks, 1–11. Retrieved from http://arxiv.org/abs/1511.06807 <NAME> and <NAME>. 2011. Bayesian learning via stochastic gradient langevin dynamics. In Proceedings of the 28th International Conference on International Conference on Machine Learning (ICML'11), <NAME> and <NAME> (Eds.). Omnipress, USA, 681-688. [TODO: add tests for clipGradient and addGradientNoise] */ template<typename TensorT> class SolverOp { public: SolverOp() = default; SolverOp(const TensorT& gradient_threshold) : gradient_threshold_(gradient_threshold) {}; SolverOp(const TensorT& gradient_threshold, const TensorT& gradient_noise_sigma, const TensorT& gradient_noise_gamma) : gradient_threshold_(gradient_threshold), gradient_noise_sigma_(gradient_noise_sigma), gradient_noise_gamma_(gradient_noise_gamma){}; virtual ~SolverOp() = default; virtual std::string getName() const = 0; void setGradientThreshold(const TensorT& gradient_threshold){gradient_threshold_ = gradient_threshold;}; TensorT getGradientThreshold() const{return gradient_threshold_;}; void setGradientNoiseSigma(const TensorT& gradient_noise_sigma){gradient_noise_sigma_ = gradient_noise_sigma;}; TensorT getGradientNoiseSigma() const{return gradient_noise_sigma_;}; void setGradientNoiseGamma(const TensorT& gradient_noise_gamma){gradient_noise_gamma_ = gradient_noise_gamma;}; TensorT getGradientNoiseGamma() const{return gradient_noise_gamma_;}; void setLearningRate(const TensorT& learning_rate) { learning_rate_ = learning_rate; }; TensorT getLearningRate() const { return learning_rate_; }; virtual std::string getParamsAsStr() const = 0; virtual std::vector<TensorT> getParameters() const = 0; virtual int getNParameters() const = 0; virtual SolverOp<TensorT>* copy() const = 0; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(gradient_threshold_, learning_rate_, gradient_noise_sigma_, gradient_noise_gamma_); } // clipping parameters TensorT gradient_threshold_ = (TensorT)1e6; ///< maximum gradient magnitude TensorT learning_rate_ = (TensorT)1e-3; ///< the learning rate // gradient noise with annealed variance parameters TensorT gradient_noise_sigma_ = (TensorT)0.0; ///< variance before annealing TensorT gradient_noise_gamma_ = (TensorT)0.55; ///< time-dependend annealing factor }; /** @brief SGD Stochastic Gradient Descent Solver. */ template<typename TensorT> class SGDOp: public SolverOp<TensorT> { public: SGDOp() = default; virtual ~SGDOp() = default; SGDOp(const TensorT& learning_rate, const TensorT& momentum): momentum_(momentum){this->setLearningRate(learning_rate);} SGDOp(const TensorT& learning_rate, const TensorT& momentum, const TensorT& gradient_threshold) : SolverOp<TensorT>(gradient_threshold), momentum_(momentum) { this->setLearningRate(learning_rate); } SGDOp(const TensorT& learning_rate, const TensorT& momentum, const TensorT& gradient_threshold, const TensorT& gradient_noise_sigma, const TensorT& gradient_noise_gamma) : SolverOp<TensorT>(gradient_threshold, gradient_noise_sigma, gradient_noise_gamma),momentum_(momentum) {this->setLearningRate(learning_rate);} void setMomentum(const TensorT& momentum){momentum_ = momentum;}; TensorT getMomentum() const{return momentum_;}; void setMomentumPrev(const TensorT& momentum_prev){momentum_prev_ = momentum_prev;}; TensorT getMomentumPrev() const{return momentum_prev_;}; std::string getName() const{return "SGDOp";}; std::string getParamsAsStr() const { std::string params = ""; params += "gradient_threshold:" + std::to_string(this->getGradientThreshold()) + ";gradient_noise_sigma:" + std::to_string(this->getGradientNoiseSigma()) + ";gradient_noise_gamma:" + std::to_string(this->getGradientNoiseGamma()) + ";learning_rate:" + std::to_string(getLearningRate()) + ";momentum:" + std::to_string(getMomentum()) + ";momentum_prev:" + std::to_string(getMomentumPrev()); return params; } SolverOp<TensorT>* copy() const { return new SGDOp<TensorT>(*this); } std::vector<TensorT> getParameters() const { std::vector<TensorT> parameters = { this->getLearningRate(), momentum_, momentum_prev_}; return parameters; } int getNParameters() const { return 3; }; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<SolverOp<TensorT>>(this), momentum_, momentum_prev_); } TensorT momentum_ = TensorT(0.9); ///< Momentum TensorT momentum_prev_ = TensorT(0.0); }; /** @brief SSD Stochastic Sign Descent Solver. */ template<typename TensorT> class SSDOp : public SolverOp<TensorT> { public: SSDOp() = default; virtual ~SSDOp() = default; SSDOp(const TensorT& learning_rate, const TensorT& momentum) : momentum_(momentum) {this->setLearningRate(learning_rate);} SSDOp(const TensorT& learning_rate, const TensorT& momentum, const TensorT& gradient_threshold) : SolverOp<TensorT>(gradient_threshold), momentum_(momentum) {this->setLearningRate(learning_rate);} SSDOp(const TensorT& learning_rate, const TensorT& momentum, const TensorT& gradient_threshold, const TensorT& gradient_noise_sigma, const TensorT& gradient_noise_gamma) : SolverOp<TensorT>(gradient_threshold, gradient_noise_sigma, gradient_noise_gamma), momentum_(momentum) {this->setLearningRate(learning_rate);} void setMomentum(const TensorT& momentum) { momentum_ = momentum; }; TensorT getMomentum() const { return momentum_; }; void setMomentumPrev(const TensorT& momentum_prev) { momentum_prev_ = momentum_prev; }; TensorT getMomentumPrev() const { return momentum_prev_; }; std::string getName() const { return "SSDOp"; }; std::string getParamsAsStr() const { std::string params = ""; params += "gradient_threshold:" + std::to_string(this->getGradientThreshold()) + ";gradient_noise_sigma:" + std::to_string(this->getGradientNoiseSigma()) + ";gradient_noise_gamma:" + std::to_string(this->getGradientNoiseGamma()) + ";learning_rate:" + std::to_string(getLearningRate()) + ";momentum:" + std::to_string(getMomentum()) + ";momentum_prev:" + std::to_string(getMomentumPrev()); return params; } SolverOp<TensorT>* copy() const { return new SSDOp<TensorT>(*this); } std::vector<TensorT> getParameters() const { std::vector<TensorT> parameters = { this->getLearningRate(), momentum_, momentum_prev_ }; return parameters; } int getNParameters() const { return 3; }; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<SolverOp<TensorT>>(this), momentum_, momentum_prev_); } TensorT momentum_ = 0.9; ///< Momentum TensorT momentum_prev_ = 0.0; }; /** @brief Adam Solver. References: <NAME>, <NAME>. Adam: A Method for Stochastic Optimization. International Conference for Learning Representations, 2015. */ template<typename TensorT> class AdamOp: public SolverOp<TensorT> { public: AdamOp() = default; virtual ~AdamOp() = default; AdamOp(const TensorT& learning_rate, const TensorT& momentum, const TensorT& momentum2, const TensorT& delta) : momentum_(momentum), momentum2_(momentum2), delta_(delta) {this->setLearningRate(learning_rate);}; AdamOp(const TensorT& learning_rate, const TensorT& momentum, const TensorT& momentum2, const TensorT& delta, const TensorT& gradient_threshold) : SolverOp<TensorT>(gradient_threshold), momentum_(momentum), momentum2_(momentum2), delta_(delta) {this->setLearningRate(learning_rate);} AdamOp(const TensorT& learning_rate, const TensorT& momentum, const TensorT& momentum2, const TensorT& delta, const TensorT& gradient_threshold, const TensorT& gradient_noise_sigma, const TensorT& gradient_noise_gamma) : SolverOp<TensorT>(gradient_threshold, gradient_noise_sigma, gradient_noise_gamma), momentum_(momentum), momentum2_(momentum2), delta_(delta) {this->setLearningRate(learning_rate);} void setMomentum(const TensorT& momentum){momentum_ = momentum;}; TensorT getMomentum() const{return momentum_;}; void setMomentum2(const TensorT& momentum2){momentum2_ = momentum2;}; TensorT getMomentum2() const{return momentum2_;}; void setDelta(const TensorT& delta){delta_ = delta;}; TensorT getDelta() const{return delta_;}; void setMomentumPrev(const TensorT& momentum_prev){momentum_prev_ = momentum_prev;}; TensorT getMomentumPrev() const{return momentum_prev_;}; void setMomentum2Prev(const TensorT& momentum2_prev){momentum2_prev_ = momentum2_prev;}; TensorT getMomentum2Prev() const{return momentum2_prev_;}; std::string getName() const{return "AdamOp";}; std::string getParamsAsStr() const { std::string params = ""; params += "gradient_threshold:" + std::to_string(this->getGradientThreshold()) + ";gradient_noise_sigma:" + std::to_string(this->getGradientNoiseSigma()) + ";gradient_noise_gamma:" + std::to_string(this->getGradientNoiseGamma()) + ";learning_rate:" + std::to_string(getLearningRate()) + ";momentum:" + std::to_string(getMomentum()) + ";momentum2:" + std::to_string(getMomentum2()) + ";delta:" + std::to_string(getDelta()) + ";momentum_prev:" + std::to_string(getMomentumPrev()) + ";momentum2_prev:" + std::to_string(getMomentum2Prev()); return params; } int getNParameters() const { return 6; }; std::vector<TensorT> getParameters() const { std::vector<TensorT> parameters = { this->getLearningRate(), momentum_, momentum2_, delta_, momentum_prev_, momentum2_prev_ }; return parameters; } SolverOp<TensorT>* copy() const { return new AdamOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<SolverOp<TensorT>>(this), momentum_, momentum2_, delta_, momentum_prev_, momentum2_prev_); } TensorT momentum_ = (TensorT)0.9; ///< Momentum TensorT momentum2_ = (TensorT)0.999; ///< Momentum2 TensorT delta_ = (TensorT)1e-8; ///< Delta TensorT momentum_prev_ = (TensorT)0.0; TensorT momentum2_prev_ = (TensorT)0.0; }; /** @brief SVAG Solver. */ template<typename TensorT> class SVAGOp : public SolverOp<TensorT> { public: SVAGOp() = default; virtual ~SVAGOp() = default; SVAGOp(const TensorT& learning_rate, const TensorT& momentum) : momentum_(momentum) { this->setLearningRate(learning_rate); }; SVAGOp(const TensorT& learning_rate, const TensorT& momentum, const TensorT& gradient_threshold) : SolverOp<TensorT>(gradient_threshold), momentum_(momentum) { this->setLearningRate(learning_rate); } SVAGOp(const TensorT& learning_rate, const TensorT& momentum, const TensorT& gradient_threshold, const TensorT& gradient_noise_sigma, const TensorT& gradient_noise_gamma) : SolverOp<TensorT>(gradient_threshold, gradient_noise_sigma, gradient_noise_gamma), momentum_(momentum) { this->setLearningRate(learning_rate); } void setMomentum(const TensorT& momentum) { momentum_ = momentum; }; TensorT getMomentum() const { return momentum_; }; void setMomentumPrev(const TensorT& momentum_prev) { momentum_prev_ = momentum_prev; }; TensorT getMomentumPrev() const { return momentum_prev_; }; void setVariancePrev(const TensorT& variance_prev) { variance_prev_ = variance_prev; }; TensorT getVariancePrev() const { return variance_prev_; }; std::string getName() const { return "SVAGOp"; }; std::string getParamsAsStr() const { std::string params = ""; params += "gradient_threshold:" + std::to_string(this->getGradientThreshold()) + ";gradient_noise_sigma:" + std::to_string(this->getGradientNoiseSigma()) + ";gradient_noise_gamma:" + std::to_string(this->getGradientNoiseGamma()) + ";learning_rate:" + std::to_string(getLearningRate()) + ";momentum:" + std::to_string(getMomentum()) + ";momentum_prev:" + std::to_string(getMomentumPrev()) + ";variance_prev:" + std::to_string(getVariancePrev()); return params; } int getNParameters() const { return 4; }; std::vector<TensorT> getParameters() const { std::vector<TensorT> parameters = { this->getLearningRate(), momentum_, momentum_prev_, variance_prev_ }; return parameters; } SolverOp<TensorT>* copy() const { return new SVAGOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<SolverOp<TensorT>>(this), momentum_, momentum_prev_, variance_prev_); } TensorT momentum_ = (TensorT)0.9; ///< Momentum TensorT momentum_prev_ = (TensorT)0.0; TensorT variance_prev_ = (TensorT)0.0; }; /** @brief Dummy solver that prevents weight update. */ template<typename TensorT> class DummySolverOp : public SolverOp<TensorT> { public: DummySolverOp() {}; ~DummySolverOp() {}; std::string getName() const { return "DummySolverOp"; }; std::string getParamsAsStr() const { std::string params = ""; return params; } std::vector<TensorT> getParameters() const { return std::vector<TensorT>(); } int getNParameters() const { return 0; }; SolverOp<TensorT>* copy() const { return new DummySolverOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<SolverOp<TensorT>>(this)); } }; /** @brief Random Solver. [TODO: add method body and tests] */ /** @brief Hebian Solver. [TODO: add method body and tests] */ /** @brief SM-G-ABS (Safe mutation gradient) Solver. [TODO: add method body and tests] References: <NAME>, <NAME>, <NAME>, <NAME> (2018). Safe Mutations for Deep and Recurrent Neural Networks through Output Gradients. arXiv:1712.06563 */ } CEREAL_REGISTER_TYPE(EvoNet::SGDOp<float>); CEREAL_REGISTER_TYPE(EvoNet::SSDOp<float>); CEREAL_REGISTER_TYPE(EvoNet::AdamOp<float>); CEREAL_REGISTER_TYPE(EvoNet::SVAGOp<float>); CEREAL_REGISTER_TYPE(EvoNet::DummySolverOp<float>); //CEREAL_REGISTER_TYPE(EvoNet::SGDOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::SSDOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::AdamOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::SVAGOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::DummySolverOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::SGDOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::SSDOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::AdamOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::SVAGOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::DummySolverOp<int>); #endif //EVONET_SOLVER_H<file_sep>#ifndef EVONET_MODELKERNALGPU_H #define EVONET_MODELKERNALGPU_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> #include <unsupported/Eigen/CXX11/Tensor> #include <EvoNet/ml/ModelKernal.h> namespace EvoNet { template <typename TensorT> class ModelKernalGpu : ModelKernal<TensorT, Eigen::GpuDevice> { public: using ModelKernal<TensorT, Eigen::GpuDevice>::ModelKernal; bool executeNodeActivation( TensorT* h_node_inputs, TensorT* d_node_inputs, TensorT* h_node_outputs, TensorT* d_node_outputs, TensorT* h_sink_dt, TensorT* d_sink_dt, std::shared_ptr<ActivationTensorOp<TensorT, Eigen::GpuDevice>>& activation_function, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, Eigen::GpuDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // check that source and weights lengths match const size_t bytes = batch_size * memory_size * layer_size * sizeof(TensorT); // Copy host to device if (copyHostToDevice) { device.memcpyHostToDevice(d_node_inputs, h_node_inputs, bytes); device.memcpyHostToDevice(d_node_outputs, h_node_outputs, bytes); device.memcpyHostToDevice(d_sink_dt, h_sink_dt, bytes); } // Activate the node net input activation_function->operator()(d_node_inputs, d_node_outputs, batch_size, memory_size, layer_size, time_step, device); // Copy device to host if (copyDeviceToHost) { device.memcpyDeviceToHost(h_node_outputs, d_node_outputs, bytes); } return true; }; bool executeNodeDerivative( TensorT* h_node_outputs, TensorT* d_node_outputs, TensorT* h_node_derivative, TensorT* d_node_derivative, std::shared_ptr<ActivationTensorOp<TensorT, Eigen::GpuDevice>>& activation_grad_function, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, Eigen::GpuDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // check that source and weights lengths match const size_t bytes = batch_size * memory_size * layer_size * sizeof(TensorT); // Copy host to device if (copyHostToDevice) { device.memcpyHostToDevice(d_node_outputs, h_node_outputs, bytes); // only if testing device.memcpyHostToDevice(d_node_derivative, h_node_derivative, bytes); } // Calculate the derivative of the sink node activation activation_grad_function->operator()(d_node_outputs, d_node_derivative, batch_size, memory_size, layer_size, time_step, device); // Copy device to host if (copyDeviceToHost) { device.memcpyDeviceToHost(h_node_derivative, d_node_derivative, bytes); } return true; }; bool executeForwardPropogation( TensorT* h_source_outputs, TensorT* d_source_outputs, TensorT* h_weights, TensorT* d_weights, TensorT* h_sink_inputs, TensorT* d_sink_inputs, std::shared_ptr<IntegrationTensorOp<TensorT, Eigen::GpuDevice>>& sink_integration_function, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, Eigen::GpuDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // Copy host to device std::size_t source_bytes = batch_size * memory_size * source_layer_size * sizeof(TensorT); std::size_t sink_bytes = batch_size * memory_size * sink_layer_size * sizeof(TensorT); std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(TensorT); if (copyHostToDevice) { device.memcpyHostToDevice(d_source_outputs, h_source_outputs, source_bytes); // only for input nodes device.memcpyHostToDevice(d_weights, h_weights, weight_bytes); device.memcpyHostToDevice(d_sink_inputs, h_sink_inputs, sink_bytes); } // Integrate sink node input sink_integration_function->operator()(d_source_outputs, d_weights, d_sink_inputs, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); // Copy device to host if (copyDeviceToHost) { device.memcpyDeviceToHost(h_sink_inputs, d_sink_inputs, sink_bytes); } return true; }; bool executeBackwardPropogation( TensorT* h_source_errors, TensorT* d_source_errors, TensorT* h_source_inputs, TensorT* d_source_inputs, TensorT* h_sink_output, TensorT* d_sink_output, TensorT* h_weights, TensorT* d_weights, TensorT* h_sink_error, TensorT* d_sink_error, TensorT* h_sink_derivative, TensorT* d_sink_derivative, const int& n_input_nodes, std::shared_ptr<IntegrationErrorTensorOp<TensorT, Eigen::GpuDevice>>& source_integration_functions, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, Eigen::GpuDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // Copy host to device std::size_t source_bytes = batch_size * memory_size * source_layer_size * sizeof(TensorT); std::size_t sink_bytes = batch_size * memory_size * sink_layer_size * sizeof(TensorT); std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(TensorT); if (copyHostToDevice) { device.memcpyHostToDevice(d_source_errors, h_source_errors, source_bytes); // only once device.memcpyHostToDevice(d_source_inputs, h_source_inputs, source_bytes); // only when testing device.memcpyHostToDevice(d_weights, h_weights, weight_bytes); // only when testing device.memcpyHostToDevice(d_sink_output, h_sink_output, sink_bytes); // only when testing device.memcpyHostToDevice(d_sink_derivative, h_sink_derivative, sink_bytes); // only when testing device.memcpyHostToDevice(d_sink_error, h_sink_error, sink_bytes); // only once } // Integrate sink node error source_integration_functions->operator()( d_source_errors, d_source_inputs, d_weights, d_sink_output, d_sink_error, d_sink_derivative, n_input_nodes, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); // Copy device to host if (copyDeviceToHost) { device.memcpyDeviceToHost(h_sink_error, d_sink_error, sink_bytes); } return true; }; bool executeModelErrors( Eigen::Tensor<TensorT, 2>& expected, TensorT* h_node_outputs, TensorT* d_node_outputs, TensorT* h_model_error, TensorT* d_model_error, TensorT* h_node_errors, TensorT* d_node_errors, std::shared_ptr<LossFunctionTensorOp<TensorT, Eigen::GpuDevice>>& loss_function, std::shared_ptr<LossFunctionGradTensorOp<TensorT, Eigen::GpuDevice>>& loss_grad_function, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, Eigen::GpuDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // Allocate memory for the expected and predicted assert(expected.size() == batch_size * layer_size); const size_t expected_bytes = batch_size * layer_size * sizeof(TensorT); //const size_t expected_bytes = expected.size() * sizeof(TensorT); TensorT* h_expected; TensorT* d_expected; assert(cudaHostAlloc((void**)(&h_expected), expected_bytes, cudaHostAllocDefault) == cudaSuccess); assert(cudaMalloc((void**)(&d_expected), expected_bytes) == cudaSuccess); h_expected = expected.data(); // Copy host to device std::size_t bytes = batch_size * memory_size * layer_size * sizeof(TensorT); std::size_t model_bytes = batch_size * memory_size * sizeof(TensorT); device.memcpyHostToDevice(d_expected, h_expected, expected_bytes); if (copyHostToDevice) { device.memcpyHostToDevice(d_node_outputs, h_node_outputs, bytes); // only when testing device.memcpyHostToDevice(d_node_errors, h_node_errors, bytes); // only once device.memcpyHostToDevice(d_model_error, h_model_error, model_bytes); // only once } // Calculate the model error loss_function->operator()(d_node_outputs, d_expected, d_model_error, batch_size, memory_size, layer_size, time_step, device); // Calculate the node errors loss_grad_function->operator()(d_node_outputs, d_expected, d_node_errors, batch_size, memory_size, layer_size, time_step, device); // Copy device to host if (copyDeviceToHost) { device.memcpyDeviceToHost(h_node_errors, d_node_errors, bytes); // only once device.memcpyDeviceToHost(h_model_error, d_model_error, model_bytes); // only once } // Deallocate the memory //assert(cudaFreeHost(h_expected) == cudaSuccess); // still owned by expected assert(cudaFree(d_expected) == cudaSuccess); return true; }; bool executeModelMetric( Eigen::Tensor<TensorT, 2>& expected, TensorT* h_node_output, TensorT* d_node_output, TensorT* h_model_metric, TensorT* d_model_metric, std::shared_ptr<MetricFunctionTensorOp<TensorT, Eigen::GpuDevice>>& metric_function, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, Eigen::GpuDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // Allocate memory for the expected and predicted assert(expected.size() == batch_size * layer_size); const size_t expected_bytes = batch_size * layer_size * sizeof(TensorT); //const size_t expected_bytes = expected.size() * sizeof(TensorT); TensorT* h_expected; TensorT* d_expected; assert(cudaHostAlloc((void**)(&h_expected), expected_bytes, cudaHostAllocDefault) == cudaSuccess); assert(cudaMalloc((void**)(&d_expected), expected_bytes) == cudaSuccess); h_expected = expected.data(); // Copy host to device std::size_t bytes = batch_size * memory_size * layer_size * sizeof(TensorT); std::size_t model_bytes = n_metrics * memory_size * sizeof(TensorT); device.memcpyHostToDevice(d_expected, h_expected, expected_bytes); if (copyHostToDevice) { device.memcpyHostToDevice(d_node_output, h_node_output, bytes); // only when testing device.memcpyHostToDevice(d_model_metric, h_model_metric, model_bytes); // only once } // Calculate the model metric metric_function->operator()(d_node_output, d_expected, d_model_metric, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); // Copy device to host if (copyDeviceToHost) { device.memcpyDeviceToHost(h_model_metric, d_model_metric, model_bytes); // only once } // Deallocate the memory //assert(cudaFreeHost(h_expected) == cudaSuccess); // still owned by expected assert(cudaFree(d_expected) == cudaSuccess); return true; }; bool executeWeightErrors( TensorT* h_sink_errors, TensorT* d_sink_errors, TensorT* h_source_outputs, TensorT* d_source_outputs, TensorT* h_source_inputs, TensorT* d_source_inputs, const int& n_input_nodes, std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, Eigen::GpuDevice>>& sink_integration_function, TensorT* h_weight, TensorT* d_weight, TensorT* h_weight_error, TensorT* d_weight_error, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, Eigen::GpuDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // Copy host to device std::size_t source_bytes = batch_size * memory_size * source_layer_size * sizeof(TensorT); std::size_t sink_bytes = batch_size * memory_size * sink_layer_size * sizeof(TensorT); std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(TensorT); if (copyHostToDevice) { device.memcpyHostToDevice(d_source_inputs, h_source_inputs, source_bytes); // only needed when testing... device.memcpyHostToDevice(d_source_outputs, h_source_outputs, source_bytes); // only needed when testing... device.memcpyHostToDevice(d_sink_errors, h_sink_errors, sink_bytes); // only needed when testing... device.memcpyHostToDevice(d_weight_error, h_weight_error, weight_bytes); device.memcpyHostToDevice(d_weight, h_weight, weight_bytes); // only needed when testing... } // Accumulate the error for all links involving the same weight sink_integration_function->operator()(d_sink_errors, d_source_outputs, d_weight, d_source_inputs, d_weight_error, n_input_nodes, batch_size, memory_size, source_layer_size, sink_layer_size, device); // Copy device to host if (copyDeviceToHost) { device.memcpyDeviceToHost(h_weight_error, d_weight_error, weight_bytes); // only needed when testing... } return true; }; bool executeSharedWeightErrors( TensorT* h_weight_error, TensorT* d_weight_error, TensorT* h_shared_weights, TensorT* d_shared_weights, const int& source_layer_size, const int& sink_layer_size, const int& n_shared_layers, Eigen::GpuDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { if (n_shared_layers == 0) return true; // Copy host to device std::size_t error_bytes = source_layer_size * source_layer_size * sizeof(TensorT); std::size_t shared_weights_bytes = source_layer_size * sink_layer_size * n_shared_layers * sizeof(TensorT); if (copyHostToDevice) { device.memcpyHostToDevice(d_weight_error, h_weight_error, error_bytes); // only needed when testing... device.memcpyHostToDevice(d_shared_weights, h_shared_weights, shared_weights_bytes); // only needed when testing... } // Pool the shared weights erros this->combineSharedWeightErrors(d_weight_error, d_shared_weights, source_layer_size, sink_layer_size, n_shared_layers, device); // Copy device to host if (copyDeviceToHost) { device.memcpyDeviceToHost(h_weight_error, d_weight_error, error_bytes); // only needed when testing... } return true; }; bool executeWeightUpdate( TensorT* h_weight, TensorT* d_weight, TensorT* h_solver_params, TensorT* d_solver_params, TensorT* h_weight_error, TensorT* d_weight_error, std::shared_ptr<SolverTensorOp<TensorT, Eigen::GpuDevice>>& solver_function, const int& source_layer_size, const int& sink_layer_size, const int& iter, Eigen::GpuDevice& device, bool copyHostToDevice = false, bool copyDeviceToHost = false) override { // Check for a dummy solver if (solver_function->getName() == "DummySolverTensorOp") return true; // Copy host to device const size_t bytes = source_layer_size * sink_layer_size * sizeof(TensorT); const size_t solver_params_bytes = source_layer_size * sink_layer_size * 3 * sizeof(TensorT); if (copyHostToDevice) { device.memcpyHostToDevice(d_solver_params, h_solver_params, solver_params_bytes); device.memcpyHostToDevice(d_weight_error, h_weight_error, bytes); // only needed when testing... device.memcpyHostToDevice(d_weight, h_weight, bytes); // only needed when testing... } // Update the weights solver_function->operator()(d_weight, d_weight_error, d_solver_params, source_layer_size, sink_layer_size, iter, device);//getDrop()*error); // Copy device to host if (copyDeviceToHost) { device.memcpyDeviceToHost(h_weight, d_weight, bytes); // only needed at the end of training... device.memcpyDeviceToHost(h_solver_params, d_solver_params, solver_params_bytes); // only needed at the end of training... } return true; }; }; } #endif #endif //EVONET_MODELKERNALGPU_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE MNISTSimulator test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/simulator/MNISTSimulator.h> #include <EvoNet/test_config.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(MNISTSimulator1) template<typename TensorT> class MNISTSimulatorExt : public MNISTSimulator<TensorT> { public: void simulateTrainingData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) override {} void simulateValidationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) override {} void simulateEvaluationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 3>& time_steps) override {} }; BOOST_AUTO_TEST_CASE(constructor) { MNISTSimulatorExt<float>* ptr = nullptr; MNISTSimulatorExt<float>* nullPointer = nullptr; ptr = new MNISTSimulatorExt<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { MNISTSimulatorExt<float>* ptr = nullptr; ptr = new MNISTSimulatorExt<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(ReverseInt) { MNISTSimulatorExt<float> datasimulator; BOOST_CHECK_EQUAL(datasimulator.ReverseInt(0), 0); BOOST_CHECK_EQUAL(datasimulator.ReverseInt(1), 16777216); } BOOST_AUTO_TEST_CASE(ReadMNIST) { MNISTSimulatorExt<float> datasimulator; // MNIST metadata const std::size_t input_size = 784; const std::size_t training_data_size = 10; //60000; const std::size_t validation_data_size = 10; //10000; std::string training_data_filename = EVONET_GET_TEST_DATA_PATH("train-images.idx3-ubyte"); std::string training_labels_filename = EVONET_GET_TEST_DATA_PATH("train-labels.idx1-ubyte"); // Read training data Eigen::Tensor<float, 2> mnist_train(training_data_size, input_size); datasimulator.ReadMNIST(training_data_filename, mnist_train, false); // Read in the training labels Eigen::Tensor<float, 2> mnist_train_labels(training_data_size, 1); datasimulator.ReadMNIST(training_labels_filename, mnist_train_labels, true); std::vector<float> labels_expected = { 5,0,4,1,9,2,1,3,1,4 }; for (int i = 0; i < training_data_size; ++i) { // UNCOMMENT to convince yourself that the images are what they should be... //std::cout << "Reshape:\n" << mnist_train.reshape(Eigen::array<Eigen::Index, 3>({ training_data_size,28,28 })).chip(i, 0) << std::endl; //std::cout << "Label:\n" << mnist_train_labels.chip(i, 0) << std::endl; BOOST_CHECK_EQUAL(mnist_train_labels(i, 0), labels_expected.at(i)); } } BOOST_AUTO_TEST_CASE(readData) { MNISTSimulatorExt<float> datasimulator; // MNIST metadata const std::size_t input_size = 784; const std::size_t training_data_size = 10; //60000; const std::size_t validation_data_size = 10; //10000; std::string training_data_filename = EVONET_GET_TEST_DATA_PATH("train-images.idx3-ubyte"); std::string training_labels_filename = EVONET_GET_TEST_DATA_PATH("train-labels.idx1-ubyte"); std::string validation_data_filename = EVONET_GET_TEST_DATA_PATH("t10k-images.idx3-ubyte"); std::string validation_labels_filename = EVONET_GET_TEST_DATA_PATH("t10k-labels.idx1-ubyte"); // Read training data datasimulator.readData(training_data_filename, training_labels_filename, true, training_data_size, input_size); // Read validation data datasimulator.readData(validation_data_filename, validation_labels_filename, false, validation_data_size, input_size); std::vector<float> train_labels_expected = { 5,0,4,1,9,2,1,3,1,4 }; std::vector<float> test_labels_expected = { 7,2,1,0,4,1,4,9,5,9 }; for (int i = 0; i < training_data_size; ++i) { // UNCOMMENT to convince yourself that the images are what they should be... //std::cout << "Reshape train images:\n" << datasimulator.training_data.reshape(Eigen::array<Eigen::Index, 3>({ training_data_size,28,28 })).chip(i, 0) << std::endl; //std::cout << "Labels train:\n" << train_labels_expected.at(i) << std::endl; BOOST_CHECK_EQUAL(datasimulator.training_labels(i, train_labels_expected.at(i)), 1); //std::cout << "Reshape test images:\n" << datasimulator.validation_data.reshape(Eigen::array<Eigen::Index, 3>({ validation_data_size,28,28 })).chip(i, 0) << std::endl; //std::cout << "Labels test:\n" << test_labels_expected.at(i) << std::endl; BOOST_CHECK_EQUAL(datasimulator.validation_labels(i, test_labels_expected.at(i)), 1); } } BOOST_AUTO_TEST_CASE(smoothLabels) { MNISTSimulatorExt<float> datasimulator; // MNIST metadata const std::size_t input_size = 784; const std::size_t training_data_size = 10; //60000; const std::size_t validation_data_size = 10; //10000; std::string training_data_filename = EVONET_GET_TEST_DATA_PATH("train-images.idx3-ubyte"); std::string training_labels_filename = EVONET_GET_TEST_DATA_PATH("train-labels.idx1-ubyte"); std::string validation_data_filename = EVONET_GET_TEST_DATA_PATH("t10k-images.idx3-ubyte"); std::string validation_labels_filename = EVONET_GET_TEST_DATA_PATH("t10k-labels.idx1-ubyte"); // Read training data datasimulator.readData(training_data_filename, training_labels_filename, true, training_data_size, input_size); // Read validation data datasimulator.readData(validation_data_filename, validation_labels_filename, false, validation_data_size, input_size); // Test datasimulator.smoothLabels(0.1, 0.9); } BOOST_AUTO_TEST_CASE(unitScaleData) { MNISTSimulatorExt<float> datasimulator; // MNIST metadata const std::size_t input_size = 784; const std::size_t training_data_size = 10; //60000; const std::size_t validation_data_size = 10; //10000; std::string training_data_filename = EVONET_GET_TEST_DATA_PATH("train-images.idx3-ubyte"); std::string training_labels_filename = EVONET_GET_TEST_DATA_PATH("train-labels.idx1-ubyte"); std::string validation_data_filename = EVONET_GET_TEST_DATA_PATH("t10k-images.idx3-ubyte"); std::string validation_labels_filename = EVONET_GET_TEST_DATA_PATH("t10k-labels.idx1-ubyte"); // Read training data datasimulator.readData(training_data_filename, training_labels_filename, true, training_data_size, input_size); // Read validation data datasimulator.readData(validation_data_filename, validation_labels_filename, false, validation_data_size, input_size); // Test datasimulator.unitScaleData(); } BOOST_AUTO_TEST_CASE(centerUnitScaleData) { MNISTSimulatorExt<float> datasimulator; // MNIST metadata const std::size_t input_size = 784; const std::size_t training_data_size = 10; //60000; const std::size_t validation_data_size = 10; //10000; std::string training_data_filename = EVONET_GET_TEST_DATA_PATH("train-images.idx3-ubyte"); std::string training_labels_filename = EVONET_GET_TEST_DATA_PATH("train-labels.idx1-ubyte"); std::string validation_data_filename = EVONET_GET_TEST_DATA_PATH("t10k-images.idx3-ubyte"); std::string validation_labels_filename = EVONET_GET_TEST_DATA_PATH("t10k-labels.idx1-ubyte"); // Read training data datasimulator.readData(training_data_filename, training_labels_filename, true, training_data_size, input_size); // Read validation data datasimulator.readData(validation_data_filename, validation_labels_filename, false, validation_data_size, input_size); // Test datasimulator.centerUnitScaleData(); } BOOST_AUTO_TEST_CASE(corruptTrainingData) { MNISTSimulatorExt<float> datasimulator; // MNIST metadata const std::size_t input_size = 784; const std::size_t training_data_size = 10; //60000; const std::size_t validation_data_size = 10; //10000; std::string training_data_filename = EVONET_GET_TEST_DATA_PATH("train-images.idx3-ubyte"); std::string training_labels_filename = EVONET_GET_TEST_DATA_PATH("train-labels.idx1-ubyte"); std::string validation_data_filename = EVONET_GET_TEST_DATA_PATH("t10k-images.idx3-ubyte"); std::string validation_labels_filename = EVONET_GET_TEST_DATA_PATH("t10k-labels.idx1-ubyte"); // Read training data datasimulator.readData(training_data_filename, training_labels_filename, true, training_data_size, input_size); // Read validation data datasimulator.readData(validation_data_filename, validation_labels_filename, false, validation_data_size, input_size); // Test datasimulator.corruptTrainingData(0.25); } BOOST_AUTO_TEST_SUITE_END()<file_sep> #------------------------------------------------------------------------------ ## export a single option indicating if boost static libs should be preferred option(BOOST_USE_STATIC "Use Boost static libraries." ON) #------------------------------------------------------------------------------ ## Wraps the common find boost code into a single call ## @param .. simply add all required components to the call ## @note This macro will define BOOST_MOC_ARGS that should be added to all moc ## calls (see https://bugreports.qt-project.org/browse/QTBUG-22829) macro(find_boost) set(Boost_USE_STATIC_LIBS ${BOOST_USE_STATIC}) set(Boost_USE_MULTITHREADED ON) set(Boost_USE_STATIC_RUNTIME OFF) add_definitions(/DBOOST_ALL_NO_LIB) ## disable auto-linking of boost libs (boost tends to guess wrong lib names) set(Boost_COMPILER "") # help boost finding it's packages set(Boost_ADDITIONAL_VERSIONS "1.48.0" "1.49.0" "1.50.0" "1.51.0" "1.52.0" "1.53.0" "1.54.0") # 1st attempt does not explicitly requires boost to enable second check (see below) find_package(Boost 1.48.0 COMPONENTS ${ARGN}) set(BOOST_MOC_ARGS "") if(Boost_FOUND) # see: https://bugreports.qt-project.org/browse/QTBUG-22829 # Confirmed only on mac os x and leads to problems on win32 and lnx # so we handle it for now only on mac os x and boost versions > 1.52 if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin" OR ${Boost_MINOR_VERSION} GREATER "52") set(BOOST_MOC_ARGS "-DBOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION") endif() endif() endmacro(find_boost) #------------------------------------------------------------------------------ ## Checks if the user supplied package type is valid and aborts if not ## @param package_type The given package type macro(is_valid_package package_type) list(FIND VALID_PACKAGE_TYPES ${package_type} list_pos) if( ${list_pos} EQUAL -1 ) message(STATUS "The PACKAGE_TYPE ${package_type} is invalid") message(STATUS "Valid PACKAGE_TYPEs are:") foreach( _vpt ${VALID_PACKAGE_TYPES} ) message(STATUS " * ${_vpt}") endforeach() message(FATAL_ERROR "Aborting ...") endif() endmacro() <file_sep># -------------------------------------------------------------------------- # EvoNet # -------------------------------------------------------------------------- # TODO: license # -------------------------------------------------------------------------- # $Maintainer: <NAME> # $Author: <NAME> # -------------------------------------------------------------------------- cmake_minimum_required(VERSION 3.18.0 FATAL_ERROR) #------------------------------------------------------------------------------ # Handle superbuild first #------------------------------------------------------------------------------ option (USE_SUPERBUILD "Whether or not a superbuild should be invoked" ON) if (USE_SUPERBUILD) project ("SuperBuild" NONE) #include(external/boost.cmake) include(external/eigen.cmake) include(external/cereal.cmake) return() # stop processing this file further else() project ("EvoNet_host") endif() #------------------------------------------------------------------------------ # General CMake definitions & helper #------------------------------------------------------------------------------ SET(CMAKE_ALLOW_LOOSE_LOOP_CONSTRUCTS true) # Set C++ version SET(CMAKE_CXX_STANDARD 17) SET(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CUDA_STANDARD 17) set(CMAKE_CUDA_STANDARD_REQUIRED TRUE) #------------------------------------------------------------------------------ ## CMake sanity check: sometimes CMAKE_SIZEOF_VOID_P just vanishes when ## updating CMake. if (NOT CMAKE_SIZEOF_VOID_P) message(FATAL_ERROR "'CMAKE_SIZEOF_VOID_P' is undefined. Thus you should" " delete CMakeFiles (the directory) and the" " CMakeCache.txt and rerun CMake again! This is some" " weird CMake bug that seems to appear when updating the" " CMake version.") endif() #------------------------------------------------------------------------------ # Include the standard Dart testing module #------------------------------------------------------------------------------ # for Nightly Build log and Valgrind for memcheck option (BUILD_TESTS "Whether or not build unit tests" ON) if(BUILD_TESTS) include(CTest) include(Dart) endif() #------------------------------------------------------------------------------ option(MT_ENABLE_TBB "Enable Intel TBB support" OFF) option(MT_ENABLE_OPENMP "Enable OpenMP support" ON) option(BOOST_USE_STATIC "Use Boost static libraries." ON) #------------------------------------------------------------------------------ # Extend module path with our modules # (create an empty list if CMAKE_MODULE_PATH does not exist) if(NOT CMAKE_MODULE_PATH) set(CMAKE_MODULE_PATH) endif() list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/modules") ## CMake looks for the NSIS script in the module path. list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/Windows") #------------------------------------------------------------------------------ # EvoNet Version Information #------------------------------------------------------------------------------ set (EvoNet_VERSION_MAJOR 1) set (EvoNet_VERSION_MINOR 0) set (EvoNet_VERSION_PATCH 0) #------------------------------------------------------------------------------ # retrieve detailed informations on the working copy (git) include(GetGitRevisionDescription) option(GIT_TRACKING "Embedding of Git version control information into the EvoNet library (and EvoNetInfo tool). Note that if activated, every branch-switch or commit will trigger re-generation of the build system (MakeFiles, VS Solutions etc)." ON) if (GIT_TRACKING) ## will configure an include file git_short_info(EVONET_GIT_SHORT_REFSPEC EVONET_GIT_SHORT_SHA1 EVONET_GIT_LC_DATE) endif() message(STATUS "Building EvoNet ${EVONET_PACKAGE_VERSION_MAJOR}.${EVONET_PACKAGE_VERSION_MINOR}.${EVONET_PACKAGE_VERSION_PATCH}") if(NOT GIT_TRACKING) set(EVONET_GIT_SHORT_SHA1 "disabled") set(EVONET_GIT_SHORT_REFSPEC "disabled") message(STATUS " [CMake is not tracking Git commits and branching. To enable use '-D GIT_TRACKING=ON'.]") elseif(EVONET_GIT_SHORT_REFSPEC EQUAL "GIT-NOTFOUND" OR ${EVONET_GIT_SHORT_REFSPEC} EQUAL "HEAD-HASH-NOTFOUND") set(EVONET_GIT_SHORT_SHA1 "exported") set(EVONET_GIT_SHORT_REFSPEC "exported") message(STATUS " [CMake is not tracking Git commits and branching. Git not found.]") else() # everything found, print some status information message(STATUS " - Repository revision ${EVONET_GIT_SHORT_SHA1}") message(STATUS " - Repository branch ${EVONET_GIT_SHORT_REFSPEC}") message(STATUS " - Repository last change date ${EVONET_GIT_LC_DATE}") message(STATUS " [CMake is tracking Git commits and branching. To disable use '-D GIT_TRACKING=OFF'.]") endif() #------------------------------------------------------------------------------ message(STATUS "Building EvoNet ${EVONET_PACKAGE_VERSION_MAJOR}.${EVONET_PACKAGE_VERSION_MINOR}.${EVONET_PACKAGE_VERSION_PATCH}") #------------------------------------------------------------------------------ # Unsupported systems and other nuances #------------------------------------------------------------------------------ if (MINGW OR MSYS) message(FATAL_ERROR "MSYS and/or MinGW are not supported! Please use a Visual Studio environment! See Windows build instructions for further information!") endif() # Handle large template libraries with WIN32 IF(WIN32) #ADD_DEFINITIONS(/bigobj) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj") SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj") SET(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /bigobj") SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj") SET(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /bigobj") ENDIF(WIN32) #------------------------------------------------------------------------------ # Multi-threading stuff (OpenMP, CUDA, TBB) #------------------------------------------------------------------------------ include(external/multithreading.cmake) if (EVONET_CUDA) find_package(CUDA REQUIRED) if(CUDA_FOUND) message(STATUS "Found CUDA Toolkit version ${CUDA_VERSION_MAJOR}.${CUDA_VERSION_MINOR}" ) endif() enable_language(CUDA) if (EVONET_CUDA_ARCHITECTURES) set(CMAKE_CUDA_ARCHITECTURES ${EVONET_CUDA_ARCHITECTURES}) endif() set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} --expt-relaxed-constexpr" ) set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} --expt-extended-lambda" ) if(WIN32) set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xcompiler \"/bigobj\"") endif() add_compile_definitions(COMPILE_WITH_CUDA=true) endif() #------------------------------------------------------------------------------ # Check architecture 32bit vs. 64bit #------------------------------------------------------------------------------ if (CMAKE_SIZEOF_VOID_P MATCHES "8") set(EVONET_64BIT_ARCHITECTURE 1 CACHE INTERNAL "Architecture-bits") message(STATUS "Architecture: 64 bit") else() set(EVONET_64BIT_ARCHITECTURE 0 CACHE INTERNAL "Architecture-bits") message(STATUS "Architecture: 32 bit") endif() #------------------------------------------------------------------------------ # Handle build type #------------------------------------------------------------------------------ # Set default build type (if not set by user on command line) if (NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE Release) endif() # Force build type into the cache (needs to be set beforehand) set(CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} CACHE STRING "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel." FORCE) if (CMAKE_BUILD_TYPE EQUAL Debug) set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Wall") set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG}") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS}") elseif (CMAKE_BUILD_TYPE EQUAL Release) set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Wall") endif() #------------------------------------------------------------------------------ # Enable STL debug mode (GCC only) #------------------------------------------------------------------------------ option(STL_DEBUG "[GCC only] Enable STL-DEBUG mode (very slow)." OFF) if(STL_DEBUG) include(cmake/stl_debug.cmake) endif() #------------------------------------------------------------------------------ # Enable AddressSanitizer #------------------------------------------------------------------------------ option(ADDRESS_SANITIZER "[Clang/GCC only] Enable AddressSanitizer mode (quite slow)." OFF) if(ADDRESS_SANITIZER) include(cmake/AddressSanitizer.cmake) endif() #------------------------------------------------------------------------------ # Host directory for referencing from subprojects set(EVONET_HOST_DIRECTORY "${PROJECT_SOURCE_DIR}") set(EVONET_HOST_BINARY_DIRECTORY "${PROJECT_BINARY_DIR}") #------------------------------------------------------------------------------ # CMake Utitlities #------------------------------------------------------------------------------ # include some EvoNet specific macros include (${PROJECT_SOURCE_DIR}/cmake/build_system_macros.cmake) # .. and some to ease the installation process include (${PROJECT_SOURCE_DIR}/cmake/install_macros.cmake) # .. and some to ease preparing the documentation builds include (${PROJECT_SOURCE_DIR}/cmake/doc_macros.cmake) # .. and some to ease the installation process include (${PROJECT_SOURCE_DIR}/cmake/add_library_macros.cmake) # .. and some to ease the export process include (${PROJECT_SOURCE_DIR}/cmake/export_macros.cmake) #------------------------------------------------------------------------------ # Output directories set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${EVONET_HOST_BINARY_DIRECTORY}/lib") set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${EVONET_HOST_BINARY_DIRECTORY}/bin") set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${EVONET_HOST_BINARY_DIRECTORY}/lib") set(EVONET_BINARY_DIR "${EVONET_HOST_BINARY_DIRECTORY}/bin") set(EVONET_WIN32_DLL_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) #------------------------------------------------------------------------------ # define installation subdirectories to allow for custom installations # note that all those directories are below CMAKE_INSTALL_PREFIX set(INSTALL_BIN_DIR bin CACHE PATH "Installation directory for executables") set(INSTALL_INCLUDE_DIR include CACHE PATH "Installation directory for header files") set(INSTALL_DOC_DIR share/doc CACHE PATH "Installation directory for documentation") set(INSTALL_SHARE_DIR share/EvoNet CACHE PATH "Installation directory for shared data") if(WIN32) set(DEF_INSTALL_CMAKE_DIR CMake) set(DEF_INSTALL_LIB_DIR bin) else() set(DEF_INSTALL_CMAKE_DIR lib/cmake/EvoNet) set(DEF_INSTALL_LIB_DIR lib) endif() set(INSTALL_CMAKE_DIR ${DEF_INSTALL_CMAKE_DIR} CACHE PATH "Installation directory for CMake files") set(INSTALL_LIB_DIR ${DEF_INSTALL_LIB_DIR} CACHE PATH "Installation directory for libraries") # Never use RPATH. We have other means on every platform. # E.g. not yet compatible with our fix_mac_dependencies script. SET(CMAKE_MACOSX_RPATH FALSE) # macOS-specific variable used when installing own libraries/bins. Use relative paths here. SET(CMAKE_INSTALL_NAME_DIR "@executable_path/../${INSTALL_LIB_DIR}") #------------------------------------------------------------------------------ # lib naming when building with msvc & convenience location of build.bat if (MSVC) ## use EvoNetd.dll in debug mode SET(CMAKE_DEBUG_POSTFIX d) ## copy build.bat to root of binary dir to enable convenient invokation (instead of typing path to source dir all the time) if(NOT ("${PROJECT_SOURCE_DIR}" STREQUAL "${PROJECT_BINARY_DIR}")) file(COPY "${PROJECT_SOURCE_DIR}/tools/build.bat" DESTINATION "${PROJECT_BINARY_DIR}") endif() endif() #------------------------------------------------------------------------------ # The actual EvoNet code (headers and source files) #------------------------------------------------------------------------------ add_subdirectory(src) #------------------------------------------------------------------------------ # Documentation #------------------------------------------------------------------------------ option (BUILD_DOCS "Whether or not to build the documentation" OFF) if(BUILD_DOCS) add_subdirectory(docs) endif() #------------------------------------------------------------------------------ # Handle export of targets and install them evonet_export_targets() install_export_targets() #------------------------------------------------------------------------------ # Installation and packaging: #------------------------------------------------------------------------------ # packaging routines: if(NOT "${PACKAGE_TYPE}" STREQUAL "none") include(cmake/package_general.cmake) if("${PACKAGE_TYPE}" STREQUAL "dmg") # .. MacOSX include(cmake/package_dragndrop_dmg.cmake) elseif("${PACKAGE_TYPE}" STREQUAL "rpm") # .. rpms include(cmake/package_rpm.cmake) elseif("${PACKAGE_TYPE}" STREQUAL "deb") # .. debs include(cmake/package_deb.cmake) elseif("${PACKAGE_TYPE}" STREQUAL "nsis") # .. Windows nsis # For now we use the old NSIS script AS IS # i.e. although CMake runs make install etc. NSIS # will use everything from the usual build folder like before. # TODO automate more and let CMake generate install commands # in the script. Unfortunately we have a lot of specific code in the script. include(cmake/package_nsis.cmake) endif() include(CPack) include(cmake/package_components.cmake) endif()<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ModelInterpreter DAG test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/ModelInterpreterDefaultDevice.h> #include <EvoNet/ml/ModelBuilder.h> // comprehensive architecture tests using namespace EvoNet; using namespace std; Model<float> makeModelToy1() { /** * Directed Acyclic Graph Toy Network Model */ Node<float> i1, i2, h1, h2, o1, o2, b1, b2; Link l1, l2, l3, l4, lb1, lb2, l5, l6, l7, l8, lb3, lb4; Weight<float> w1, w2, w3, w4, wb1, wb2, w5, w6, w7, w8, wb3, wb4; Model<float> model_FC_Sum; // Toy network: 1 hidden layer, fully connected, DAG i1 = Node<float>("0", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); i2 = Node<float>("1", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h1 = Node<float>("2", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h2 = Node<float>("3", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o1 = Node<float>("4", NodeType::output, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o2 = Node<float>("5", NodeType::output, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b1 = Node<float>("6", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b2 = Node<float>("7", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); // weights std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; // weight_init.reset(new RandWeightInitOp(1.0)); // No random init for testing weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w1 = Weight<float>("0", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w2 = Weight<float>("1", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w3 = Weight<float>("2", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w4 = Weight<float>("3", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb1 = Weight<float>("4", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb2 = Weight<float>("5", weight_init, solver); // input layer + bias l1 = Link("0", "0", "2", "0"); l2 = Link("1", "0", "3", "1"); l3 = Link("2", "1", "2", "2"); l4 = Link("3", "1", "3", "3"); lb1 = Link("4", "6", "2", "4"); lb2 = Link("5", "6", "3", "5"); // weights weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w5 = Weight<float>("6", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w6 = Weight<float>("7", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w7 = Weight<float>("8", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w8 = Weight<float>("9", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb3 = Weight<float>("10", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb4 = Weight<float>("11", weight_init, solver); // hidden layer + bias l5 = Link("6", "2", "4", "6"); l6 = Link("7", "2", "5", "7"); l7 = Link("8", "3", "4", "8"); l8 = Link("9", "3", "5", "9"); lb3 = Link("10", "7", "4", "10"); lb4 = Link("11", "7", "5", "11"); model_FC_Sum.setId(1); model_FC_Sum.addNodes({ i1, i2, h1, h2, o1, o2, b1, b2 }); model_FC_Sum.addWeights({ w1, w2, w3, w4, wb1, wb2, w5, w6, w7, w8, wb3, wb4 }); model_FC_Sum.addLinks({ l1, l2, l3, l4, lb1, lb2, l5, l6, l7, l8, lb3, lb4 }); return model_FC_Sum; } BOOST_AUTO_TEST_SUITE(modelInterpreter_DAG) BOOST_AUTO_TEST_CASE(constructor) { ModelInterpreterDefaultDevice<float>* ptr = nullptr; ModelInterpreterDefaultDevice<float>* nullPointer = nullptr; ptr = new ModelInterpreterDefaultDevice<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { ModelInterpreterDefaultDevice<float>* ptr = nullptr; ptr = new ModelInterpreterDefaultDevice<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(constructor1) { ModelResources model_resources = { ModelDevice(0, 1) }; ModelInterpreterDefaultDevice<float> model_interpreter(model_resources); BOOST_CHECK_EQUAL(model_interpreter.getModelResources()[0].getID(), model_resources[0].getID()); BOOST_CHECK_EQUAL(model_interpreter.getModelResources()[0].getNEngines(), model_resources[0].getNEngines()); } BOOST_AUTO_TEST_CASE(gettersAndSetters) { ModelResources model_resources = { ModelDevice(0, 1) }; ModelInterpreterDefaultDevice<float> model_interpreter; model_interpreter.setModelResources(model_resources); BOOST_CHECK_EQUAL(model_interpreter.getModelResources()[0].getID(), model_resources[0].getID()); BOOST_CHECK_EQUAL(model_interpreter.getModelResources()[0].getNEngines(), model_resources[0].getNEngines()); } BOOST_AUTO_TEST_CASE(copy) { ModelResources model_resources = { ModelDevice(0, 1) }; ModelInterpreterDefaultDevice<float> model_interpreter; model_interpreter.setModelResources(model_resources); std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; model_interpreters.push_back(model_interpreter); BOOST_CHECK_EQUAL(model_interpreters[0].getModelResources()[0].getID(), model_resources[0].getID()); BOOST_CHECK_EQUAL(model_interpreters[0].getModelResources()[0].getNEngines(), model_resources[0].getNEngines()); } BOOST_AUTO_TEST_CASE(comparison1) { ModelResources model_resources = { ModelDevice(0, 1) }; ModelInterpreterDefaultDevice<float> model_interpreter(model_resources); ModelInterpreterDefaultDevice<float> model_interpreter_test; //BOOST_CHECK(model_interpreter != model_interpreter_test); // Need to fix '==' operator in `ModelInterpreter` model_interpreter_test.setModelResources(model_resources); BOOST_CHECK(model_interpreter == model_interpreter_test); } /** * Part 1 test suit for the Model class * * The following test methods that are * required of a standard feed-forward neural network */ Model<float> model_getNextInactiveLayer = makeModelToy1(); BOOST_AUTO_TEST_CASE(getNextInactiveLayerWOBiases) { // Toy network: 1 hidden layer, fully connected, DAG // Model<float> model_FC_Sum = makeModelToy1(); ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created // get the next hidden layer std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_getNextInactiveLayer, FP_operations_map, FP_operations_list); BOOST_CHECK_EQUAL(FP_operations_map.size(), 2); BOOST_CHECK_EQUAL(FP_operations_map.at("2"), 0); BOOST_CHECK_EQUAL(FP_operations_map.at("3"), 1); BOOST_CHECK_EQUAL(FP_operations_list.size(), 2); BOOST_CHECK_EQUAL(FP_operations_list[0].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].result.sink_node->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments.size(), 2); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].weight->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].weight->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_list[1].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].result.sink_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments.size(), 2); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].weight->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[1].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[1].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[1].weight->getName(), "3"); } Model<float> model_getNextInactiveLayerBiases = makeModelToy1(); BOOST_AUTO_TEST_CASE(getNextInactiveLayerBiases) { // Toy network: 1 hidden layer, fully connected, DAG // Model<float> model_FC_Sum = makeModelToy1(); ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created // get the next hidden layer std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_getNextInactiveLayerBiases, FP_operations_map, FP_operations_list); std::vector<std::string> sink_nodes_with_biases2; model_interpreter.getNextInactiveLayerBiases(model_getNextInactiveLayerBiases, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); BOOST_CHECK_EQUAL(FP_operations_map.size(), 2); BOOST_CHECK_EQUAL(FP_operations_map.at("2"), 0); BOOST_CHECK_EQUAL(FP_operations_map.at("3"), 1); BOOST_CHECK_EQUAL(FP_operations_list.size(), 2); BOOST_CHECK_EQUAL(FP_operations_list[0].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].result.sink_node->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments.size(), 3); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].weight->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].weight->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].source_node->getName(), "6"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].weight->getName(), "4"); BOOST_CHECK_EQUAL(FP_operations_list[1].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].result.sink_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments.size(), 3); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].weight->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[1].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[1].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[1].weight->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[2].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[2].source_node->getName(), "6"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[2].weight->getName(), "5"); BOOST_CHECK_EQUAL(sink_nodes_with_biases2.size(), 2); BOOST_CHECK_EQUAL(sink_nodes_with_biases2[0], "2"); BOOST_CHECK_EQUAL(sink_nodes_with_biases2[1], "3"); } Model<float> model_getNextInactiveLayerCycles = makeModelToy1(); BOOST_AUTO_TEST_CASE(getNextInactiveLayerCycles) { // Toy network: 1 hidden layer, fully connected, DAG // Model<float> model_FC_Sum = makeModelToy1(); ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created // get the next hidden layer std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_getNextInactiveLayerCycles, FP_operations_map, FP_operations_list); std::vector<std::string> sink_nodes_with_biases2; model_interpreter.getNextInactiveLayerBiases(model_getNextInactiveLayerCycles, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); std::set<std::string> sink_nodes_with_cycles; model_interpreter.getNextInactiveLayerCycles(model_getNextInactiveLayerCycles, FP_operations_map, FP_operations_list, sink_nodes_with_cycles); BOOST_CHECK_EQUAL(FP_operations_map.size(), 2); BOOST_CHECK_EQUAL(FP_operations_map.at("2"), 0); BOOST_CHECK_EQUAL(FP_operations_map.at("3"), 1); BOOST_CHECK_EQUAL(FP_operations_list.size(), 2); BOOST_CHECK_EQUAL(FP_operations_list[0].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].result.sink_node->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments.size(), 3); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].weight->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].weight->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].source_node->getName(), "6"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].weight->getName(), "4"); BOOST_CHECK_EQUAL(FP_operations_list[1].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].result.sink_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments.size(), 3); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].weight->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[1].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[1].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[1].weight->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[2].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[2].source_node->getName(), "6"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[2].weight->getName(), "5"); BOOST_CHECK_EQUAL(sink_nodes_with_cycles.size(), 0); } Model<float> model_pruneInactiveLayerCycles = makeModelToy1(); BOOST_AUTO_TEST_CASE(pruneInactiveLayerCycles) { // Toy network: 1 hidden layer, fully connected, DAG // Model<float> model_FC_Sum = makeModelToy1(); ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created // get the next hidden layer std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_pruneInactiveLayerCycles, FP_operations_map, FP_operations_list); std::vector<std::string> sink_nodes_with_biases2; model_interpreter.getNextInactiveLayerBiases(model_pruneInactiveLayerCycles, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); std::set<std::string> sink_nodes_with_cycles; std::map<std::string, int> FP_operations_map_cycles = FP_operations_map; std::vector<OperationList<float>> FP_operations_list_cycles = FP_operations_list; model_interpreter.getNextInactiveLayerCycles(model_pruneInactiveLayerCycles, FP_operations_map_cycles, FP_operations_list_cycles, sink_nodes_with_cycles); model_interpreter.pruneInactiveLayerCycles(model_pruneInactiveLayerCycles, FP_operations_map, FP_operations_map_cycles, FP_operations_list, FP_operations_list_cycles, sink_nodes_with_cycles); BOOST_CHECK_EQUAL(FP_operations_map.size(), 2); BOOST_CHECK_EQUAL(FP_operations_map.at("2"), 0); BOOST_CHECK_EQUAL(FP_operations_map.at("3"), 1); BOOST_CHECK_EQUAL(FP_operations_list.size(), 2); BOOST_CHECK_EQUAL(FP_operations_list[0].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].result.sink_node->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments.size(), 3); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].weight->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].weight->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].source_node->getName(), "6"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].weight->getName(), "4"); BOOST_CHECK_EQUAL(FP_operations_list[1].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].result.sink_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments.size(), 3); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].weight->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[1].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[1].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[1].weight->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[2].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[2].source_node->getName(), "6"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[2].weight->getName(), "5"); } Model<float> model_expandAllForwardPropogationOperations = makeModelToy1(); BOOST_AUTO_TEST_CASE(expandAllForwardPropogationOperations) { ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_expandAllForwardPropogationOperations, FP_operations_map, FP_operations_list); std::vector<std::string> sink_nodes_with_biases2; model_interpreter.getNextInactiveLayerBiases(model_expandAllForwardPropogationOperations, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.expandAllForwardPropogationOperations(FP_operations_list, FP_operations_expanded); BOOST_CHECK_EQUAL(FP_operations_expanded.size(), 6); BOOST_CHECK_EQUAL(FP_operations_expanded[0].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[0].result.sink_node->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_expanded[0].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[0].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[0].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_expanded[0].arguments[0].weight->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_expanded[1].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[1].result.sink_node->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_expanded[1].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[1].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[1].arguments[0].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_expanded[1].arguments[0].weight->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_expanded[2].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[2].result.sink_node->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_expanded[2].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[2].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[2].arguments[0].source_node->getName(), "6"); BOOST_CHECK_EQUAL(FP_operations_expanded[2].arguments[0].weight->getName(), "4"); BOOST_CHECK_EQUAL(FP_operations_expanded[3].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[3].result.sink_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_expanded[3].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[3].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[3].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_expanded[3].arguments[0].weight->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_expanded[4].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[4].result.sink_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_expanded[4].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[4].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[4].arguments[0].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_expanded[4].arguments[0].weight->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_expanded[5].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[5].result.sink_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_expanded[5].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[5].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[5].arguments[0].source_node->getName(), "6"); BOOST_CHECK_EQUAL(FP_operations_expanded[5].arguments[0].weight->getName(), "5"); } Model<float> model_getFPOpsOoO = makeModelToy1(); BOOST_AUTO_TEST_CASE(getFPOpsOoO) { ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created std::vector<OperationList<float>> FP_operations_expanded; int iter = 0; model_interpreter.getFPOpsOoO_(model_getFPOpsOoO, FP_operations_expanded, iter); BOOST_CHECK_EQUAL(iter, 2); BOOST_CHECK_EQUAL(FP_operations_expanded.size(), 12); BOOST_CHECK_EQUAL(FP_operations_expanded[0].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[0].result.sink_node->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_expanded[0].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[0].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[0].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_expanded[0].arguments[0].weight->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_expanded[1].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[1].result.sink_node->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_expanded[1].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[1].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[1].arguments[0].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_expanded[1].arguments[0].weight->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_expanded[2].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[2].result.sink_node->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_expanded[2].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[2].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[2].arguments[0].source_node->getName(), "6"); BOOST_CHECK_EQUAL(FP_operations_expanded[2].arguments[0].weight->getName(), "4"); BOOST_CHECK_EQUAL(FP_operations_expanded[3].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[3].result.sink_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_expanded[3].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[3].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[3].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_expanded[3].arguments[0].weight->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_expanded[4].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[4].result.sink_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_expanded[4].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[4].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[4].arguments[0].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_expanded[4].arguments[0].weight->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_expanded[5].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[5].result.sink_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_expanded[5].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[5].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[5].arguments[0].source_node->getName(), "6"); BOOST_CHECK_EQUAL(FP_operations_expanded[5].arguments[0].weight->getName(), "5"); BOOST_CHECK_EQUAL(FP_operations_expanded[6].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[6].result.sink_node->getName(), "4"); BOOST_CHECK_EQUAL(FP_operations_expanded[6].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[6].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[6].arguments[0].source_node->getName(), "7"); BOOST_CHECK_EQUAL(FP_operations_expanded[6].arguments[0].weight->getName(), "10"); BOOST_CHECK_EQUAL(FP_operations_expanded[7].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[7].result.sink_node->getName(), "4"); BOOST_CHECK_EQUAL(FP_operations_expanded[7].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[7].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[7].arguments[0].source_node->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_expanded[7].arguments[0].weight->getName(), "6"); BOOST_CHECK_EQUAL(FP_operations_expanded[8].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[8].result.sink_node->getName(), "4"); BOOST_CHECK_EQUAL(FP_operations_expanded[8].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[8].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[8].arguments[0].source_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_expanded[8].arguments[0].weight->getName(), "8"); BOOST_CHECK_EQUAL(FP_operations_expanded[9].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[9].result.sink_node->getName(), "5"); BOOST_CHECK_EQUAL(FP_operations_expanded[9].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[9].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[9].arguments[0].source_node->getName(), "7"); BOOST_CHECK_EQUAL(FP_operations_expanded[9].arguments[0].weight->getName(), "11"); BOOST_CHECK_EQUAL(FP_operations_expanded[10].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[10].result.sink_node->getName(), "5"); BOOST_CHECK_EQUAL(FP_operations_expanded[10].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[10].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[10].arguments[0].source_node->getName(), "2"); BOOST_CHECK_EQUAL(FP_operations_expanded[10].arguments[0].weight->getName(), "7"); BOOST_CHECK_EQUAL(FP_operations_expanded[11].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[11].result.sink_node->getName(), "5"); BOOST_CHECK_EQUAL(FP_operations_expanded[11].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[11].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[11].arguments[0].source_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_expanded[11].arguments[0].weight->getName(), "9"); } Model<float> model_getTensorOperations = makeModelToy1(); BOOST_AUTO_TEST_CASE(getTensorOperations) { ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_getTensorOperations, FP_operations_map, FP_operations_list); std::vector<std::string> sink_nodes_with_biases2; model_interpreter.getNextInactiveLayerBiases(model_getTensorOperations, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.expandAllForwardPropogationOperations(FP_operations_list, FP_operations_expanded); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); BOOST_CHECK_EQUAL(identified_sink_nodes.size(), 6); BOOST_CHECK_EQUAL(identified_sink_nodes.count("2/0"), 1); BOOST_CHECK_EQUAL(identified_sink_nodes.count("2/1"), 1); BOOST_CHECK_EQUAL(identified_sink_nodes.count("2/2"), 1); BOOST_CHECK_EQUAL(identified_sink_nodes.count("3/3"), 1); BOOST_CHECK_EQUAL(identified_sink_nodes.count("3/4"), 1); BOOST_CHECK_EQUAL(identified_sink_nodes.count("3/5"), 1); BOOST_CHECK_EQUAL(tensor_ops.size(), 1); BOOST_CHECK_EQUAL(tensor_ops.at("2/0")[0], 0); BOOST_CHECK_EQUAL(tensor_ops.at("2/0")[1], 1); BOOST_CHECK_EQUAL(tensor_ops.at("2/0")[2], 2); BOOST_CHECK_EQUAL(tensor_ops.at("2/0")[3], 3); BOOST_CHECK_EQUAL(tensor_ops.at("2/0")[4], 4); BOOST_CHECK_EQUAL(tensor_ops.at("2/0")[5], 5); } Model<float> model_getForwardPropogationLayerTensorDimensions = makeModelToy1(); BOOST_AUTO_TEST_CASE(getForwardPropogationLayerTensorDimensions) { ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created // change the bias weights to shared model_getForwardPropogationLayerTensorDimensions.links_.at("5")->setWeightName("4"); // Check iteration one with no source/sink/weight tensors already allocated std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_getForwardPropogationLayerTensorDimensions, FP_operations_map, FP_operations_list); std::vector<std::string> sink_nodes_with_biases2; model_interpreter.getNextInactiveLayerBiases(model_getForwardPropogationLayerTensorDimensions, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.expandAllForwardPropogationOperations(FP_operations_list, FP_operations_expanded); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); std::map<int, int> max_layer_sizes; std::map<std::string, int> layer_name_pos; std::vector<int> source_layer_sizes, sink_layer_sizes; std::vector<std::vector<std::pair<int, int>>> weight_indices; std::vector<std::map<std::string, std::vector<std::pair<int, int>>>> shared_weight_indices; std::vector<std::vector<float>> weight_values; std::vector<bool> make_source_tensors, make_sink_tensors, make_weight_tensors; std::vector<int> source_layer_pos, sink_layer_pos; int tensor_layers_cnt = 0; int weight_layers_cnt = 0; model_interpreter.getForwardPropogationLayerTensorDimensions(FP_operations_expanded, tensor_ops, source_layer_sizes, sink_layer_sizes, weight_indices, shared_weight_indices, weight_values, make_source_tensors, make_sink_tensors, make_weight_tensors, source_layer_pos, sink_layer_pos, max_layer_sizes, layer_name_pos, tensor_layers_cnt, weight_layers_cnt); BOOST_CHECK_EQUAL(source_layer_sizes.size(), 1); BOOST_CHECK_EQUAL(source_layer_sizes[0], 3); BOOST_CHECK_EQUAL(sink_layer_sizes.size(), 1); BOOST_CHECK_EQUAL(sink_layer_sizes[0], 2); BOOST_CHECK_EQUAL(source_layer_pos.size(), 1); BOOST_CHECK_EQUAL(source_layer_pos.at(0), 1); BOOST_CHECK_EQUAL(sink_layer_pos.size(), 1); BOOST_CHECK_EQUAL(sink_layer_pos.at(0), 0); BOOST_CHECK_EQUAL(max_layer_sizes.size(), 2); BOOST_CHECK_EQUAL(max_layer_sizes.at(0), 1); BOOST_CHECK_EQUAL(max_layer_sizes.at(1), 2); BOOST_CHECK_EQUAL(layer_name_pos.size(), 0); BOOST_CHECK_EQUAL(weight_indices.size(), 1); BOOST_CHECK_EQUAL(weight_indices[0].size(), 6); std::vector<std::pair<int, int>> weight_indices_test = { std::make_pair(0,0),std::make_pair(1,0),std::make_pair(2,0),std::make_pair(0,1), std::make_pair(1,1),std::make_pair(2,1) }; for (int i = 0; i < weight_indices_test.size(); ++i) { BOOST_CHECK_EQUAL(weight_indices[0][i].first, weight_indices_test[i].first); BOOST_CHECK_EQUAL(weight_indices[0][i].second, weight_indices_test[i].second); } BOOST_CHECK_EQUAL(shared_weight_indices.size(), 1); BOOST_CHECK_EQUAL(shared_weight_indices[0].size(), 1); std::map<std::string, std::vector<std::pair<int, int>>> shared_weight_indices_test = { {"4", {std::make_pair(2,1), std::make_pair(2,0)}} }; for (int i = 0; i < shared_weight_indices_test.at("4").size(); ++i) { BOOST_CHECK_EQUAL(shared_weight_indices[0].at("4")[i].first, shared_weight_indices_test.at("4")[i].first); BOOST_CHECK_EQUAL(shared_weight_indices[0].at("4")[i].second, shared_weight_indices_test.at("4")[i].second); } BOOST_CHECK_EQUAL(weight_values.size(), 1); BOOST_CHECK_EQUAL(weight_values[0].size(), 6); std::vector<float> weight_values_test = { 1, 1, 1, 1, 1, 1 }; for (int i = 0; i < weight_values_test.size(); ++i) { BOOST_CHECK_EQUAL(weight_values[0][i], weight_values_test[i]); } BOOST_CHECK_EQUAL(make_source_tensors.size(), 1); BOOST_CHECK(make_source_tensors[0]); BOOST_CHECK_EQUAL(make_sink_tensors.size(), 1); BOOST_CHECK(make_sink_tensors[0]); BOOST_CHECK_EQUAL(make_weight_tensors.size(), 1); BOOST_CHECK(make_weight_tensors[0]); // Check iteration two model_getForwardPropogationLayerTensorDimensions.getNodesMap().at("2")->setStatus(NodeStatus::activated); model_getForwardPropogationLayerTensorDimensions.getNodesMap().at("3")->setStatus(NodeStatus::activated); FP_operations_map.clear(); FP_operations_list.clear(); model_interpreter.getNextInactiveLayerWOBiases(model_getForwardPropogationLayerTensorDimensions, FP_operations_map, FP_operations_list); sink_nodes_with_biases2.clear(); model_interpreter.getNextInactiveLayerBiases(model_getForwardPropogationLayerTensorDimensions, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); FP_operations_expanded.clear(); model_interpreter.expandAllForwardPropogationOperations(FP_operations_list, FP_operations_expanded); identified_sink_nodes.clear(); tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); max_layer_sizes.clear(); layer_name_pos.clear(); source_layer_sizes.clear(); sink_layer_sizes.clear(); weight_indices.clear(); shared_weight_indices.clear(); weight_values.clear(); make_source_tensors.clear(); make_sink_tensors.clear(); make_weight_tensors.clear(); source_layer_pos.clear(); sink_layer_pos.clear(); tensor_layers_cnt = 0; weight_layers_cnt = 0; model_interpreter.getForwardPropogationLayerTensorDimensions(FP_operations_expanded, tensor_ops, source_layer_sizes, sink_layer_sizes, weight_indices, shared_weight_indices, weight_values, make_source_tensors, make_sink_tensors, make_weight_tensors, source_layer_pos, sink_layer_pos, max_layer_sizes, layer_name_pos, tensor_layers_cnt, weight_layers_cnt); BOOST_CHECK_EQUAL(source_layer_sizes.size(), 2); BOOST_CHECK_EQUAL(source_layer_sizes[0], 2); BOOST_CHECK_EQUAL(source_layer_sizes[1], 1); BOOST_CHECK_EQUAL(sink_layer_sizes.size(), 2); BOOST_CHECK_EQUAL(sink_layer_sizes[0], 2); BOOST_CHECK_EQUAL(sink_layer_sizes[1], 2); BOOST_CHECK_EQUAL(source_layer_pos.size(), 2); BOOST_CHECK_EQUAL(source_layer_pos.at(0), 0); BOOST_CHECK_EQUAL(source_layer_pos.at(1), 1); BOOST_CHECK_EQUAL(sink_layer_pos.size(), 2); BOOST_CHECK_EQUAL(sink_layer_pos.at(0), 0); BOOST_CHECK_EQUAL(sink_layer_pos.at(1), 0); BOOST_CHECK_EQUAL(max_layer_sizes.size(), 2); BOOST_CHECK_EQUAL(max_layer_sizes.at(0), 1); BOOST_CHECK_EQUAL(max_layer_sizes.at(0), 1); BOOST_CHECK_EQUAL(layer_name_pos.size(), 0); BOOST_CHECK_EQUAL(weight_indices.size(), 2); BOOST_CHECK_EQUAL(weight_indices[0].size(), 4); BOOST_CHECK_EQUAL(weight_indices[1].size(), 2); std::vector<std::vector<std::pair<int, int>>> weight_indices_test2 = { {std::make_pair(0,0),std::make_pair(1,0), std::make_pair(0,1),std::make_pair(1,1)}, {std::make_pair(0,0),std::make_pair(0,1)} }; for (int tensor_iter = 0; tensor_iter < weight_indices_test2.size(); ++tensor_iter) { for (int i = 0; i < weight_indices_test2[tensor_iter].size(); ++i) { BOOST_CHECK_EQUAL(weight_indices[tensor_iter][i].first, weight_indices_test2[tensor_iter][i].first); BOOST_CHECK_EQUAL(weight_indices[tensor_iter][i].second, weight_indices_test2[tensor_iter][i].second); } } BOOST_CHECK_EQUAL(shared_weight_indices.size(), 2); BOOST_CHECK_EQUAL(shared_weight_indices[0].size(), 0); BOOST_CHECK_EQUAL(shared_weight_indices[1].size(), 0); BOOST_CHECK_EQUAL(weight_values.size(), 2); BOOST_CHECK_EQUAL(weight_values[0].size(), 4); BOOST_CHECK_EQUAL(weight_values[1].size(), 2); std::vector<std::vector<float>> weight_values_test2 = { { 1, 1, 1, 1}, {1, 1} }; for (int tensor_iter = 0; tensor_iter < weight_values_test2.size(); ++tensor_iter) { for (int i = 0; i < weight_values_test2[tensor_iter].size(); ++i) { BOOST_CHECK_EQUAL(weight_values[tensor_iter][i], weight_values_test2[tensor_iter][i]); } } BOOST_CHECK_EQUAL(make_source_tensors.size(), 2); BOOST_CHECK(!make_source_tensors[0]); BOOST_CHECK(make_source_tensors[1]); BOOST_CHECK_EQUAL(make_sink_tensors.size(), 2); BOOST_CHECK(make_sink_tensors[0]); BOOST_CHECK(!make_sink_tensors[1]); BOOST_CHECK_EQUAL(make_weight_tensors.size(), 2); BOOST_CHECK(make_weight_tensors[0]); BOOST_CHECK(make_weight_tensors[1]); } /* MISSING TEST COVERAGE: 1. no explicit test coverage for `setForwardPropogationLayerTensors_` - would need to break into seperate functions `getForwardPropogationLayerTensorDimensions_` and `allocateForwardPropogationLayerTensors_` in order to properly test 2. no explicit test coverage for `checkFutureOperations_` and `checkPreviousOperations_` */ /* The following tests test the expected `tensor_ops_steps` and `FP_operations` for more complicated model structures that include Dot product attention, Variational Autoencoder, and Convolution networks */ template<typename TensorT> void makeModelSolution(Model<TensorT>& model, const int& n_inputs, const int& n_outputs, bool specify_layers = false) { model.setId(0); model.setName("AddProbAtt-Solution-NoBiases"); // NOTE: Biases will be non-optimally split when layers are specified ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_random = model_builder.addInputNodes(model, "Random", "Random", n_inputs); std::vector<std::string> node_names_mask = model_builder.addInputNodes(model, "Mask", "Mask", n_inputs); std::shared_ptr<SolverOp<TensorT>> solver; std::shared_ptr<WeightInitOp<TensorT>> weight_init; solver.reset(new DummySolverOp<TensorT>()); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)); // Add the hidden layer std::vector<std::string> node_names = model_builder.addSinglyConnected(model, "HiddenR", "HiddenR", node_names_random, n_inputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()), std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>()), weight_init, solver, 0.0f, 0.0f, false, specify_layers); model_builder.addSinglyConnected(model, "HiddenR", node_names_mask, node_names, weight_init, solver, 0.0f, specify_layers); // Add the output layer node_names = model_builder.addFullyConnected(model, "Output", "Output", node_names, n_outputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), weight_init, solver, 0.0f, 0.0f, true, true); // always specify the output layer! for (const std::string& node_name : node_names) model.nodes_.at(node_name)->setType(NodeType::output); } template<typename TensorT> void makeModelAttention(Model<TensorT>& model, const int& n_inputs, const int& n_outputs, std::vector<int> n_heads = { 2, 2 }, std::vector<int> key_query_values_lengths = { 4, 4 }, std::vector<int> model_lengths = { 2, 2 }, bool add_FC = true, bool add_skip = true, bool add_norm = false, bool specify_layers = false) { model.setId(0); model.setName("AddProbAtt-DotProdAtt-NoBiases"); // NOTE: Biases will be non-optimally split when layers are specified ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_random = model_builder.addInputNodes(model, "Random", "Random", n_inputs, specify_layers); // Q and V matrices std::vector<std::string> node_names_mask = model_builder.addInputNodes(model, "Mask", "Mask", n_inputs, specify_layers); // K matrix std::vector<std::string> node_names_input = node_names_random; // initial "input" // Multi-head attention std::vector<std::string> node_names; for (size_t i = 0; i < n_heads.size(); ++i) { // Add the attention std::string name_head1 = "Attention" + std::to_string(i); node_names = model_builder.addMultiHeadAttention(model, name_head1, name_head1, node_names_random, node_names_mask, node_names_random, n_heads[i], "DotProd", model_lengths[i], key_query_values_lengths[i], key_query_values_lengths[i], std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names_input.size(), 2)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, specify_layers); if (add_norm) { std::string norm_name = "Norm" + std::to_string(i); node_names = model_builder.addNormalization(model, norm_name, norm_name, node_names, specify_layers); node_names = model_builder.addSinglyConnected(model, norm_name + "-gain", norm_name + "-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, specify_layers); } if (add_skip) { std::string skip_name = "Skip" + std::to_string(i); model_builder.addSinglyConnected(model, skip_name, node_names_input, node_names, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names_input.size(), 2)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)), 0.0f, specify_layers); } node_names_input = node_names; // Add the feedforward net if (add_FC) { std::string norm_name = "FC" + std::to_string(i); node_names = model_builder.addFullyConnected(model, norm_name, norm_name, node_names_input, n_inputs, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names_input.size(), 2)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, specify_layers); } if (add_norm) { std::string norm_name = "Norm_FC" + std::to_string(i); node_names = model_builder.addNormalization(model, norm_name, norm_name, node_names, specify_layers); node_names = model_builder.addSinglyConnected(model, norm_name + "-gain", norm_name + "-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, specify_layers); } //if (add_skip) { // std::string skip_name = "Skip_FC" + std::to_string(i); // model_builder.addSinglyConnected(model, skip_name, node_names_input, node_names, // std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(n_inputs, 2)), // std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)), 0.0f); //} node_names_input = node_names; } // Add the FC layer node_names = model_builder.addFullyConnected(model, "Output", "Output", node_names, n_outputs, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 2)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, true, true); for (const std::string& node_name : node_names) model.nodes_.at(node_name)->setType(NodeType::output); } template<typename TensorT> void makeModelVAE(Model<TensorT>& model, int n_inputs = 784, int n_encodings = 64, int n_hidden_0 = 512, bool specify_layer = false) { model.setId(0); model.setName("VAE"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layer); // Add the Endocer FC layers std::vector<std::string> node_names, node_names_mu, node_names_logvar; node_names = model_builder.addFullyConnected(model, "EN0", "EN0", node_names_input, n_hidden_0, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), //std::shared_ptr<WeightInitOp<TensorT>>(new RangeWeightInitOp<TensorT>(0, 2 / (int)(node_names_input.size() + node_names.size()))), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names_input.size() + node_names.size()) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8, 10.0)), 0.0f, 0.0f, false, specify_layer); node_names = model_builder.addFullyConnected(model, "EN1", "EN1", node_names, n_hidden_0, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), //std::shared_ptr<WeightInitOp<TensorT>>(new RangeWeightInitOp<TensorT>(0, 2 / (int)(node_names.size() + node_names.size()))), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + node_names.size()) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8, 10.0)), 0.0f, 0.0f, false, specify_layer); node_names_mu = model_builder.addFullyConnected(model, "Mu", "Mu", node_names, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), //std::shared_ptr<WeightInitOp<TensorT>>(new RangeWeightInitOp<TensorT>(0, 2 / (int)(node_names.size() + n_encodings))), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_encodings) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8, 10.0)), 0.0f, 0.0f, false, specify_layer); node_names_logvar = model_builder.addFullyConnected(model, "LogVar", "LogVar", node_names, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), //std::shared_ptr<WeightInitOp<TensorT>>(new RangeWeightInitOp<TensorT>(0, 2 / (int)(node_names.size() + n_encodings))), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_encodings) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8, 10.0)), 0.0f, 0.0f, false, specify_layer); // Specify the output node types manually for (const std::string& node_name : node_names_mu) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_logvar) model.nodes_.at(node_name)->setType(NodeType::output); // Add the Encoding layers std::vector<std::string> node_names_encoder = model_builder.addGaussianEncoding(model, "Encoding", "Encoding", node_names_mu, node_names_logvar, specify_layer); // Add the Decoder FC layers node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names_encoder, n_hidden_0, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), //std::shared_ptr<WeightInitOp<TensorT>>(new RangeWeightInitOp<TensorT>(0, 2 / (int)(node_names_encoder.size() + n_hidden_0))), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names_encoder.size() + n_hidden_0) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8, 10.0)), 0.0f, 0.0f, false, specify_layer); node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names, n_hidden_0, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), //std::shared_ptr<WeightInitOp<TensorT>>(new RangeWeightInitOp<TensorT>(0, 2 / (int)(node_names.size() + n_hidden_0))), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_hidden_0) / 2, 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8, 10.0)), 0.0f, 0.0f, false, specify_layer); node_names = model_builder.addFullyConnected(model, "Output", "Output", node_names, n_inputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), //std::shared_ptr<WeightInitOp<TensorT>>(new RangeWeightInitOp<TensorT>(0, 2 / node_names.size())), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8, 10.0)), 0.0f, 0.0f, false, specify_layer); // Specify the output node types manually for (const std::string& node_name : node_names) model.nodes_.at(node_name)->setType(NodeType::output); } template<typename TensorT> void makeCovNet(Model<TensorT>& model, const int& n_inputs, const int& n_outputs, int n_depth_1 = 32, int n_depth_2 = 2, int n_fc = 128, int filter_size = 5, int pool_size = 2, bool add_norm = false, bool specify_layers = false) { model.setId(0); model.setName("CovNet"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layers); // Add the first convolution -> max pool -> ReLU layers std::vector<std::vector<std::string>> node_names_l0; for (size_t d = 0; d < n_depth_1; ++d) { std::vector<std::string> node_names; std::string conv_name = "Conv0-" + std::to_string(d); node_names = model_builder.addConvolution(model, conv_name, "Conv0-" /*conv_name*/, node_names_input, sqrt(node_names_input.size()), sqrt(node_names_input.size()), 0, 0, filter_size, filter_size, 1, 0, 0, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(n_inputs, 2)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, specify_layers); if (add_norm) { std::string norm_name = "Norm0-" + std::to_string(d); node_names = model_builder.addNormalization(model, norm_name, "Norm0-" /*norm_name*/, node_names, specify_layers); node_names = model_builder.addSinglyConnected(model, norm_name + "-gain", norm_name + "-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, specify_layers); } std::string pool_name = "Pool0-" + std::to_string(d); node_names = model_builder.addConvolution(model, pool_name, "Pool0-" /*pool_name*/, node_names, sqrt(node_names.size()), sqrt(node_names.size()), 1, 1, pool_size, pool_size, 2, 0, 0, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<MaxOp<TensorT>>(MaxOp<float>()), std::make_shared<MaxErrorOp<TensorT>>(MaxErrorOp<TensorT>()), std::make_shared<MaxWeightGradOp<TensorT>>(MaxWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, 0.0, false, specify_layers); node_names_l0.push_back(node_names); } // Add the second convolution -> max pool -> ReLU layers std::vector<std::vector<std::string>> node_names_l1; int l_cnt = 0; for (const std::vector<std::string> &node_names_l : node_names_l0) { for (size_t d = 0; d < n_depth_2; ++d) { std::vector<std::string> node_names; std::string conv_name = "Conv1-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addConvolution(model, conv_name, "Conv1-" /*conv_name*/, node_names_l, sqrt(node_names_l.size()), sqrt(node_names_l.size()), 0, 0, filter_size, filter_size, 1, 0, 0, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(n_inputs, 2)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, specify_layers); if (add_norm) { std::string norm_name = "Norm1-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addNormalization(model, norm_name, "Norm1-" /*norm_name*/, node_names, specify_layers); node_names = model_builder.addSinglyConnected(model, norm_name + "-gain", norm_name + "-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, specify_layers); } std::string pool_name = "Pool1-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addConvolution(model, pool_name, "Pool1-" /*pool_name*/, node_names, sqrt(node_names.size()), sqrt(node_names.size()), 1, 1, pool_size, pool_size, 2, 0, 0, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<MaxOp<TensorT>>(MaxOp<float>()), std::make_shared<MaxErrorOp<TensorT>>(MaxErrorOp<TensorT>()), std::make_shared<MaxWeightGradOp<TensorT>>(MaxWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, 0.0, false, specify_layers); node_names_l1.push_back(node_names); } ++l_cnt; } // Linearize the node names std::vector<std::string> node_names; //for (const std::vector<std::string> &node_names_l : node_names_l0) { for (const std::vector<std::string> &node_names_l : node_names_l1) { for (const std::string &node_name : node_names_l) { node_names.push_back(node_name); } } // Add the FC layers //assert(node_names.size() == 320); node_names = model_builder.addFullyConnected(model, "FC0", "FC0", node_names, n_fc, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(180, 2)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, specify_layers); if (add_norm) { std::string norm_name = "NormFC0"; node_names = model_builder.addNormalization(model, norm_name, norm_name, node_names, specify_layers); node_names = model_builder.addSinglyConnected(model, norm_name + "-gain", norm_name + "-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8)), 0.0, 0.0, true, specify_layers); } node_names = model_builder.addFullyConnected(model, "FC1", "FC1", node_names, n_outputs, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(n_fc, 2)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, false, true); for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); } BOOST_AUTO_TEST_CASE(makeModelSolution1) { ModelInterpreterDefaultDevice<float> model_interpreter; // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model_test; makeModelSolution(model_test, 2, 1, true); int iter_test = 0; std::vector<OperationList<float>> FP_operations_expanded_test; model_interpreter.getFPOpsOoO_(model_test, FP_operations_expanded_test, iter_test); std::set<std::string> identified_sink_nodes_test; std::map<std::string, std::vector<int>> tensor_ops_test = model_interpreter.getTensorOperations(FP_operations_expanded_test, identified_sink_nodes_test, true); // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model; makeModelSolution(model, 2, 1, false); int iter = 0; std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.getFPOpsOoO_(model, FP_operations_expanded, iter); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); BOOST_CHECK_EQUAL(iter_test, iter); BOOST_CHECK(tensor_ops_test == tensor_ops); BOOST_CHECK(identified_sink_nodes_test == identified_sink_nodes); BOOST_CHECK_EQUAL(FP_operations_expanded_test.size(), FP_operations_expanded.size()); if (tensor_ops_test == tensor_ops && identified_sink_nodes_test == identified_sink_nodes && FP_operations_expanded_test.size() == FP_operations_expanded.size()) { for (int i = 0; i < FP_operations_expanded_test.size(); ++i) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.sink_node->getName(), FP_operations_expanded[i].result.sink_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.time_step, FP_operations_expanded[i].result.time_step); for (int j = 0; j < FP_operations_expanded_test[i].arguments.size(); ++j) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].source_node->getName(), FP_operations_expanded[i].arguments[j].source_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].weight->getName(), FP_operations_expanded[i].arguments[j].weight->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].time_step, FP_operations_expanded[i].arguments[j].time_step); } } } } BOOST_AUTO_TEST_CASE(makeModelAttention1) { ModelInterpreterDefaultDevice<float> model_interpreter; // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model_test; makeModelAttention(model_test, 1, 1, { 2 }, { 3 }, { 1 }, false, false, false, true); int iter_test = 0; std::vector<OperationList<float>> FP_operations_expanded_test; model_interpreter.getFPOpsOoO_(model_test, FP_operations_expanded_test, iter_test); std::set<std::string> identified_sink_nodes_test; std::map<std::string, std::vector<int>> tensor_ops_test = model_interpreter.getTensorOperations(FP_operations_expanded_test, identified_sink_nodes_test, true); // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model; makeModelAttention(model, 1, 1, { 2 }, { 3 }, { 1 }, false, false, false, false); int iter = 0; std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.getFPOpsOoO_(model, FP_operations_expanded, iter); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); BOOST_CHECK_EQUAL(iter_test, iter); BOOST_CHECK(tensor_ops_test == tensor_ops); BOOST_CHECK(identified_sink_nodes_test == identified_sink_nodes); BOOST_CHECK_EQUAL(FP_operations_expanded_test.size(), FP_operations_expanded.size()); if (tensor_ops_test == tensor_ops && identified_sink_nodes_test == identified_sink_nodes && FP_operations_expanded_test.size() == FP_operations_expanded.size()) { for (int i = 0; i < FP_operations_expanded_test.size(); ++i) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.sink_node->getName(), FP_operations_expanded[i].result.sink_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.time_step, FP_operations_expanded[i].result.time_step); for (int j = 0; j < FP_operations_expanded_test[i].arguments.size(); ++j) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].source_node->getName(), FP_operations_expanded[i].arguments[j].source_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].weight->getName(), FP_operations_expanded[i].arguments[j].weight->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].time_step, FP_operations_expanded[i].arguments[j].time_step); } } } } BOOST_AUTO_TEST_CASE(makeModelAttention2) { ModelInterpreterDefaultDevice<float> model_interpreter; // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model_test; makeModelAttention(model_test, 1, 1, { 2 }, { 3 }, { 1 }, true, true, false, true); int iter_test = 0; std::vector<OperationList<float>> FP_operations_expanded_test; model_interpreter.getFPOpsOoO_(model_test, FP_operations_expanded_test, iter_test); std::set<std::string> identified_sink_nodes_test; std::map<std::string, std::vector<int>> tensor_ops_test = model_interpreter.getTensorOperations(FP_operations_expanded_test, identified_sink_nodes_test, true); // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model; makeModelAttention(model, 1, 1, { 2 }, { 3 }, { 1 }, true, true, false, false); int iter = 0; std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.getFPOpsOoO_(model, FP_operations_expanded, iter); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); BOOST_CHECK_EQUAL(iter_test, iter); BOOST_CHECK(tensor_ops_test == tensor_ops); BOOST_CHECK(identified_sink_nodes_test == identified_sink_nodes); BOOST_CHECK_EQUAL(FP_operations_expanded_test.size(), FP_operations_expanded.size()); if (tensor_ops_test == tensor_ops && identified_sink_nodes_test == identified_sink_nodes && FP_operations_expanded_test.size() == FP_operations_expanded.size()) { for (int i = 0; i < FP_operations_expanded_test.size(); ++i) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.sink_node->getName(), FP_operations_expanded[i].result.sink_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.time_step, FP_operations_expanded[i].result.time_step); for (int j = 0; j < FP_operations_expanded_test[i].arguments.size(); ++j) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].source_node->getName(), FP_operations_expanded[i].arguments[j].source_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].weight->getName(), FP_operations_expanded[i].arguments[j].weight->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].time_step, FP_operations_expanded[i].arguments[j].time_step); } } } } BOOST_AUTO_TEST_CASE(makeModelAttention3) { ModelInterpreterDefaultDevice<float> model_interpreter; // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model_test; makeModelAttention(model_test, 1, 1, { 2 }, { 3 }, { 1 }, true, true, true, true); int iter_test = 0; std::vector<OperationList<float>> FP_operations_expanded_test; model_interpreter.getFPOpsOoO_(model_test, FP_operations_expanded_test, iter_test); std::set<std::string> identified_sink_nodes_test; std::map<std::string, std::vector<int>> tensor_ops_test = model_interpreter.getTensorOperations(FP_operations_expanded_test, identified_sink_nodes_test, true); // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model; makeModelAttention(model, 1, 1, { 2 }, { 3 }, { 1 }, true, true, true, false); int iter = 0; std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.getFPOpsOoO_(model, FP_operations_expanded, iter); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); BOOST_CHECK_EQUAL(iter_test, iter); BOOST_CHECK(tensor_ops_test == tensor_ops); BOOST_CHECK(identified_sink_nodes_test == identified_sink_nodes); BOOST_CHECK_EQUAL(FP_operations_expanded_test.size(), FP_operations_expanded.size()); if (tensor_ops_test == tensor_ops && identified_sink_nodes_test == identified_sink_nodes && FP_operations_expanded_test.size() == FP_operations_expanded.size()) { for (int i = 0; i < FP_operations_expanded_test.size(); ++i) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.sink_node->getName(), FP_operations_expanded[i].result.sink_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.time_step, FP_operations_expanded[i].result.time_step); for (int j = 0; j < FP_operations_expanded_test[i].arguments.size(); ++j) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].source_node->getName(), FP_operations_expanded[i].arguments[j].source_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].weight->getName(), FP_operations_expanded[i].arguments[j].weight->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].time_step, FP_operations_expanded[i].arguments[j].time_step); } } } } BOOST_AUTO_TEST_CASE(makeModelVAE1) { ModelInterpreterDefaultDevice<float> model_interpreter; // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model_test; makeModelVAE(model_test, 6, 2, 4, true); int iter_test = 0; std::vector<OperationList<float>> FP_operations_expanded_test; model_interpreter.getFPOpsOoO_(model_test, FP_operations_expanded_test, iter_test); std::set<std::string> identified_sink_nodes_test; std::map<std::string, std::vector<int>> tensor_ops_test = model_interpreter.getTensorOperations(FP_operations_expanded_test, identified_sink_nodes_test, true); // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model; makeModelVAE(model, 6, 2, 4, false); int iter = 0; std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.getFPOpsOoO_(model, FP_operations_expanded, iter); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); BOOST_CHECK_EQUAL(iter_test, iter); BOOST_CHECK(tensor_ops_test == tensor_ops); BOOST_CHECK(identified_sink_nodes_test == identified_sink_nodes); BOOST_CHECK_EQUAL(FP_operations_expanded_test.size(), FP_operations_expanded.size()); if (tensor_ops_test == tensor_ops && identified_sink_nodes_test == identified_sink_nodes && FP_operations_expanded_test.size() == FP_operations_expanded.size()) { for (int i = 0; i < FP_operations_expanded_test.size(); ++i) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.sink_node->getName(), FP_operations_expanded[i].result.sink_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.time_step, FP_operations_expanded[i].result.time_step); for (int j = 0; j < FP_operations_expanded_test[i].arguments.size(); ++j) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].source_node->getName(), FP_operations_expanded[i].arguments[j].source_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].weight->getName(), FP_operations_expanded[i].arguments[j].weight->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].time_step, FP_operations_expanded[i].arguments[j].time_step); } } } } BOOST_AUTO_TEST_CASE(makeModelCovNet1) { ModelInterpreterDefaultDevice<float> model_interpreter; // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model_test; makeCovNet(model_test, 4, 2, 2, 2, 3, 2, 2, false, true); int iter_test = 0; std::vector<OperationList<float>> FP_operations_expanded_test; model_interpreter.getFPOpsOoO_(model_test, FP_operations_expanded_test, iter_test); std::set<std::string> identified_sink_nodes_test; std::map<std::string, std::vector<int>> tensor_ops_test = model_interpreter.getTensorOperations(FP_operations_expanded_test, identified_sink_nodes_test, true); // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model; makeCovNet(model, 4, 2, 2, 2, 3, 2, 2, false, false); int iter = 0; std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.getFPOpsOoO_(model, FP_operations_expanded, iter); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); BOOST_CHECK_EQUAL(iter_test, iter); BOOST_CHECK(tensor_ops_test == tensor_ops); BOOST_CHECK(identified_sink_nodes_test == identified_sink_nodes); BOOST_CHECK_EQUAL(FP_operations_expanded_test.size(), FP_operations_expanded.size()); if (tensor_ops_test == tensor_ops && identified_sink_nodes_test == identified_sink_nodes && FP_operations_expanded_test.size() == FP_operations_expanded.size()) { for (int i = 0; i < FP_operations_expanded_test.size(); ++i) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.sink_node->getName(), FP_operations_expanded[i].result.sink_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.time_step, FP_operations_expanded[i].result.time_step); for (int j = 0; j < FP_operations_expanded_test[i].arguments.size(); ++j) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].source_node->getName(), FP_operations_expanded[i].arguments[j].source_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].weight->getName(), FP_operations_expanded[i].arguments[j].weight->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].time_step, FP_operations_expanded[i].arguments[j].time_step); } } } } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELINTERPRETERFILE_H #define EVONET_MODELINTERPRETERFILE_H // .cpp #include <cereal/archives/binary.hpp> #include <fstream> #include <EvoNet/io/CSVWriter.h> namespace EvoNet { /** @brief ModelInterpreterFile */ template<typename TensorT, typename InterpreterT> class ModelInterpreterFile { public: ModelInterpreterFile() = default; ///< Default constructor ~ModelInterpreterFile() = default; ///< Default destructor /** @brief store ModelInterpreter from file @param filename The name of the model_interpreter file @param model_interpreter The model_interpreter to store @returns Status True on success, False if not */ static bool storeModelInterpreterBinary(const std::string& filename, const InterpreterT& model_interpreter); static bool storeModelInterpreterCsv(const std::string& filename, const InterpreterT& model_interpreter); /** @brief load Model from file @param filename The name of the model_interpreter file @param model_interpreter The model_interpreter to load data into @returns Status True on success, False if not */ static bool loadModelInterpreterBinary(const std::string& filename, InterpreterT& model_interpreter); }; template<typename TensorT, typename InterpreterT> bool ModelInterpreterFile<TensorT, InterpreterT>::storeModelInterpreterBinary(const std::string & filename, const InterpreterT& model_interpreter) { std::ofstream ofs(filename, std::ios::binary); //if (ofs.is_open() == false) {// Lines check to make sure the file is not already created cereal::BinaryOutputArchive oarchive(ofs); oarchive(model_interpreter); ofs.close(); //}// Lines check to make sure the file is not already created return true; } template<typename TensorT, typename InterpreterT> inline bool ModelInterpreterFile<TensorT, InterpreterT>::storeModelInterpreterCsv(const std::string & filename, const InterpreterT& model_interpreter) { CSVWriter csvwriter(filename); // write the headers to the first line const std::vector<std::string> headers = { "Operation", "source_node_name", "source_node_timestep", "weight_name", "sink_node_name", "sink_node_timestep" }; csvwriter.writeDataInRow(headers.begin(), headers.end()); for (const auto& tensor_ops_step : model_interpreter.getTensorOpsSteps()) { for (const auto& tensor_op_map : tensor_ops_step) { for (const auto& tensor_op : tensor_op_map.second) { auto FP_operations = model_interpreter.getFPOperations(); std::string sink_node_name = FP_operations[tensor_op].result.sink_node->getName(); int sink_node_timestep = FP_operations[tensor_op].result.time_step; for (const auto& argument : FP_operations[tensor_op].arguments) { std::vector<std::string> row; row.push_back(tensor_op_map.first); row.push_back(argument.source_node->getName()); row.push_back(std::to_string(argument.time_step)); row.push_back(argument.weight->getName()); row.push_back(sink_node_name); row.push_back(std::to_string(sink_node_timestep)); // write to file csvwriter.writeDataInRow(row.begin(), row.end()); } } } } return true; } template<typename TensorT, typename InterpreterT> bool ModelInterpreterFile<TensorT, InterpreterT>::loadModelInterpreterBinary(const std::string & filename, InterpreterT& model_interpreter) { std::ifstream ifs(filename, std::ios::binary); if (ifs.is_open()) { cereal::BinaryInputArchive iarchive(ifs); iarchive(model_interpreter); ifs.close(); } return true; } } #endif //EVONET_MODELINTERPRETERFILE_H<file_sep># -------------------------------------------------------------------------- # EvoNet: an evolutionary approach to optimize any task # -------------------------------------------------------------------------- # Copyright The EvoNet Team # 2018-2022. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING # INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # -------------------------------------------------------------------------- # $Maintainer: <NAME> $ # $Authors: <NAME> $ # -------------------------------------------------------------------------- # doc generation for EvoNet lib # with doxygen (XML) and sphinx for (RtD) # doxygen find_package(Doxygen REQUIRED) set(EVONET_PUBLIC_HEADER_DIR ${PROJECT_SOURCE_DIR}/../src/evonet/include/EvoNet) get_target_property(${EVONET_PUBLIC_HEADER_DIR} EvoNet INTERFACE_INCLUDE_DIRECTORIES) file(GLOB_RECURSE EVONET_PUBLIC_HEADERS LIST_DIRECTORIES true ${EVONET_PUBLIC_HEADER_DIR}/*.h) set(DOXYGEN_INPUT_DIR ${PROJECT_SOURCE_DIR}/../src/evonet/) set(DOXYGEN_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/docs) set(DOXYGEN_INDEX_FILE ${DOXYGEN_OUTPUT_DIR}/html/index.html ) set(DOXYFILE_IN ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in) set(DOXYFILE_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile) configure_file(${DOXYFILE_IN} ${DOXYFILE_OUT} @ONLY) file(MAKE_DIRECTORY ${DOXYGEN_OUTPUT_DIR}) add_custom_command( OUTPUT ${DOXYGEN_INDEX_FILE} DEPENDS ${EVONET_PUBLIC_HEADERS} COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYFILE_OUT} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} MAIN_DEPENDENCY ${DOXYFILE_IN} ${DOXYFILE_OUT} COMMENT "Generating docs for EvoNet lib") add_custom_target(libdoc-doxygen ALL DEPENDS ${DOXYGEN_INDEX_FILE}) #sphinx find_package(Sphinx REQUIRED) set(SPHINX_SOURCE ${CMAKE_CURRENT_SOURCE_DIR}) set(SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR}/docs/sphinx) set(SPHINX_INDEX_FILE ${SPHINX_BUILD}/index.html) execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${CMAKE_CURRENT_SOURCE_DIR}/../images ${CMAKE_CURRENT_SOURCE_DIR}/images) add_custom_command( OUTPUT ${SPHINX_INDEX_FILE} COMMAND ${SPHINX_EXECUTABLE} -b html -Dbreathe_projects.EvoNet=${DOXYGEN_OUTPUT_DIR}/xml ${SPHINX_SOURCE} ${SPHINX_BUILD} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/index.rst ${DOXYGEN_INDEX_FILE} MAIN_DEPENDENCY ${SPHINX_SOURCE}/conf.py COMMENT "Generating sphinx docs for EvoNet lib") add_custom_target(docs ALL DEPENDS ${SPHINX_INDEX_FILE}) <file_sep>/**TODO: Add copyright*/ #ifndef EVONET_CSVWRITER_H #define EVONET_CSVWRITER_H #include <unsupported/Eigen/CXX11/Tensor> #include <iostream> #include <fstream> #include <vector> namespace EvoNet { /** @brief CSVWriter based on the following: http://thispointer.com/how-to-write-data-in-a-csv-file-in-c/ */ class CSVWriter { public: CSVWriter(); ///< Default constructor ~CSVWriter(); ///< Default destructor CSVWriter(const std::string& filename, std::string delm = ","); void setFilename(const std::string& filename); ///< filename setter std::string getFilename() const; ///< filename getter void setDelimeter(const std::string& delimeter); ///< delimeter setter std::string getDelimeter() const; ///< delimeter getter void setLineCount(const int& line_count); ///< line_count setter int getLineCount() const; ///< line_count getter /** @brief This Function accepts a range and appends all the elements in the range to the last row, seperated by delimeter (Default is comma) @param first Iterator to the first element @param last Iterator to the last element */ template<typename T> void writeDataInRow(T first, T last) { std::fstream file; // Open the file in truncate mode if first line else in Append Mode file.open(filename_, std::ios::out | (line_count_ ? std::ios::app : std::ios::trunc)); // Iterate over the range and add each lement to file seperated by delimeter. for (; first != last; ) { file << *first; if (++first != last) file << delimeter_; } file << "\n"; line_count_++; // Close the file file.close(); } private: std::string filename_; std::string delimeter_; int line_count_; }; } #endif //EVONET_CSVWRITER_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE BiochemicalDataSimulator test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/simulator/BiochemicalDataSimulator.h> #include <EvoNet/test_config.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(biochemicalreaction) template <typename TensorT> class BiochemicalDataSimulatorTest : public BiochemicalDataSimulator<TensorT> { public: void makeTrainingDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_training, const std::vector<std::string>& labels_training, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) override {} void makeValidationDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_validation, const std::vector<std::string>& labels_validation, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) override {} void getTrainingDataFromCache(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { this->getTrainingDataFromCache_(input_data, loss_output_data, metric_output_data, time_steps); } void getValidationDataFromCache(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { this->getValidationDataFromCache_(input_data, loss_output_data, metric_output_data, time_steps); } }; BOOST_AUTO_TEST_CASE(constructor) { BiochemicalDataSimulatorTest<float>* ptr = nullptr; BiochemicalDataSimulatorTest<float>* nullPointer = nullptr; ptr = new BiochemicalDataSimulatorTest<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { BiochemicalDataSimulatorTest<float>* ptr = nullptr; ptr = new BiochemicalDataSimulatorTest<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(transformTrainingAndValidationDataOffline) { BiochemicalDataSimulatorTest<float> biochemicalDataSimulator; // Make the dummy training/validation data const int n_features = 2; const int n_samples_training = 4; const int n_samples_validation = 2; Eigen::Tensor<float, 2> data_training(n_features, n_samples_training); Eigen::Tensor<float, 2> data_validation(n_features, n_samples_validation); Eigen::Tensor<float, 2> data_training_expected(n_features, n_samples_training); Eigen::Tensor<float, 2> data_validation_expected(n_features, n_samples_validation); // Test without user defined parameters (no transformation) data_training.setValues({ {0, 1, 2, 3},{4, 5, 6, 7} }); data_validation.setValues({ {0, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOffline(data_training, data_validation, false, false, false); data_training_expected.setValues({ {0, 1, 2, 3},{4, 5, 6, 7} }); data_validation_expected.setValues({ {0, 1}, {4, 5} }); for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i,j), data_training_expected(i,j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } // Test without user defined parameters (Linear Scale) data_training.setValues({ {0, 1, 2, 3},{4, 5, 6, 7} }); data_validation.setValues({ {0, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOffline(data_training, data_validation, true, false, false); data_training_expected.setValues({ {0, 0.142857149, 0.285714298, 0.428571433},{0.571428597, 0.714285731, 0.857142866, 1} }); data_validation_expected.setValues({ {0, 0.142857149}, {0.571428597, 0.714285731} }); for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i, j), data_training_expected(i, j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } // Test without user defined parameters (Log Transformation) data_training.setValues({ {0.5, 1, 2, 3},{4, 5, 6, 7} }); data_validation.setValues({ {0.5, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOffline(data_training, data_validation, false, true, false); data_training_expected.setValues({ {-0.69314718, 0, 0.69314718, 1.09861229},{1.38629436, 1.60943791, 1.79175947, 1.94591015} }); data_validation_expected.setValues({ {-0.69314718, 0}, {1.38629436, 1.60943791} }); for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i, j), data_training_expected(i, j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } // Test without user defined parameters (Standardization) data_training.setValues({ {0, 1, 2, 3},{4, 5, 6, 7} }); data_validation.setValues({ {0, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOffline(data_training, data_validation, false, false, true); data_training_expected.setValues({ {-1.42886901, -1.0206207, -0.612372398, -0.204124138}, {0.204124138, 0.612372398, 1.0206207, 1.42886901} }); data_validation_expected.setValues({ {-1.42886901, -1.0206207}, {0.204124138, 0.612372398} }); for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i, j), data_training_expected(i, j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } // Test without user defined parameters (Log transformation + standardization + linearization) data_training.setValues({ {0.5, 1, 2, 3},{4, 5, 6, 7} }); data_validation.setValues({ {0.5, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOffline(data_training, data_validation, true, true, true); data_training_expected.setValues({ {0, 0.262649536, 0.525299072, 0.678939164}, {0.787948549, 0.872502863, 0.9415887, 1} }); data_validation_expected.setValues({ {0, 0.262649536}, {0.787948549, 0.872502863} }); //todo for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i, j), data_training_expected(i, j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } // Test with user defined parameters (no transformation) data_training.setValues({ {0, 1, 2, 3},{4, 5, 6, 7} }); data_validation.setValues({ {0, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOffline(data_training, data_validation, false, false, false, true, -1, 1, true, 0, 2); data_training_expected.setValues({ {0, 1, 2, 3},{4, 5, 6, 7} }); data_validation_expected.setValues({ {0, 1}, {4, 5} }); for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i, j), data_training_expected(i, j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } // Test with user defined parameters (Linear Scale) data_training.setValues({ {0, 1, 2, 3},{4, 5, 6, 7} }); data_validation.setValues({ {0, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOffline(data_training, data_validation, true, false, false, true, -7, 7, true, 0, 2); data_training_expected.setValues({ {0.5, 0.571428597, 0.642857134, 0.714285731}, {0.785714269, 0.857142866, 0.928571403, 1} }); data_validation_expected.setValues({ {0.5, 0.571428597}, {0.785714269, 0.857142866} }); for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i, j), data_training_expected(i, j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } // Test with user defined parameters (Log Transformation) data_training.setValues({ {0.5, 1, 2, 3},{4, 5, 6, 7} }); data_validation.setValues({ {0.5, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOffline(data_training, data_validation, false, true, false, true, -7, 7, true, 0, 2); data_training_expected.setValues({ {-0.69314718, 0, 0.69314718, 1.09861229},{1.38629436, 1.60943791, 1.79175947, 1.94591015} }); data_validation_expected.setValues({ {-0.69314718, 0}, {1.38629436, 1.60943791} }); for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i, j), data_training_expected(i, j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } // Test with user defined parameters (Standardization) data_training.setValues({ {0, 1, 2, 3}, {4, 5, 6, 7} }); data_validation.setValues({ {0, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOffline(data_training, data_validation, false, false, true, true, -7, 7, true, 0, 2); data_training_expected.setValues({ {0, 0.707106769, 1.41421354, 2.12132049}, {2.82842708, 3.53553391, 4.24264097, 4.94974756} }); data_validation_expected.setValues({ {0, 0.707106769}, {2.82842708, 3.53553391} }); for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i, j), data_training_expected(i, j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } // Test with user defined parameters (Log transformation + standardization + linearization) data_training.setValues({ {0.5, 1, 2, 3},{4, 5, 6, 7} }); data_validation.setValues({ {0.5, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOffline(data_training, data_validation, true, true, true, true, -7, 7, true, 0, 2); data_training_expected.setValues({ {0.464990795, 0.5, 0.535009205, 0.555488288}, {0.570018411, 0.581288874, 0.590497494, 0.598283291} }); data_validation_expected.setValues({ {0.464990795, 0.5}, {0.570018411, 0.581288874} }); for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i, j), data_training_expected(i, j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } } BOOST_AUTO_TEST_CASE(transformTrainingAndValidationDataOnline) { BiochemicalDataSimulatorTest<float> biochemicalDataSimulator; // Make the dummy training/validation data const int n_features = 2; const int n_samples_training = 4; const int n_samples_validation = 2; Eigen::Tensor<float, 2> data_training(n_features, n_samples_training); Eigen::Tensor<float, 2> data_validation(n_features, n_samples_validation); Eigen::Tensor<float, 2> data_training_expected(n_features, n_samples_training); Eigen::Tensor<float, 2> data_validation_expected(n_features, n_samples_validation); // Test without user defined parameters (no transformation) data_training.setValues({ {0, 1, 2, 3},{4, 5, 6, 7} }); data_validation.setValues({ {0, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOnline(data_training, data_validation, false, false, false); data_training_expected.setValues({ {0, 1, 2, 3},{4, 5, 6, 7} }); data_validation_expected.setValues({ {0, 1}, {4, 5} }); for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i, j), data_training_expected(i, j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } // Test without user defined parameters (Linear Scale) data_training.setValues({ {0, 1, 2, 3},{4, 5, 6, 7} }); data_validation.setValues({ {0, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOnline(data_training, data_validation, true, false, false); data_training_expected.setValues({ {0, 0, 0, 0},{1, 1, 1, 1} }); data_validation_expected.setValues({ {0, 0}, {1, 1} }); for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i, j), data_training_expected(i, j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } // Test without user defined parameters (Log Transformation) data_training.setValues({ {0.5, 1, 2, 3},{4, 5, 6, 7} }); data_validation.setValues({ {0.5, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOnline(data_training, data_validation, false, true, false); data_training_expected.setValues({ {-0.69314718, 0, 0.69314718, 1.09861229},{1.38629436, 1.60943791, 1.79175947, 1.94591015} }); data_validation_expected.setValues({ {-0.69314718, 0}, {1.38629436, 1.60943791} }); for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i, j), data_training_expected(i, j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } // Test without user defined parameters (Standardization) data_training.setValues({ {0, 1, 2, 3},{4, 5, 6, 7} }); data_validation.setValues({ {0, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOnline(data_training, data_validation, false, false, true); data_training_expected.setValues({ {-0.707106769, -0.707106769, -0.707106769, -0.707106769}, {0.707106769, 0.707106769, 0.707106769, 0.707106769} }); data_validation_expected.setValues({ {-0.707106769, -0.707106769}, {0.707106769, 0.707106769} }); for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i, j), data_training_expected(i, j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } // Test without user defined parameters (Log transformation + standardization + linearization) data_training.setValues({ {0.5, 1, 2, 3},{4, 5, 6, 7} }); data_validation.setValues({ {0.5, 1}, {4, 5} }); biochemicalDataSimulator.transformTrainingAndValidationDataOnline(data_training, data_validation, true, true, true); data_training_expected.setValues({ {0, 0, 0, 0},{1, 1, 1, 1} }); data_validation_expected.setValues({ {0, 0}, {1, 1} }); for (int i = 0; i < n_features; ++i) { for (int j = 0; j < n_samples_training; ++j) { BOOST_CHECK_CLOSE(data_training(i, j), data_training_expected(i, j), 1e-4); } for (int j = 0; j < n_samples_validation; ++j) { BOOST_CHECK_CLOSE(data_validation(i, j), data_validation_expected(i, j), 1e-4); } } } BOOST_AUTO_TEST_CASE(getTrainingDataFromCache) { BiochemicalDataSimulatorTest<float> biochemicalDataSimulator; } BOOST_AUTO_TEST_CASE(getValidationDataFromCache) { BiochemicalDataSimulatorTest<float> biochemicalDataSimulator; } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/PopulationTrainerDefaultDevice.h> #include <EvoNet/ml/ModelTrainerDefaultDevice.h> #include <EvoNet/ml/ModelReplicator.h> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/ml/Model.h> #include <EvoNet/io/PopulationTrainerFile.h> #include <EvoNet/simulator/MNISTSimulator.h> #include <EvoNet/core/Preprocessing.h> #include <fstream> #include <unsupported/Eigen/CXX11/Tensor> using namespace EvoNet; /** * EXAMPLES using the MNIST data set */ // Extended classes template<typename TensorT> class ModelTrainerExt : public ModelTrainerDefaultDevice<TensorT> { public: Model<TensorT> makeModel() { return Model<TensorT>(); } void adaptiveTrainerScheduler( const int& n_generations, const int& n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, const std::vector<float>& model_errors) { if (n_epochs > 10000) { // update the solver parameters std::shared_ptr<SolverOp<TensorT>> solver; solver.reset(new AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)); for (auto& weight_map : model.getWeightsMap()) if (weight_map.second->getSolverOp()->getName() == "AdamOp<TensorT>") weight_map.second->setSolverOp(solver); } } }; template<typename TensorT> class DataSimulatorExt : public MNISTSimulator<TensorT> { public: void simulateEvaluationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 3>& time_steps) {}; void simulateTrainingData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); assert(n_output_nodes == this->validation_labels.dimension(1)); assert(n_input_nodes == this->validation_data.dimension(1)); // make a vector of sample_indices [BUG FREE] Eigen::Tensor<int, 1> sample_indices = this->getTrainingIndices(batch_size, n_epochs); // Reformat the input data for training [BUG FREE] for (int batch_iter = 0; batch_iter<batch_size; ++batch_iter) for (int memory_iter = 0; memory_iter<memory_size; ++memory_iter) for (int nodes_iter = 0; nodes_iter<this->training_data.dimension(1); ++nodes_iter) for (int epochs_iter = 0; epochs_iter<n_epochs; ++epochs_iter) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = this->training_data(sample_indices[epochs_iter*batch_size + batch_iter], nodes_iter); //input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = this->training_data(sample_indices[0], nodes_iter); // test on only 1 sample // reformat the output data for training [BUG FREE] for (int batch_iter = 0; batch_iter<batch_size; ++batch_iter) for (int memory_iter = 0; memory_iter<memory_size; ++memory_iter) for (int nodes_iter = 0; nodes_iter<this->training_labels.dimension(1); ++nodes_iter) for (int epochs_iter = 0; epochs_iter<n_epochs; ++epochs_iter) output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = (TensorT)this->training_labels(sample_indices[epochs_iter*batch_size + batch_iter], nodes_iter); //output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = (TensorT)this->training_labels(sample_indices[0], nodes_iter); // test on only 1 sample time_steps.setConstant(1.0f); } void simulateValidationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); assert(n_output_nodes == this->validation_labels.dimension(1)); assert(n_input_nodes == this->validation_data.dimension(1)); // make the start and end sample indices [BUG FREE] Eigen::Tensor<int, 1> sample_indices = this->getValidationIndices(batch_size, n_epochs); // Reformat the input data for validation [BUG FREE] for (int batch_iter = 0; batch_iter<batch_size; ++batch_iter) for (int memory_iter = 0; memory_iter<memory_size; ++memory_iter) for (int nodes_iter = 0; nodes_iter<this->validation_data.dimension(1); ++nodes_iter) for (int epochs_iter = 0; epochs_iter<n_epochs; ++epochs_iter) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = this->validation_data(sample_indices[epochs_iter*batch_size + batch_iter], nodes_iter); // reformat the output data for validation [BUG FREE] for (int batch_iter = 0; batch_iter<batch_size; ++batch_iter) for (int memory_iter = 0; memory_iter<memory_size; ++memory_iter) for (int nodes_iter = 0; nodes_iter<this->validation_labels.dimension(1); ++nodes_iter) for (int epochs_iter = 0; epochs_iter<n_epochs; ++epochs_iter) output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = (TensorT)this->validation_labels(sample_indices[epochs_iter*batch_size + batch_iter], nodes_iter); time_steps.setConstant(1.0f); } }; template<typename TensorT> class ModelReplicatorExt : public ModelReplicator<TensorT> { public: void adaptiveReplicatorScheduler( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) { // TODO } }; template<typename TensorT> class PopulationTrainerExt : public PopulationTrainerDefaultDevice<TensorT> { public: void adaptivePopulationScheduler( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) { // Population size of 16 if (n_generations == 0) { this->setNTop(3); this->setNRandom(3); this->setNReplicatesPerModel(15); } else { this->setNTop(3); this->setNRandom(3); this->setNReplicatesPerModel(3); } } }; void main_EvoNet() { PopulationTrainerExt<float> population_trainer; population_trainer.setNGenerations(5); population_trainer.setLogging(true); // define the population logger PopulationLogger<float> population_logger(true, true); const int n_threads = 8; // define the model trainers and resources for the trainers std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; for (size_t i = 0; i < n_threads; ++i) { ModelResources model_resources = { ModelDevice(0, 1) }; ModelInterpreterDefaultDevice<float> model_interpreter(model_resources); model_interpreters.push_back(model_interpreter); } ModelTrainerExt<float> model_trainer; model_trainer.setBatchSize(8); model_trainer.setMemorySize(1); model_trainer.setNEpochsTraining(50); model_trainer.setNEpochsValidation(50); model_trainer.setLogging(false, false); // define the model logger ModelLogger<float> model_logger; // define the data simulator const std::size_t input_size = 784; const std::size_t training_data_size = 1000; //60000; const std::size_t validation_data_size = 100; //10000; DataSimulatorExt<float> data_simulator; // read in the training data // const std::string training_data_filename = "C:/Users/domccl/GitHub/mnist/train-images.idx3-ubyte"; // const std::string training_labels_filename = "C:/Users/domccl/GitHub/mnist/train-labels.idx1-ubyte"; const std::string training_data_filename = "/home/user/data/train-images-idx3-ubyte"; const std::string training_labels_filename = "/home/user/data/train-labels-idx1-ubyte"; data_simulator.readData(training_data_filename, training_labels_filename, true, training_data_size, input_size); // read in the validation data // const std::string validation_data_filename = "C:/Users/domccl/GitHub/mnist/t10k-images.idx3-ubyte"; // const std::string validation_labels_filename = "C:/Users/domccl/GitHub/mnist/t10k-labels.idx1-ubyte"; const std::string validation_data_filename = "/home/user/data/t10k-images-idx3-ubyte"; const std::string validation_labels_filename = "/home/user/data/t10k-labels-idx1-ubyte"; data_simulator.readData(validation_data_filename, validation_labels_filename, false, validation_data_size, input_size); data_simulator.unitScaleData(); // Make the input nodes std::vector<std::string> input_nodes; for (int i = 0; i < input_size; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } // Make the output nodes std::vector<std::string> output_nodes; for (int i = 0; i < data_simulator.mnist_labels.size(); ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes.push_back(name); } // define the model replicator for growth mode ModelReplicatorExt<float> model_replicator; // define the initial population [BUG FREE] std::cout << "Initializing the population..." << std::endl; std::vector<Model<float>> population; const int population_size = 1; for (int i = 0; i < population_size; ++i) { // baseline model std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; weight_init.reset(new RandWeightInitOp<float>(input_nodes.size())); solver.reset(new AdamOp<float>(0.01, 0.9, 0.999, 1e-8)); std::shared_ptr<LossFunctionOp<float>> loss_function(new MSELossOp<float>()); std::shared_ptr<LossFunctionGradOp<float>> loss_function_grad(new MSELossGradOp<float>()); Model<float> model; //TODO model.setId(i); population.push_back(model); } // Evolve the population std::vector<std::vector<std::tuple<int, std::string, float>>> models_validation_errors_per_generation = population_trainer.evolveModels( population, model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, population_logger, input_nodes); PopulationTrainerFile<float> population_trainer_file; population_trainer_file.storeModels(population, "SequencialMNIST"); population_trainer_file.storeModelValidations("SequencialMNISTErrors.csv", models_validation_errors_per_generation); } int main(int argc, char** argv) { // run the application main_EvoNet(); return 0; }<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_LOSSFUNCTION_H #define EVONET_LOSSFUNCTION_H #include <EvoNet/core/Preprocessing.h> #include <unsupported/Eigen/CXX11/Tensor> #include <cmath> #include <random> #include <iostream> namespace EvoNet { /** @brief Base class loss function. */ template<typename TensorT> class LossFunctionOp { public: LossFunctionOp() = default; LossFunctionOp(const TensorT& eps, const TensorT& scale) : eps_(eps), scale_(scale) {}; ~LossFunctionOp() = default; virtual std::string getName() = 0; virtual std::vector<TensorT> getParameters() const = 0; protected: TensorT eps_ = TensorT(1e-6); TensorT scale_ = TensorT(1); }; /** @brief Base class loss function gradient. */ template<typename TensorT> class LossFunctionGradOp { public: LossFunctionGradOp() = default; LossFunctionGradOp(const TensorT& eps, const TensorT& scale) : eps_(eps), scale_(scale) {}; ~LossFunctionGradOp() = default; virtual std::string getName() = 0; virtual std::vector<TensorT> getParameters() const = 0; protected: TensorT eps_ = (TensorT)1e-6; TensorT scale_ = (TensorT)1.0; }; /** @brief ManhattanDistance loss function. */ template<typename TensorT> class ManhattanDistanceLossOp : public LossFunctionOp<TensorT> { public: using LossFunctionOp<TensorT>::LossFunctionOp; std::string getName() { return "ManhattanDistanceLossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_}); } }; /** @brief ManhattanDistance loss function gradient. */ template<typename TensorT> class ManhattanDistanceLossGradOp : public LossFunctionGradOp<TensorT> { public: using LossFunctionGradOp<TensorT>::LossFunctionGradOp; std::string getName() { return "ManhattanDistanceLossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_}); } }; /** @brief L2Norm loss function. */ template<typename TensorT> class L2NormLossOp : public LossFunctionOp<TensorT> { public: using LossFunctionOp<TensorT>::LossFunctionOp; std::string getName() { return "L2NormLossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_}); } }; /** @brief L2Norm loss function gradient. */ template<typename TensorT> class L2NormLossGradOp : public LossFunctionGradOp<TensorT> { public: using LossFunctionGradOp<TensorT>::LossFunctionGradOp; std::string getName() { return "L2NormLossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_}); } }; /** @brief Binary Cross Entropy loss function. */ template<typename TensorT> class BCELossOp : public LossFunctionOp<TensorT> { public: using LossFunctionOp<TensorT>::LossFunctionOp; std::string getName() { return "BCELossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_}); } }; /** @brief Binary Cross Entropy loss function gradient. The derivative of -(z * log(x) + (1 - z)*log(1-x)) is the following = (1-z)/(1-x) - z/x = -(x-z)/((x-1)*x) */ template<typename TensorT> class BCELossGradOp : public LossFunctionGradOp<TensorT> { public: using LossFunctionGradOp<TensorT>::LossFunctionGradOp; std::string getName() { return "BCELossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_}); } }; /** @brief NegativeLogLikelihood loss function. NOTES: implemented as the following: def CrossEntropy(yHat, y): if y == 1: return -log(yHat) else: return -log(1 - yHat) */ template<typename TensorT> class NegativeLogLikelihoodLossOp : public LossFunctionOp<TensorT> { public: using LossFunctionOp<TensorT>::LossFunctionOp; std::string getName() { return "NegativeLogLikelihoodLossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_}); } }; /** @brief NegativeLogLikelihood loss function gradient. */ template<typename TensorT> class NegativeLogLikelihoodLossGradOp : public LossFunctionGradOp<TensorT> { public: using LossFunctionGradOp<TensorT>::LossFunctionGradOp; std::string getName() { return "NegativeLogLikelihoodLossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_}); } }; /** @brief MSE Mean Squared Error loss function. */ template<typename TensorT> class MSELossOp : public LossFunctionOp<TensorT> { public: using LossFunctionOp<TensorT>::LossFunctionOp; std::string getName() { return "MSELossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_}); } }; /** @brief MSE Mean Squared Error loss function gradient. */ template<typename TensorT> class MSELossGradOp : public LossFunctionGradOp<TensorT> { public: using LossFunctionGradOp<TensorT>::LossFunctionGradOp; std::string getName() { return "MSELossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_}); } }; /** @brief MAE Mean Absolute Error loss function. */ template<typename TensorT> class MAELossOp : public LossFunctionOp<TensorT> { public: using LossFunctionOp<TensorT>::LossFunctionOp; std::string getName() { return "MAELossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_ }); } }; /** @brief MAE Mean Abasolute Error loss function gradient. */ template<typename TensorT> class MAELossGradOp : public LossFunctionGradOp<TensorT> { public: using LossFunctionGradOp<TensorT>::LossFunctionGradOp; std::string getName() { return "MAELossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_ }); } }; /** @brief MRSE Mean Roote Squared Error loss function. */ template<typename TensorT> class MRSELossOp : public LossFunctionOp<TensorT> { public: using LossFunctionOp<TensorT>::LossFunctionOp; std::string getName() { return "MRSELossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_ }); } }; /** @brief MRSE Mean Root Squared Error loss function gradient. */ template<typename TensorT> class MRSELossGradOp : public LossFunctionGradOp<TensorT> { public: using LossFunctionGradOp<TensorT>::LossFunctionGradOp; std::string getName() { return "MRSELossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_ }); } }; /** @brief MLE Mean Logarithmic Error loss function. */ template<typename TensorT> class MLELossOp : public LossFunctionOp<TensorT> { public: using LossFunctionOp<TensorT>::LossFunctionOp; std::string getName() { return "MLELossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_ }); } }; /** @brief MLE Mean Logarithmic Error loss function gradient. */ template<typename TensorT> class MLELossGradOp : public LossFunctionGradOp<TensorT> { public: using LossFunctionGradOp<TensorT>::LossFunctionGradOp; std::string getName() { return "MLELossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_ }); } }; /** @brief KLDivergenceMu loss function. References <NAME> Welling. Auto-Encoding Variational Bayes. ICLR, 2014 https://arxiv.org/abs/1312.6114 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) */ template<typename TensorT> class KLDivergenceMuLossOp : public LossFunctionOp<TensorT> { public: KLDivergenceMuLossOp() = default; KLDivergenceMuLossOp(const TensorT & eps, const TensorT & scale, const TensorT& capacity) : LossFunctionOp<TensorT>(eps, scale), capacity_(capacity) {}; ~KLDivergenceMuLossOp() = default; std::string getName() { return "KLDivergenceMuLossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_, this->capacity_}); } private: TensorT capacity_ = TensorT(0); }; /** @brief KLDivergenceMu loss function gradient. */ template<typename TensorT> class KLDivergenceMuLossGradOp : public LossFunctionGradOp<TensorT> { public: KLDivergenceMuLossGradOp() = default; KLDivergenceMuLossGradOp(const TensorT & eps, const TensorT & scale, const TensorT & capacity) : LossFunctionGradOp<TensorT>(eps, scale), capacity_(capacity) {}; ~KLDivergenceMuLossGradOp() = default; std::string getName() { return "KLDivergenceMuLossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_, this->capacity_ }); } private: TensorT capacity_ = TensorT(0); }; /** @brief KLDivergenceLogVar loss function. References <NAME> Welling. Auto-Encoding Variational Bayes. ICLR, 2014 https://arxiv.org/abs/1312.6114 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) */ template<typename TensorT> class KLDivergenceLogVarLossOp : public LossFunctionOp<TensorT> { public: KLDivergenceLogVarLossOp() = default; KLDivergenceLogVarLossOp(const TensorT & eps, const TensorT & scale, const TensorT & capacity) : LossFunctionOp<TensorT>(eps, scale), capacity_(capacity) {}; ~KLDivergenceLogVarLossOp() = default; std::string getName() { return "KLDivergenceLogVarLossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_, this->capacity_ }); } private: TensorT capacity_ = TensorT(0); }; /** @brief KLDivergenceLogVar loss function gradient. */ template<typename TensorT> class KLDivergenceLogVarLossGradOp : public LossFunctionGradOp<TensorT> { public: KLDivergenceLogVarLossGradOp() = default; KLDivergenceLogVarLossGradOp(const TensorT & eps, const TensorT & scale, const TensorT & capacity) : LossFunctionGradOp<TensorT>(eps, scale), capacity_(capacity) {}; ~KLDivergenceLogVarLossGradOp() = default; std::string getName() { return "KLDivergenceLogVarLossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_, this->capacity_ }); } private: TensorT capacity_ = TensorT(0); }; /** @brief BCEWithLogits loss function. Binary Cross Entropy with integrated sigmoid layer z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) = (1 - z) * x + log(1 + exp(-x)) = x - x * z + log(1 + exp(-x)) References: https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss PyTorch implementation: max_val = (-input).clamp(min=0) loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log() TensorFlow implementation: max(x, 0) - x * z + log(1 + exp(-abs(x))) */ template<typename TensorT> class BCEWithLogitsLossOp : public LossFunctionOp<TensorT> { public: using LossFunctionOp<TensorT>::LossFunctionOp; std::string getName() { return "BCEWithLogitsLossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_}); } }; /** @brief BCEWithLogits loss function gradient. Starting from the following BCEWithLogits formula x - x * z + log(1 + exp(-x)) The derivative with respect to x can be formulated as 1 - z + 1/(1 + exp(-x))*(-exp(-x)) = -((z - 1)*exp(x) + z)/(exp(x) + 1) */ template<typename TensorT> class BCEWithLogitsLossGradOp : public LossFunctionGradOp<TensorT> { public: using LossFunctionGradOp<TensorT>::LossFunctionGradOp; std::string getName() { return "BCEWithLogitsLossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_}); } }; /** @brief CrossEntropyWithLogits loss function. */ template<typename TensorT> class CrossEntropyWithLogitsLossOp : public LossFunctionOp<TensorT> { public: using LossFunctionOp<TensorT>::LossFunctionOp; std::string getName() { return "CrossEntropyWithLogitsLossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_}); } }; /** @brief CrossEntropyWithLogits loss function gradient. */ template<typename TensorT> class CrossEntropyWithLogitsLossGradOp : public LossFunctionGradOp<TensorT> { public: using LossFunctionGradOp<TensorT>::LossFunctionGradOp; std::string getName() { return "CrossEntropyWithLogitsLossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->eps_, this->scale_}); } }; /** @brief MSE Mean Squared Error loss function for when a value is not within a specified range. */ template<typename TensorT> class MSERangeUBLossOp : public LossFunctionOp<TensorT> { public: using LossFunctionOp<TensorT>::LossFunctionOp; std::string getName() { return "MSERangeUBLossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_ }); } }; /** @brief MSE Mean Squared Error loss function gradient for when a value is not within a specified range. */ template<typename TensorT> class MSERangeUBLossGradOp : public LossFunctionGradOp<TensorT> { public: using LossFunctionGradOp<TensorT>::LossFunctionGradOp; std::string getName() { return "MSERangeUBLossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_ }); } }; /** @brief MSE Mean Squared Error loss function for when a value is not within a specified range. */ template<typename TensorT> class MSERangeLBLossOp : public LossFunctionOp<TensorT> { public: using LossFunctionOp<TensorT>::LossFunctionOp; std::string getName() { return "MSERangeLBLossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_ }); } }; /** @brief MSE Mean Squared Error loss function gradient for when a value is not within a specified range. */ template<typename TensorT> class MSERangeLBLossGradOp : public LossFunctionGradOp<TensorT> { public: using LossFunctionGradOp<TensorT>::LossFunctionGradOp; std::string getName() { return "MSERangeLBLossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_ }); } }; /** @brief KLDivergenceCat loss function. References Maddison 2017 The concrete distribution Jang 2017 Categorical reparameterization with Gumbel-softmax Dupont 2018 Learning disentangled joint continuous and discrete representations KLD = alpha * log(alpha) + log(n) where n is the number of categories */ template<typename TensorT> class KLDivergenceCatLossOp : public LossFunctionOp<TensorT> { public: KLDivergenceCatLossOp() = default; KLDivergenceCatLossOp(const TensorT & eps, const TensorT & scale, const TensorT & capacity) : LossFunctionOp<TensorT>(eps, scale), capacity_(capacity) {}; ~KLDivergenceCatLossOp() = default; std::string getName() { return "KLDivergenceCatLossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_, this->capacity_ }); } private: TensorT capacity_ = TensorT(0); }; /** @brief KLDivergenceCat loss function gradient. */ template<typename TensorT> class KLDivergenceCatLossGradOp : public LossFunctionGradOp<TensorT> { public: KLDivergenceCatLossGradOp() = default; KLDivergenceCatLossGradOp(const TensorT & eps, const TensorT & scale, const TensorT & capacity) : LossFunctionGradOp<TensorT>(eps, scale), capacity_(capacity) {}; ~KLDivergenceCatLossGradOp() = default; std::string getName() { return "KLDivergenceCatLossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_, this->capacity_ }); } private: TensorT capacity_ = TensorT(0); }; /** @brief MAPELoss loss function */ template<typename TensorT> class MAPELossOp : public LossFunctionOp<TensorT> { public: using LossFunctionOp<TensorT>::LossFunctionOp; std::string getName() { return "MAPELossOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_ }); } }; /** @brief MAPELoss loss function gradient. */ template<typename TensorT> class MAPELossGradOp : public LossFunctionGradOp<TensorT> { public: using LossFunctionGradOp<TensorT>::LossFunctionGradOp; std::string getName() { return "MAPELossGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->eps_, this->scale_ }); } }; /** @brief Hinge loss function. Typically used for classification NOTES: implemented as the following: def Hinge(yHat, y): return np.max(0, 1 - yHat * y) */ } #endif //EVONET_LOSSFUNCTION_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ModelBuilderCpu test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/ml/ModelInterpreterDefaultDevice.h> using namespace EvoNet; using namespace std; template <typename TensorT> void trainModel(Model<TensorT>& model, const std::vector<std::string>& input_node_names, const std::vector<std::string>& output_node_names, const Eigen::Tensor<float, 3>& input_values, Eigen::Tensor<float, 2> output_values, const int& batch_size, const int& memory_size, std::shared_ptr<LossFunctionTensorOp<TensorT, Eigen::DefaultDevice>>& loss_function, std::shared_ptr<LossFunctionGradTensorOp<TensorT, Eigen::DefaultDevice>>& loss_function_grad) { // Interpret the model ModelInterpreterDefaultDevice<TensorT> model_interpreter; model_interpreter.getForwardPropogationOperations(model, batch_size, memory_size, true, true, true, true); model_interpreter.allocateModelErrorTensor(batch_size, memory_size, 0); // Assign the input data model_interpreter.mapValuesToLayers(model, input_values, input_node_names, "output"); model_interpreter.mapValuesToLayers(model, input_values, input_node_names, "input"); model_interpreter.initBiases(model); // create the bias model_interpreter.executeForwardPropogationOperations(0); //FP // calculate the model error and node output error const int layer_id = model.getNodesMap().at(output_node_names.front())->getTensorIndex().first; model_interpreter.executeModelErrorOperations(output_values, layer_id, loss_function, loss_function_grad, 0); model_interpreter.executeBackwardPropogationOperations(0); // BP model_interpreter.executeWeightErrorOperations(); // Weight error model_interpreter.executeWeightUpdateOperations(0); // Weight update // retrieve the results model_interpreter.getModelResults(model, true, true, true, false); } BOOST_AUTO_TEST_SUITE(ModelBuilderCpu1) BOOST_AUTO_TEST_CASE(addFullyConnected1) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 2; const int output_size = 2; // make the input std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", input_size, true); // make the fully connected std::vector<std::string>node_names_output = model_builder.addFullyConnected(model, "Output", "Output", node_names_input, output_size, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 0.0f, 0.0f, true, true); // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, input_size); input_values.setConstant(1); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setConstant(0); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0)<<std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0), 2, 1e-4); // test for the expected node outputs std::vector<float> output_values_test = { 2, 2 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-4); } // test for the expected weights std::vector<std::string> weight_names = { "Output-bias_000000000000_to_Output_000000000000", "Output-bias_000000000001_to_Output_000000000001", "Input_000000000000_to_Output_000000000000", "Input_000000000000_to_Output_000000000001", "Input_000000000000_to_Output_000000000000", "Input_000000000000_to_Output_000000000001" }; std::vector<float> weight_values_test = { 0, 0, 0.9, 0.9, 0.9, 0.9 }; for (int i = 0; i < weight_names.size();++i) { //std::cout << weight_names.at(i) << " Weight: " << model.getWeightsMap().at(weight_names.at(i))->getWeight() << std::endl; BOOST_CHECK_CLOSE(model.getWeightsMap().at(weight_names.at(i))->getWeight(), weight_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_CASE(addSinglyConnected1) { ModelBuilder<float> model_builder; Model<float> model; std::vector<std::string> node_names; // make the input node_names = model_builder.addInputNodes(model, "Input", "Input", 2); // make the fully connected node_names = model_builder.addSinglyConnected(model, "Hidden", "Mod1", node_names, 2, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<ProdOp<float>>(ProdOp<float>()), std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()), std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 0.2f, 0.8f); std::vector<std::string> node_names_test = { "Hidden_000000000000", "Hidden-bias_000000000000", "Hidden_000000000001", "Hidden-bias_000000000001" }; std::vector<std::string> link_names_test = { "Hidden-bias_000000000000_to_Hidden_000000000000", "Hidden-bias_000000000001_to_Hidden_000000000001", "Input_000000000000_to_Hidden_000000000000", "Input_000000000000_to_Hidden_000000000000"}; std::vector<std::string> weight_names_test = { "Hidden-bias_000000000000_to_Hidden_000000000000", "Hidden-bias_000000000001_to_Hidden_000000000001", "Input_000000000000_to_Hidden_000000000000", "Input_000000000000_to_Hidden_000000000000"}; // TODO... } BOOST_AUTO_TEST_CASE(addSoftMax) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 2; const int output_size = 2; // make the input std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", input_size, true); // make the fully connected std::vector<std::string> node_names_output = model_builder.addSoftMax(model, "SoftMax", "Mod1", node_names_input, true); // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, input_size); input_values.setValues({ {{1, 4}} }); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setValues({ {0.0474259, 0.952574} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0), 3.72271658e-15, 1e-4); // test for the expected node outputs std::vector<float> output_values_test = { 0.0474259, 0.952574 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-4); } // test for the expected weights std::vector<std::string> weight_names = { "Input_000000000000_to_SoftMax-In_000000000000", "SoftMax-In_000000000000_to_SoftMax-Sum", "SoftMax-In_000000000000_to_SoftMax-Out_000000000000", "SoftMax-Sum_to_SoftMax-Out_000000000000", "Input_000000000001_to_SoftMax-In_000000000001", "SoftMax-In_000000000001_to_SoftMax-Sum", "SoftMax-In_000000000001_to_SoftMax-Out_000000000001", "SoftMax-Sum_to_SoftMax-Out_000000000001" }; std::vector<float> weight_values_test = { 1, 1, 1, 1, 1, 1, 1, 1 }; for (int i = 0; i < weight_names.size(); ++i) { //std::cout << weight_names.at(i) << " Weight: " << model.getWeightsMap().at(weight_names.at(i))->getWeight() << std::endl; BOOST_CHECK_CLOSE(model.getWeightsMap().at(weight_names.at(i))->getWeight(), weight_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_CASE(addStableSoftMax) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 2; const int output_size = 2; // make the input std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", input_size, true); // make the softmax std::vector<std::string> node_names_output = model_builder.addStableSoftMax(model, "SoftMax", "Mod1", node_names_input, true); // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, input_size); input_values.setValues({ {{1, 4}} }); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setValues({ {0.0474259, 0.952574} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0)<<std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0), 3.72271658e-15, 1e-4); // test for the expected node outputs std::vector<float> output_values_test = { 0.0474259, 0.952574 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-4); } // test for the expected weights std::vector<std::string> weight_names = { "Input_000000000000_to_SoftMax-In_000000000000", "SoftMax-In_000000000000_to_SoftMax-Sum", "SoftMax-In_000000000000_to_SoftMax-Out_000000000000", "SoftMax-Sum_to_SoftMax-Out_000000000000", "Input_000000000000_to_SoftMax-Max", "SoftMax-Max_to_SoftMax-In_000000000000", "Input_000000000001_to_SoftMax-In_000000000001", "SoftMax-In_000000000001_to_SoftMax-Sum", "SoftMax-In_000000000001_to_SoftMax-Out_000000000001", "SoftMax-Sum_to_SoftMax-Out_000000000001", "Input_000000000001_to_SoftMax-Max", "SoftMax-Max_to_SoftMax-In_000000000001" }; std::vector<float> weight_values_test = { 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1 }; for (int i = 0; i < weight_names.size(); ++i) { //std::cout << weight_names.at(i) << " Weight: " << model.getWeightsMap().at(weight_names.at(i))->getWeight() << std::endl; BOOST_CHECK_CLOSE(model.getWeightsMap().at(weight_names.at(i))->getWeight(), weight_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_CASE(addConvolution1) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 16; const int output_size = 9; // make the input std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", input_size); // make the fully connected std::vector<std::string> node_names_output = model_builder.addConvolution( model, "Filter", "Mod1", node_names_input, 4, 4, 0, 0, 2, 2, 1, 0, 0, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 0.0f, 0.0f, true, true, true); // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, input_size); input_values.setValues({ {{1, 2, 1, 2, 0, 0, 1, 2, 1, 2, 0, 0, 1, 2, 1, 2}} }); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setValues({ {0, 0, 0, 0, 0, 0, 0, 0, 0} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0), 13.2778, 1e-3); // test for the expected node outputs std::vector<float> output_values_test = { 4, 5, 7, 4, 4, 4, 7, 6, 4 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-4); } // test for the expected weights std::vector<std::string> weight_names = { "Filter-bias_to_out", "Filter-Mod1_H000000000000-W000000000000", "Filter-Mod1_H000000000001-W000000000000", "Filter-Mod1_H000000000000-W000000000001", "Filter-Mod1_H000000000001-W000000000001" }; std::vector<float> weight_values_test = { 0.5, 0.511111, 0.411111, 0.533333, 0.388889 }; for (int i = 0; i < weight_names.size(); ++i) { //std::cout << weight_names.at(i) << " Weight: " << model.getWeightsMap().at(weight_names.at(i))->getWeight() << std::endl; BOOST_CHECK_CLOSE(model.getWeightsMap().at(weight_names.at(i))->getWeight(), weight_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_CASE(addConvolution1WithoutSharedWeights) { ModelBuilder<float> model_builder; Model<float> model; std::vector<std::string> node_names; // make the input node_names = model_builder.addInputNodes(model, "Input", "Input", 16); // make the fully connected node_names = model_builder.addConvolution( model, "Filter", "Mod1", node_names, 4, 4, 0, 0, 2, 2, 1, 0, 0, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 0.2f, 0.8f, true, true, false); std::vector<std::string> weight_names_bias = {"Filter-out_H000000000000-W000000000000-bias_to_Filter-out_H000000000000-W000000000000_Mod1", "Filter-out_H000000000000-W000000000001-bias_to_Filter-out_H000000000000-W000000000001_Mod1","Filter-out_H000000000000-W000000000002-bias_to_Filter-out_H000000000000-W000000000002_Mod1", "Filter-out_H000000000001-W000000000000-bias_to_Filter-out_H000000000001-W000000000000_Mod1","Filter-out_H000000000001-W000000000001-bias_to_Filter-out_H000000000001-W000000000001_Mod1", "Filter-out_H000000000001-W000000000002-bias_to_Filter-out_H000000000001-W000000000002_Mod1","Filter-out_H000000000002-W000000000000-bias_to_Filter-out_H000000000002-W000000000000_Mod1", "Filter-out_H000000000002-W000000000001-bias_to_Filter-out_H000000000002-W000000000001_Mod1","Filter-out_H000000000002-W000000000002-bias_to_Filter-out_H000000000002-W000000000002_Mod1"}; std::vector<std::string> weight_names_test = { "Input_000000000000_to_Filter-out_H000000000000-W000000000000_Mod1","Input_000000000001_to_Filter-out_H000000000000-W000000000000_Mod1", "Input_000000000001_to_Filter-out_H000000000001-W000000000000_Mod1","Input_000000000002_to_Filter-out_H000000000001-W000000000000_Mod1", "Input_000000000002_to_Filter-out_H000000000002-W000000000000_Mod1","Input_000000000003_to_Filter-out_H000000000002-W000000000000_Mod1", "Input_000000000004_to_Filter-out_H000000000000-W000000000000_Mod1","Input_000000000004_to_Filter-out_H000000000000-W000000000001_Mod1", "Input_000000000005_to_Filter-out_H000000000000-W000000000000_Mod1","Input_000000000005_to_Filter-out_H000000000000-W000000000001_Mod1", "Input_000000000005_to_Filter-out_H000000000001-W000000000000_Mod1","Input_000000000005_to_Filter-out_H000000000001-W000000000001_Mod1", "Input_000000000006_to_Filter-out_H000000000001-W000000000000_Mod1","Input_000000000006_to_Filter-out_H000000000001-W000000000001_Mod1", "Input_000000000006_to_Filter-out_H000000000002-W000000000000_Mod1","Input_000000000006_to_Filter-out_H000000000002-W000000000001_Mod1", "Input_000000000007_to_Filter-out_H000000000002-W000000000000_Mod1","Input_000000000007_to_Filter-out_H000000000002-W000000000001_Mod1", "Input_000000000008_to_Filter-out_H000000000000-W000000000001_Mod1","Input_000000000008_to_Filter-out_H000000000000-W000000000002_Mod1", "Input_000000000009_to_Filter-out_H000000000000-W000000000001_Mod1","Input_000000000009_to_Filter-out_H000000000000-W000000000002_Mod1", "Input_000000000009_to_Filter-out_H000000000001-W000000000001_Mod1","Input_000000000009_to_Filter-out_H000000000001-W000000000002_Mod1", "Input_000000000010_to_Filter-out_H000000000001-W000000000001_Mod1","Input_000000000010_to_Filter-out_H000000000001-W000000000002_Mod1", "Input_000000000010_to_Filter-out_H000000000002-W000000000001_Mod1","Input_000000000010_to_Filter-out_H000000000002-W000000000002_Mod1", "Input_000000000011_to_Filter-out_H000000000002-W000000000001_Mod1","Input_000000000011_to_Filter-out_H000000000002-W000000000002_Mod1", "Input_000000000012_to_Filter-out_H000000000000-W000000000002_Mod1","Input_000000000013_to_Filter-out_H000000000000-W000000000002_Mod1", "Input_000000000013_to_Filter-out_H000000000001-W000000000002_Mod1","Input_000000000014_to_Filter-out_H000000000001-W000000000002_Mod1", "Input_000000000014_to_Filter-out_H000000000002-W000000000002_Mod1","Input_000000000015_to_Filter-out_H000000000002-W000000000002_Mod1" }; } BOOST_AUTO_TEST_CASE(addConvolution2) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 16; const int output_size = 9; // make the input std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", input_size, true); // make the fully connected std::vector<std::string> node_names_output = model_builder.addConvolution( model, "Filter", "Mod1", node_names_input, 4, 4, 2, 2, 4, 4, 2, 0, 0, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 0.0f, 0.0f, true, true, true); // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, input_size); input_values.setValues({ {{1, 2, 1, 2, 0, 0, 1, 2, 1, 2, 0, 0, 1, 2, 1, 2}} }); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setValues({ {0, 0, 0, 0, 0, 0, 0, 0, 0} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0), 62.8333, 1e-3); // test for the expected node outputs std::vector<float> output_values_test = { 4, 10, 10, 10, 19, 16, 7, 10, 7 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-4); } // test for the expected weights std::vector<std::string> weight_names = { "Filter-bias_to_out", "Filter-Mod1_H000000000000-W000000000000", "Filter-Mod1_H000000000001-W000000000000", "Filter-Mod1_H000000000000-W000000000001", "Filter-Mod1_H000000000001-W000000000001" }; std::vector<float> weight_values_test = { -0.0333334, 0.5, 0, 0.633333, 0.26666671 }; for (int i = 0; i < weight_names.size(); ++i) { //std::cout << weight_names.at(i) << " Weight: " << model.getWeightsMap().at(weight_names.at(i))->getWeight() << std::endl; BOOST_CHECK_CLOSE(model.getWeightsMap().at(weight_names.at(i))->getWeight(), weight_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_CASE(addConvolution3) { ModelBuilder<float> model_builder; Model<float> model; std::vector<std::string> node_names_input, node_names; // make the input node_names_input = model_builder.addInputNodes(model, "Input", "Input", 16); // make the convolution layer node_names = model_builder.addConvolution( model, "Filter", "Mod1", node_names_input, 4, 4, 2, 2, 2, 2, 1, 1, 1, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 0.2f, 0.8f); // add a second filter model_builder.addConvolution( model, "Filter", "Mod2", node_names_input, node_names, 4, 4, 2, 2, 2, 2, 1, 1, 1, std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 0.2f, 0.8f); std::vector<std::string> node_names_test = { "Filter-bias" }; std::vector<std::string> weight_names_test = { "Filter-bias_to_out", "Filter-Mod1_H000000000000-W000000000000", "Filter-Mod1_H000000000001-W000000000000", "Filter-Mod1_H000000000000-W000000000001", "Filter-Mod1_H000000000001-W000000000001", "Filter-Mod2_H000000000000-W000000000000", "Filter-Mod2_H000000000001-W000000000000", "Filter-Mod2_H000000000000-W000000000001", "Filter-Mod2_H000000000001-W000000000001" }; // TODO... } BOOST_AUTO_TEST_CASE(addNormalization1) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 5; const int output_size = 5; // make the input std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", input_size, true); // make the normalization std::vector<std::string> node_names_output = model_builder.addNormalization(model, "Norm", "Mod1", node_names_input, true); // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, input_size); input_values.setValues({ {{1, 2, 3, 4, 5}} }); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setValues({ {-1.414213562,-0.707106781,0,0.707106781,1.414213562} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0), 0, 1e-4); // test for the expected node outputs std::vector<float> output_values_test = { -1.414213562,-0.707106781,0,0.707106781,1.414213562 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-4); } // test for the expected weights std::vector<std::string> weight_names = { "Norm-Mean_to_Input_000000000000-SourceMinMean","Norm-Mean_to_Input_000000000001-SourceMinMean", "Input_000000000000-SourceMinMean_to_Input_000000000000-Normalized", "Input_000000000000-SourceMinMean_to_Norm-Variance","Input_000000000000_to_Input_000000000000-SourceMinMean","Input_000000000000_to_Norm-Mean", "Input_000000000001-SourceMinMean_to_Input_000000000001-Normalized", "Input_000000000001-SourceMinMean_to_Norm-Variance","Input_000000000001_to_Input_000000000001-SourceMinMean","Input_000000000001_to_Norm-Mean", "Norm-Variance_to_Input_000000000000-Normalized","Norm-Variance_to_Input_000000000001-Normalized" }; std::vector<float> weight_values_test = { -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; for (int i = 0; i < weight_names.size(); ++i) { //std::cout << weight_names.at(i) << " Weight: " << model.getWeightsMap().at(weight_names.at(i))->getWeight() << std::endl; BOOST_CHECK_CLOSE(model.getWeightsMap().at(weight_names.at(i))->getWeight(), weight_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_CASE(addUnitScale1) { ModelBuilder<float> model_builder; Model<float> model; std::vector<std::string> node_names; // make the input node_names = model_builder.addInputNodes(model, "Input", "Input", 2); // make the normalization node_names = model_builder.addUnitScale(model, "Norm", "Mod1", node_names); std::vector<std::string> node_names_test = { "Norm-Min", "Norm-Max", "Norm-Scalar", "Input_000000000000-UnitScaled", "Input_000000000001-UnitScaled"}; std::vector<std::string> link_names_test = { "Input_000000000000_to_Norm-Max","Input_000000000000_to_Norm-Min","Input_000000000001_to_Norm-Max","Input_000000000001_to_Norm-Min", "Norm-Max_to_Norm-Scalar","Norm-Min_to_Norm-Scalar", "Norm-Scalar_to_Input_000000000000-UnitScaled","Norm-Scalar_to_Input_000000000001-UnitScaled" }; // TODO } BOOST_AUTO_TEST_CASE(addLinearScale1) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 2; const int output_size = 2; // make the input std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", input_size, true); // make the normalization std::vector<std::string> node_names_output = model_builder.addLinearScale(model, "Norm", "Mod1", node_names_input, 0, 1, true); // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, input_size); input_values.setValues({ {{1, 4}} }); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setValues({ {0, 1} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0),0, 1e-4); // test for the expected node outputs std::vector<float> output_values_test = { 0, 1 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-4); } // test for the expected weights std::vector<std::string> weight_names = { "Input_000000000000-DomainMinOffset_to_Input_000000000000-DomainScaled","Input_000000000000-DomainScaled_to_Input_000000000000-RangeMaxMinScale", "Input_000000000000-RangeMaxMinScale_to_Input_000000000000-LinearScaleFunctor","Input_000000000000_to_Input_000000000000-DomainMinOffset", "Input_000000000000_to_Norm-Max","Input_000000000000_to_Norm-Min","Input_000000000001-DomainMinOffset_to_Input_000000000001-DomainScaled", "Input_000000000001-DomainScaled_to_Input_000000000001-RangeMaxMinScale","Input_000000000001-RangeMaxMinScale_to_Input_000000000001-LinearScaleFunctor", "Input_000000000001_to_Input_000000000001-DomainMinOffset","Input_000000000001_to_Norm-Max","Input_000000000001_to_Norm-Min", "Mod1-RangeMinBias_to_Input_000000000000-LinearScaleFunctor","Mod1-RangeMinBias_to_Input_000000000001-LinearScaleFunctor","Norm-Max_to_Norm-Scalar", "Norm-Min_to_Input_000000000000-DomainMinOffset","Norm-Min_to_Input_000000000001-DomainMinOffset","Norm-Min_to_Norm-Scalar", "Norm-Scalar_to_Input_000000000000-DomainScaled","Norm-Scalar_to_Input_000000000001-DomainScaled","Mod1-RangeMaxMinBias_to_Input_000000000000-RangeMaxMinScale", "Mod1-RangeMaxMinBias_to_Input_000000000001-RangeMaxMinScale" }; std::vector<float> weight_values_test = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, -1, -1, -1, 1, 1, 1, 1 }; for (int i = 0; i < weight_names.size(); ++i) { //std::cout << weight_names.at(i) << " Weight: " << model.getWeightsMap().at(weight_names.at(i))->getWeight() << std::endl; BOOST_CHECK_CLOSE(model.getWeightsMap().at(weight_names.at(i))->getWeight(), weight_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_CASE(addGaussianEncoding) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 2; const int output_size = 2; // make the input std::vector<std::string> mu_node_names = model_builder.addInputNodes(model, "Mu", "Mu", input_size, true); std::vector<std::string> logvar_node_names = model_builder.addInputNodes(model, "LogVar", "LogVar", input_size, true); // make the Gaussian encoding std::vector<std::string> node_names_output = model_builder.addGaussianEncoding(model, "Encoding", "Mod1", mu_node_names, logvar_node_names, true); // define the input nodes std::vector<std::string> node_names_input; for (int i = 0; i < input_size; ++i) node_names_input.push_back(mu_node_names.at(i)); for (int i = 0; i < input_size; ++i) node_names_input.push_back(logvar_node_names.at(i)); for (int i = 0; i < input_size; ++i) { char name_char[512]; sprintf(name_char, "Encoding_%012d-Sampler", i); std::string name(name_char); node_names_input.push_back(name); } // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, 3*input_size); input_values.setValues({ {{1, 2, 0.1, 0.2, -0.1, 0.1}} }); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setValues({ {0, 0} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0), 1.31376994, 1e-4); // test for the expected node outputs std::vector<float> output_values_test = { 0.894872904, 2.11051702 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-4); } // test for the expected weights std::vector<std::string> weight_names = { "LogVar_000000000000_to_LogVar_000000000000-Scalar","Encoding_000000000000-Sampler_to_LogVar_000000000000-StdDev", "LogVar_000000000001_to_LogVar_000000000001-Scalar","Encoding_000000000001-Sampler_to_LogVar_000000000001-StdDev", "LogVar_000000000000-StdDev_to_Encoding_000000000000","Mu_000000000000_to_Encoding_000000000000", "LogVar_000000000001-StdDev_to_Encoding_000000000001","Mu_000000000001_to_Encoding_000000000001" }; std::vector<float> weight_values_test = { 0.5, 1, 0.5, 1, 1, 1, 1, 1 }; for (int i = 0; i < weight_names.size(); ++i) { //std::cout << weight_names.at(i) << " Weight: " << model.getWeightsMap().at(weight_names.at(i))->getWeight() << std::endl; BOOST_CHECK_CLOSE(model.getWeightsMap().at(weight_names.at(i))->getWeight(), weight_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_CASE(addCategoricalEncoding) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 2; const int output_size = 2; // make the input std::vector<std::string> alpha_node_names = model_builder.addInputNodes(model, "Alpha", "Alpha", input_size, true); // make the normalization std::vector<std::string> node_names_output = model_builder.addCategoricalEncoding(model, "Encoding", "Mod1", alpha_node_names, true); // define the input nodes std::vector<std::string> node_names_input; for (int i = 0; i < input_size; ++i) node_names_input.push_back(alpha_node_names.at(i)); for (int i = 0; i < input_size; ++i) { char name_char[512]; sprintf(name_char, "Encoding_%012d-GumbelSampler", i); std::string name(name_char); node_names_input.push_back(name); } for (int i = 0; i < input_size; ++i) { char name_char[512]; sprintf(name_char, "Encoding_%012d-InverseTau", i); std::string name(name_char); node_names_input.push_back(name); } // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, 3*input_size); input_values.setValues({ {{1, 2, -0.1, 0.1, 1.5, 1.5}} }); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setValues({ {0, 0} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0), 0.189135298, 1e-4); // test for the expected node outputs std::vector<float> output_values_test = { 0.141851, 0.858149 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-4); } // test for the expected weights std::vector<std::string> weight_names = { "Alpha_000000000000_to_Encoding_000000000000-LogAlphaSampler", "Alpha_000000000001_to_Encoding_000000000001-LogAlphaSampler", "Encoding-SoftMax-In_000000000000_to_Encoding-SoftMax-Out_000000000000", "Encoding-SoftMax-In_000000000000_to_Encoding-SoftMax-Sum", "Encoding-SoftMax-In_000000000001_to_Encoding-SoftMax-Out_000000000001", "Encoding-SoftMax-In_000000000001_to_Encoding-SoftMax-Sum", "Encoding-SoftMax-Sum_to_Encoding-SoftMax-Out_000000000000", "Encoding-SoftMax-Sum_to_Encoding-SoftMax-Out_000000000001", "Encoding_000000000000-GumbelSampler_to_Encoding_000000000000-LogAlphaSampler", "Encoding_000000000000-InverseTau_to_Encoding_000000000000-SoftmaxArgs", "Encoding_000000000000-LogAlphaSampler_to_Encoding_000000000000-SoftmaxArgs", "Encoding_000000000000-SoftmaxArgs_to_Encoding-SoftMax-In_000000000000","Encoding_000000000001-SoftmaxArgs_to_Encoding-SoftMax-In_000000000001", "Encoding_000000000001-GumbelSampler_to_Encoding_000000000001-LogAlphaSampler", "Encoding_000000000001-InverseTau_to_Encoding_000000000001-SoftmaxArgs", "Encoding_000000000001-LogAlphaSampler_to_Encoding_000000000001-SoftmaxArgs" }; std::vector<float> weight_values_test = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; for (int i = 0; i < weight_names.size(); ++i) { //std::cout << weight_names.at(i) << " Weight: " << model.getWeightsMap().at(weight_names.at(i))->getWeight() << std::endl; BOOST_CHECK_CLOSE(model.getWeightsMap().at(weight_names.at(i))->getWeight(), weight_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_CASE(addDiscriminator) { ModelBuilder<float> model_builder; Model<float> model; std::vector<std::string> node_names; // make the input std::vector<std::string> encoding_node_names = model_builder.addInputNodes(model, "Mu", "Mu", 2); // make the normalization node_names = model_builder.addDiscriminator(model, "Discriminator", "Mod1", encoding_node_names); std::vector<std::string> node_names_test = { "Discriminator-Output-000000000000", "Discriminator-Output-000000000001", "Discriminator-Sampler-000000000000", "Discriminator-Sampler-000000000001" }; std::vector<std::string> link_names_test = { "Mu_000000000000_to_Discriminator-Output-000000000000","Mu_000000000001_to_Discriminator-Output-000000000001", "Discriminator-Sampler-000000000000_to_Discriminator-Output-000000000000","Discriminator-Sampler-000000000001_to_Discriminator-Output-000000000001" }; std::vector<std::string> weight_names_test = { "Mu_000000000000_to_Discriminator-Output-000000000000","Mu_000000000001_to_Discriminator-Output-000000000001", "Discriminator-Sampler-000000000000_to_Discriminator-Output-000000000000","Discriminator-Sampler-000000000001_to_Discriminator-Output-000000000001" }; // TODO } BOOST_AUTO_TEST_CASE(addLSTMBlock1) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 2; const int output_size = 2; // make the input std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", input_size, true); // make the LSTM block1 std::vector<std::string> node_names_output = model_builder.addLSTMBlock1(model, "LSTM", "Mod1", node_names_input, input_size, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 0.0f, 0.0f, true, true, true); // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); model.findCycles(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, input_size); input_values.setValues({ {{1, 2}} }); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setValues({ {0, 0} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0), 3.70516539, 1e-4); // test for the expected node outputs std::vector<float> output_values_test = { 2.72219, 2.72219 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-4); } // test for the expected weights std::vector<std::string> weight_names = { "Input_000000000000_to_LSTM-BlockGateForget","Input_000000000000_to_LSTM-BlockGateInput","Input_000000000000_to_LSTM-BlockGateOutput","Input_000000000000_to_LSTM-BlockInput-000000000000","Input_000000000000_to_LSTM-BlockInput-000000000001", "Input_000000000001_to_LSTM-BlockGateForget","Input_000000000001_to_LSTM-BlockGateInput","Input_000000000001_to_LSTM-BlockGateOutput","Input_000000000001_to_LSTM-BlockInput-000000000000","Input_000000000001_to_LSTM-BlockInput-000000000001", "LSTM-BlockMultOutput-000000000000_to_LSTM-BlockGateForget","LSTM-BlockMultOutput-000000000000_to_LSTM-BlockGateInput","LSTM-BlockMultOutput-000000000000_to_LSTM-BlockGateOutput","LSTM-BlockMultOutput-000000000000_to_LSTM-BlockInput-000000000000", "LSTM-BlockMultOutput-000000000001_to_LSTM-BlockGateForget","LSTM-BlockMultOutput-000000000001_to_LSTM-BlockGateInput","LSTM-BlockMultOutput-000000000001_to_LSTM-BlockGateOutput","LSTM-BlockMultOutput-000000000001_to_LSTM-BlockInput-000000000001", "LSTM-BlockGateForget-bias_to_LSTM-BlockGateForget","LSTM-BlockGateInput-bias_to_LSTM-BlockGateInput","LSTM-BlockGateOutput-bias_to_LSTM-BlockGateOutput", "LSTM-BlockInput-000000000000-bias-000000000000_to_LSTM-BlockInput-000000000000","LSTM-BlockInput-000000000001-bias-000000000001_to_LSTM-BlockInput-000000000001" }; std::vector<float> weight_values_test = { 1, 0.843730986, 0.843730986, 0.876494527, 0.876494527, 1, 0.687461972, 0.687461972, 0.752988994, 0.752988994, 1, 0.574605703, 0.574605703, 0.663794279, 1, 0.574605703, 0.574605703, 0.663794279, 0, 0, 0, 0, 0}; for (int i = 0; i < weight_names.size(); ++i) { //std::cout << weight_names.at(i) << " Weight: " << model.getWeightsMap().at(weight_names.at(i))->getWeight() << std::endl; BOOST_CHECK_CLOSE(model.getWeightsMap().at(weight_names.at(i))->getWeight(), weight_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_CASE(addLSTM) { // NO Test } BOOST_AUTO_TEST_CASE(addDotProdAttention1) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 2; const int output_size = 3; // make the input std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", input_size, true); // make the fully connected std::vector<std::string> node_names_output = model_builder.addDotProdAttention(model, "Hidden", "Mod1", node_names_input, node_names_input, node_names_input, output_size, output_size, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 0.0f, 0.0f, true, true); // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, input_size); input_values.setValues({ {{1, 4}} }); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setValues({ {0, 0, 0} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0), 1.38889, 1e-4); // test for the expected node outputs std::vector<float> output_values_test = { 1.66667, 1.66667, 1.66667 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-3); } // test for the expected weights std::vector<std::string> weight_names = { "Hidden-scalar_to_Hidden_scores_000000000000","Hidden-scalar_to_Hidden_scores_000000000001", "Hidden-scalar_to_Hidden_scores_000000000002","Hidden_keys_000000000000_to_Hidden_scores_000000000000","Hidden_keys_000000000001_to_Hidden_scores_000000000001", "Hidden_keys_000000000002_to_Hidden_scores_000000000002","Hidden_query_000000000000_to_Hidden_scores_000000000000","Hidden_query_000000000001_to_Hidden_scores_000000000001", "Hidden_query_000000000002_to_Hidden_scores_000000000002", "Hidden_values_000000000000_to_Hidden_attention_000000000000","Hidden_values_000000000001_to_Hidden_attention_000000000001","Hidden_values_000000000002_to_Hidden_attention_000000000002", "Input_000000000000_to_Hidden_keys_000000000000","Input_000000000000_to_Hidden_keys_000000000001","Input_000000000000_to_Hidden_keys_000000000002", "Input_000000000000_to_Hidden_query_000000000000","Input_000000000000_to_Hidden_query_000000000001","Input_000000000000_to_Hidden_query_000000000002", "Input_000000000000_to_Hidden_values_000000000000","Input_000000000000_to_Hidden_values_000000000001","Input_000000000000_to_Hidden_values_000000000002", "Input_000000000001_to_Hidden_keys_000000000000","Input_000000000001_to_Hidden_keys_000000000001","Input_000000000001_to_Hidden_keys_000000000002", "Input_000000000001_to_Hidden_query_000000000000","Input_000000000001_to_Hidden_query_000000000001","Input_000000000001_to_Hidden_query_000000000002", "Input_000000000001_to_Hidden_values_000000000000","Input_000000000001_to_Hidden_values_000000000001","Input_000000000001_to_Hidden_values_000000000002", "Hidden_softMax-Out_000000000000_to_Hidden_attention_000000000000", "Hidden_softMax-Out_000000000001_to_Hidden_attention_000000000001", "Hidden_softMax-Out_000000000002_to_Hidden_attention_000000000002" }; std::vector<float> weight_values_test = { 0.57735, 0.57735, 0.57735, 1, 1, 1, 1, 1, 1, 1, 1, 1, -200.35054, -200.35054, -200.35054, -200.35054, -200.35054, -200.35054, 0.981481493, 0.981481493, 0.981481493, -804.402161, -804.402161, -804.402161, -804.402161, -804.402161, -804.402161, 0.92592591, 0.92592591, 0.92592591, 1, 1, 1 }; for (int i = 0; i < weight_names.size(); ++i) { //std::cout << weight_names.at(i) << " Weight: " << model.getWeightsMap().at(weight_names.at(i))->getWeight() << std::endl; BOOST_CHECK_CLOSE(model.getWeightsMap().at(weight_names.at(i))->getWeight(), weight_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_CASE(addMultiHeadAttention1) { ModelBuilder<float> model_builder; Model<float> model; std::vector<std::string> node_names; // make the input node_names = model_builder.addInputNodes(model, "Input", "Input", 2); // make the fully connected node_names = model_builder.addMultiHeadAttention(model, "Hidden", "Mod1", node_names, node_names, node_names, 2, "DotProd", 2, 3, 3, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 0.0f, 0.0f, true, true); std::vector<std::string> weight_names_test = { "Hidden_MultiHead-bias_000000000000_to_Hidden_MultiHead_000000000000", "Hidden_MultiHead-bias_000000000001_to_Hidden_MultiHead_000000000001", "Hidden-000000000000_attention_000000000000_to_Hidden_MultiHead_000000000000", "Hidden-000000000000_attention_000000000001_to_Hidden_MultiHead_000000000000", "Hidden-000000000000_attention_000000000002_to_Hidden_MultiHead_000000000000", "Hidden-000000000000_attention_000000000000_to_Hidden_MultiHead_000000000001", "Hidden-000000000000_attention_000000000001_to_Hidden_MultiHead_000000000001", "Hidden-000000000000_attention_000000000002_to_Hidden_MultiHead_000000000001", "Hidden-000000000001_attention_000000000000_to_Hidden_MultiHead_000000000000", "Hidden-000000000001_attention_000000000001_to_Hidden_MultiHead_000000000000", "Hidden-000000000001_attention_000000000002_to_Hidden_MultiHead_000000000000", "Hidden-000000000001_attention_000000000000_to_Hidden_MultiHead_000000000001", "Hidden-000000000001_attention_000000000001_to_Hidden_MultiHead_000000000001", "Hidden-000000000001_attention_000000000002_to_Hidden_MultiHead_000000000001"}; // TODO } BOOST_AUTO_TEST_CASE(addProjection1) { ModelBuilder<float> model_builder; Model<float> model; std::vector<std::string> node_names; // make the input node_names = model_builder.addInputNodes(model, "Input", "Input", 4); // make the fully connected node_names = model_builder.addProjection( model, "Filter", "Mod1", node_names, 2, 2, 0, 0, 4, 4, 1, 0, 0, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 0.2f, 0.8f); std::vector<std::string> node_names_test = { "Filter-out_H000000000000-W000000000000", "Filter-out_H000000000000-W000000000001", "Filter-out_H000000000000-W000000000002", "Filter-out_H000000000000-W000000000003", "Filter-out_H000000000000-W000000000004", "Filter-out_H000000000001-W000000000000", "Filter-out_H000000000001-W000000000001", "Filter-out_H000000000001-W000000000002", "Filter-out_H000000000001-W000000000003", "Filter-out_H000000000001-W000000000004", "Filter-out_H000000000002-W000000000000", "Filter-out_H000000000002-W000000000001", "Filter-out_H000000000002-W000000000002", "Filter-out_H000000000002-W000000000003", "Filter-out_H000000000002-W000000000004", "Filter-out_H000000000003-W000000000000", "Filter-out_H000000000003-W000000000001", "Filter-out_H000000000003-W000000000002", "Filter-out_H000000000003-W000000000003", "Filter-out_H000000000003-W000000000004", "Filter-out_H000000000004-W000000000000", "Filter-out_H000000000004-W000000000001", "Filter-out_H000000000004-W000000000002", "Filter-out_H000000000004-W000000000003", "Filter-out_H000000000004-W000000000004" }; std::vector<std::string> link_names_test = { "Input_000000000000_to_Filter-out_H000000000000-W000000000000_Mod1", "Input_000000000000_to_Filter-out_H000000000000-W000000000001_Mod1", "Input_000000000000_to_Filter-out_H000000000000-W000000000002_Mod1", "Input_000000000000_to_Filter-out_H000000000000-W000000000003_Mod1", "Input_000000000000_to_Filter-out_H000000000001-W000000000000_Mod1", "Input_000000000000_to_Filter-out_H000000000001-W000000000001_Mod1", "Input_000000000000_to_Filter-out_H000000000001-W000000000002_Mod1", "Input_000000000000_to_Filter-out_H000000000001-W000000000003_Mod1", "Input_000000000000_to_Filter-out_H000000000002-W000000000000_Mod1", "Input_000000000000_to_Filter-out_H000000000002-W000000000001_Mod1", "Input_000000000000_to_Filter-out_H000000000002-W000000000002_Mod1", "Input_000000000000_to_Filter-out_H000000000002-W000000000003_Mod1", "Input_000000000000_to_Filter-out_H000000000003-W000000000000_Mod1", "Input_000000000000_to_Filter-out_H000000000003-W000000000001_Mod1", "Input_000000000000_to_Filter-out_H000000000003-W000000000002_Mod1", "Input_000000000000_to_Filter-out_H000000000003-W000000000003_Mod1", "Input_000000000002_to_Filter-out_H000000000000-W000000000001_Mod1", "Input_000000000002_to_Filter-out_H000000000000-W000000000002_Mod1", "Input_000000000002_to_Filter-out_H000000000000-W000000000003_Mod1", "Input_000000000002_to_Filter-out_H000000000000-W000000000004_Mod1", "Input_000000000002_to_Filter-out_H000000000001-W000000000001_Mod1", "Input_000000000002_to_Filter-out_H000000000001-W000000000002_Mod1", "Input_000000000002_to_Filter-out_H000000000001-W000000000003_Mod1", "Input_000000000002_to_Filter-out_H000000000001-W000000000004_Mod1", "Input_000000000002_to_Filter-out_H000000000002-W000000000001_Mod1", "Input_000000000002_to_Filter-out_H000000000002-W000000000002_Mod1", "Input_000000000002_to_Filter-out_H000000000002-W000000000003_Mod1", "Input_000000000002_to_Filter-out_H000000000002-W000000000004_Mod1", "Input_000000000002_to_Filter-out_H000000000003-W000000000001_Mod1", "Input_000000000002_to_Filter-out_H000000000003-W000000000002_Mod1", "Input_000000000002_to_Filter-out_H000000000003-W000000000003_Mod1", "Input_000000000002_to_Filter-out_H000000000003-W000000000004_Mod1", "Input_000000000001_to_Filter-out_H000000000001-W000000000000_Mod1", "Input_000000000001_to_Filter-out_H000000000001-W000000000001_Mod1", "Input_000000000001_to_Filter-out_H000000000001-W000000000002_Mod1", "Input_000000000001_to_Filter-out_H000000000001-W000000000003_Mod1", "Input_000000000001_to_Filter-out_H000000000002-W000000000000_Mod1", "Input_000000000001_to_Filter-out_H000000000002-W000000000001_Mod1", "Input_000000000001_to_Filter-out_H000000000002-W000000000002_Mod1", "Input_000000000001_to_Filter-out_H000000000002-W000000000003_Mod1", "Input_000000000001_to_Filter-out_H000000000003-W000000000000_Mod1", "Input_000000000001_to_Filter-out_H000000000003-W000000000001_Mod1", "Input_000000000001_to_Filter-out_H000000000003-W000000000002_Mod1", "Input_000000000001_to_Filter-out_H000000000003-W000000000003_Mod1", "Input_000000000001_to_Filter-out_H000000000004-W000000000000_Mod1", "Input_000000000001_to_Filter-out_H000000000004-W000000000001_Mod1", "Input_000000000001_to_Filter-out_H000000000004-W000000000002_Mod1", "Input_000000000001_to_Filter-out_H000000000004-W000000000003_Mod1", "Input_000000000003_to_Filter-out_H000000000001-W000000000001_Mod1", "Input_000000000003_to_Filter-out_H000000000001-W000000000002_Mod1", "Input_000000000003_to_Filter-out_H000000000001-W000000000003_Mod1", "Input_000000000003_to_Filter-out_H000000000001-W000000000004_Mod1", "Input_000000000003_to_Filter-out_H000000000002-W000000000001_Mod1", "Input_000000000003_to_Filter-out_H000000000002-W000000000002_Mod1", "Input_000000000003_to_Filter-out_H000000000002-W000000000003_Mod1", "Input_000000000003_to_Filter-out_H000000000002-W000000000004_Mod1", "Input_000000000003_to_Filter-out_H000000000003-W000000000001_Mod1", "Input_000000000003_to_Filter-out_H000000000003-W000000000002_Mod1", "Input_000000000003_to_Filter-out_H000000000003-W000000000003_Mod1", "Input_000000000003_to_Filter-out_H000000000003-W000000000004_Mod1", "Input_000000000003_to_Filter-out_H000000000004-W000000000001_Mod1", "Input_000000000003_to_Filter-out_H000000000004-W000000000002_Mod1", "Input_000000000003_to_Filter-out_H000000000004-W000000000003_Mod1", "Input_000000000003_to_Filter-out_H000000000004-W000000000004_Mod1" }; std::vector<std::string> weight_names_test = { "Filter-Mod1_H000000000000-W000000000000", "Filter-Mod1_H000000000000-W000000000001", "Filter-Mod1_H000000000000-W000000000002", "Filter-Mod1_H000000000000-W000000000003", "Filter-Mod1_H000000000001-W000000000000", "Filter-Mod1_H000000000001-W000000000001", "Filter-Mod1_H000000000001-W000000000002", "Filter-Mod1_H000000000001-W000000000003", "Filter-Mod1_H000000000002-W000000000000", "Filter-Mod1_H000000000002-W000000000001", "Filter-Mod1_H000000000002-W000000000002", "Filter-Mod1_H000000000002-W000000000003", "Filter-Mod1_H000000000003-W000000000000", "Filter-Mod1_H000000000003-W000000000001", "Filter-Mod1_H000000000003-W000000000002", "Filter-Mod1_H000000000003-W000000000003"}; // TODO } BOOST_AUTO_TEST_CASE(addProjection1WithoutSharedWeights) { ModelBuilder<float> model_builder; Model<float> model; std::vector<std::string> node_names; // make the input node_names = model_builder.addInputNodes(model, "Input", "Input", 4); // make the fully connected node_names = model_builder.addProjection( model, "Filter", "Mod1", node_names, 2, 2, 0, 0, 4, 4, 1, 0, 0, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 0.2f, 0.8f, true, true, false); std::vector<std::string> node_names_test = { "Filter-out_H000000000000-W000000000000", "Filter-out_H000000000000-W000000000001", "Filter-out_H000000000000-W000000000002", "Filter-out_H000000000000-W000000000003", "Filter-out_H000000000000-W000000000004", "Filter-out_H000000000001-W000000000000", "Filter-out_H000000000001-W000000000001", "Filter-out_H000000000001-W000000000002", "Filter-out_H000000000001-W000000000003", "Filter-out_H000000000001-W000000000004", "Filter-out_H000000000002-W000000000000", "Filter-out_H000000000002-W000000000001", "Filter-out_H000000000002-W000000000002", "Filter-out_H000000000002-W000000000003", "Filter-out_H000000000002-W000000000004", "Filter-out_H000000000003-W000000000000", "Filter-out_H000000000003-W000000000001", "Filter-out_H000000000003-W000000000002", "Filter-out_H000000000003-W000000000003", "Filter-out_H000000000003-W000000000004", "Filter-out_H000000000004-W000000000000", "Filter-out_H000000000004-W000000000001", "Filter-out_H000000000004-W000000000002", "Filter-out_H000000000004-W000000000003", "Filter-out_H000000000004-W000000000004" }; std::vector<std::string> link_names_test = { "Input_000000000000_to_Filter-out_H000000000000-W000000000000_Mod1", "Input_000000000000_to_Filter-out_H000000000000-W000000000001_Mod1", "Input_000000000000_to_Filter-out_H000000000000-W000000000002_Mod1", "Input_000000000000_to_Filter-out_H000000000000-W000000000003_Mod1", "Input_000000000000_to_Filter-out_H000000000001-W000000000000_Mod1", "Input_000000000000_to_Filter-out_H000000000001-W000000000001_Mod1", "Input_000000000000_to_Filter-out_H000000000001-W000000000002_Mod1", "Input_000000000000_to_Filter-out_H000000000001-W000000000003_Mod1", "Input_000000000000_to_Filter-out_H000000000002-W000000000000_Mod1", "Input_000000000000_to_Filter-out_H000000000002-W000000000001_Mod1", "Input_000000000000_to_Filter-out_H000000000002-W000000000002_Mod1", "Input_000000000000_to_Filter-out_H000000000002-W000000000003_Mod1", "Input_000000000000_to_Filter-out_H000000000003-W000000000000_Mod1", "Input_000000000000_to_Filter-out_H000000000003-W000000000001_Mod1", "Input_000000000000_to_Filter-out_H000000000003-W000000000002_Mod1", "Input_000000000000_to_Filter-out_H000000000003-W000000000003_Mod1", "Input_000000000002_to_Filter-out_H000000000000-W000000000001_Mod1", "Input_000000000002_to_Filter-out_H000000000000-W000000000002_Mod1", "Input_000000000002_to_Filter-out_H000000000000-W000000000003_Mod1", "Input_000000000002_to_Filter-out_H000000000000-W000000000004_Mod1", "Input_000000000002_to_Filter-out_H000000000001-W000000000001_Mod1", "Input_000000000002_to_Filter-out_H000000000001-W000000000002_Mod1", "Input_000000000002_to_Filter-out_H000000000001-W000000000003_Mod1", "Input_000000000002_to_Filter-out_H000000000001-W000000000004_Mod1", "Input_000000000002_to_Filter-out_H000000000002-W000000000001_Mod1", "Input_000000000002_to_Filter-out_H000000000002-W000000000002_Mod1", "Input_000000000002_to_Filter-out_H000000000002-W000000000003_Mod1", "Input_000000000002_to_Filter-out_H000000000002-W000000000004_Mod1", "Input_000000000002_to_Filter-out_H000000000003-W000000000001_Mod1", "Input_000000000002_to_Filter-out_H000000000003-W000000000002_Mod1", "Input_000000000002_to_Filter-out_H000000000003-W000000000003_Mod1", "Input_000000000002_to_Filter-out_H000000000003-W000000000004_Mod1", "Input_000000000001_to_Filter-out_H000000000001-W000000000000_Mod1", "Input_000000000001_to_Filter-out_H000000000001-W000000000001_Mod1", "Input_000000000001_to_Filter-out_H000000000001-W000000000002_Mod1", "Input_000000000001_to_Filter-out_H000000000001-W000000000003_Mod1", "Input_000000000001_to_Filter-out_H000000000002-W000000000000_Mod1", "Input_000000000001_to_Filter-out_H000000000002-W000000000001_Mod1", "Input_000000000001_to_Filter-out_H000000000002-W000000000002_Mod1", "Input_000000000001_to_Filter-out_H000000000002-W000000000003_Mod1", "Input_000000000001_to_Filter-out_H000000000003-W000000000000_Mod1", "Input_000000000001_to_Filter-out_H000000000003-W000000000001_Mod1", "Input_000000000001_to_Filter-out_H000000000003-W000000000002_Mod1", "Input_000000000001_to_Filter-out_H000000000003-W000000000003_Mod1", "Input_000000000001_to_Filter-out_H000000000004-W000000000000_Mod1", "Input_000000000001_to_Filter-out_H000000000004-W000000000001_Mod1", "Input_000000000001_to_Filter-out_H000000000004-W000000000002_Mod1", "Input_000000000001_to_Filter-out_H000000000004-W000000000003_Mod1", "Input_000000000003_to_Filter-out_H000000000001-W000000000001_Mod1", "Input_000000000003_to_Filter-out_H000000000001-W000000000002_Mod1", "Input_000000000003_to_Filter-out_H000000000001-W000000000003_Mod1", "Input_000000000003_to_Filter-out_H000000000001-W000000000004_Mod1", "Input_000000000003_to_Filter-out_H000000000002-W000000000001_Mod1", "Input_000000000003_to_Filter-out_H000000000002-W000000000002_Mod1", "Input_000000000003_to_Filter-out_H000000000002-W000000000003_Mod1", "Input_000000000003_to_Filter-out_H000000000002-W000000000004_Mod1", "Input_000000000003_to_Filter-out_H000000000003-W000000000001_Mod1", "Input_000000000003_to_Filter-out_H000000000003-W000000000002_Mod1", "Input_000000000003_to_Filter-out_H000000000003-W000000000003_Mod1", "Input_000000000003_to_Filter-out_H000000000003-W000000000004_Mod1", "Input_000000000003_to_Filter-out_H000000000004-W000000000001_Mod1", "Input_000000000003_to_Filter-out_H000000000004-W000000000002_Mod1", "Input_000000000003_to_Filter-out_H000000000004-W000000000003_Mod1", "Input_000000000003_to_Filter-out_H000000000004-W000000000004_Mod1" }; // TODO } /* Comprehensive model builder tests to check for the correct error propogation */ BOOST_AUTO_TEST_CASE(checkStableSoftMaxXEntropy) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 2; const int output_size = 2; // make the input std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", input_size, true); // make the fully connected std::vector<std::string> node_names = model_builder.addFullyConnected(model, "Output", "Output", node_names_input, output_size, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 0.0f, 0.0f, true, true); // make the softmax std::vector<std::string> node_names_output = model_builder.addStableSoftMax(model, "SoftMax", "Mod1", node_names, true); // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, input_size); input_values.setValues({ {{1, 1}} }); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setValues({ {0, 1} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<NegativeLogLikelihoodLossTensorOp<float, Eigen::DefaultDevice>>(NegativeLogLikelihoodLossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<NegativeLogLikelihoodLossGradTensorOp<float, Eigen::DefaultDevice>>(NegativeLogLikelihoodLossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0), 0.346573591, 1e-4); // test for the expected node outputs std::vector<float> output_values_test = { 0.5, 0.5 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-4); } // test for the expected weights std::vector<std::string> weight_names = { "Output-bias_000000000000_to_Output_000000000000", "Output-bias_000000000001_to_Output_000000000001", "Input_000000000000_to_Output_000000000000", "Input_000000000000_to_Output_000000000001", "Input_000000000000_to_Output_000000000000", "Input_000000000000_to_Output_000000000001" }; std::vector<float> weight_values_test = { 0, 0, 0.0486075282, -0.0873061419, 0.0486075282, -0.0873061419 }; for (int i = 0; i < weight_names.size(); ++i) { //std::cout << weight_names.at(i) << " Weight: " << model.getWeightsMap().at(weight_names.at(i))->getWeight() << std::endl; BOOST_CHECK_CLOSE(model.getWeightsMap().at(weight_names.at(i))->getWeight(), weight_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_CASE(checkFullyConnectedWithXEntropyWLogits) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 2; const int output_size = 2; // make the input std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", input_size, true); // make the fully connected std::vector<std::string> node_names_output = model_builder.addFullyConnected(model, "Output", "Output", node_names_input, output_size, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 0.0f, 0.0f, true, true); // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, input_size); input_values.setValues({ {{1, 1}} }); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setValues({ {0, 1} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<CrossEntropyWithLogitsLossTensorOp<float, Eigen::DefaultDevice>>(CrossEntropyWithLogitsLossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<CrossEntropyWithLogitsLossGradTensorOp<float, Eigen::DefaultDevice>>(CrossEntropyWithLogitsLossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0), 0.346573591, 1e-4); // test for the expected node outputs std::vector<float> output_values_test = { 2, 2 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-4); } // test for the expected weights std::vector<std::string> weight_names = { "Output-bias_000000000000_to_Output_000000000000", "Output-bias_000000000001_to_Output_000000000001", "Input_000000000000_to_Output_000000000000", "Input_000000000000_to_Output_000000000001", "Input_000000000000_to_Output_000000000000", "Input_000000000000_to_Output_000000000001" }; std::vector<float> weight_values_test = { 0, 0, 0.899999976, 0.949999988, 0.899999976, 0.949999988 }; // option 1 //std::vector<float> weight_values_test = { 0, 0, 1, 0.900000215, 1, 0.900000215 }; // option 2 for (int i = 0; i < weight_names.size(); ++i) { //std::cout << weight_names.at(i) << " Weight: " << model.getWeightsMap().at(weight_names.at(i))->getWeight() << std::endl; BOOST_CHECK_CLOSE(model.getWeightsMap().at(weight_names.at(i))->getWeight(), weight_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_CASE(addGaussian_1) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 2; const int output_size = 2; // make the input std::vector<std::string> mu_node_names = model_builder.addInputNodes(model, "Mu", "Input", input_size); std::vector<std::string> logvar_node_names = model_builder.addInputNodes(model, "LogVar", "Input", input_size); std::vector<std::string> gaussian_node_names = model_builder.addInputNodes(model, "Gaussian", "Input", input_size); std::vector<std::string> node_names_input; for (const std::string& node_name : mu_node_names) node_names_input.push_back(node_name); for (const std::string& node_name : logvar_node_names) node_names_input.push_back(node_name); for (const std::string& node_name : gaussian_node_names) node_names_input.push_back(node_name); // make the fully connected std::vector<std::string> node_names_output = model_builder.addGaussian_( model, "GaussianDiff", "Mod1", mu_node_names, logvar_node_names, gaussian_node_names, false); // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, (int)node_names_input.size()); input_values.setConstant(1); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setConstant(0); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0)<<std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0), 0.0123303831, 1e-4); // test for the expected node outputs /* def gaussian(x, mu, sigma): scaling = 1.0 / np.sqrt(2.0 * 3.14159265359 * (sigma ** 2)) bell = np.exp(- (x - mu) ** 2 / (2.0 * sigma ** 2)) return scaling * bell gaussian(1,1,np.exp(1)) = 0.1467626631737351 */ std::vector<float> output_values_test = { 0.157037467, 0.157037467 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_CASE(addMixedGaussianPior1) { ModelBuilder<float> model_builder; Model<float> model; const int batch_size = 1; const int memory_size = 1; const int input_size = 2; const int output_size = 2; // make the input std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "GaussianIn", "Input", input_size); // make the fully connected std::vector<std::string> node_names_output = model_builder.addMixedGaussianPior( model, "GaussianDiff", "Mod1", node_names_input, 1, 3, 0.5, false); // Specify the output node types manually for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); // interpret and train the model Eigen::Tensor<float, 3> input_values(batch_size, memory_size, (int)node_names_input.size()); input_values.setConstant(1); Eigen::Tensor<float, 2> output_values(batch_size, output_size); output_values.setConstant(0); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_function_grad = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); trainModel(model, node_names_input, node_names_output, input_values, output_values, batch_size, memory_size, loss_function, loss_function_grad); // test for the expected model error //std::cout << "Model error: " << model.getError()(0, 0)<<std::endl; BOOST_CHECK_CLOSE(model.getError()(0, 0), 0.00308103464, 1e-4); // test for the expected node outputs /* def gaussian(x, mu, sigma): scaling = 1.0 / np.sqrt(2.0 * 3.14159265359 * (sigma ** 2)) bell = np.exp(- (x - mu) ** 2 / (2.0 * sigma ** 2)) return scaling * bell def scale_mixture_prior(x, sigma_p1, sigma_p2, pi): first_gaussian = pi * gaussian(x, 0., sigma_p1) second_gaussian = (1 - pi) * gaussian(x, 0., sigma_p2) return first_gaussian + second_gaussian scale_mixture_prior(1, np.exp(1), np.exp(3), 0.5) */ std::vector<float> output_values_test = { 0.0784988478, 0.0784988478 }; for (int i = 0; i < node_names_output.size(); ++i) { //std::cout << node_names_output.at(i) << " Output: " << model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0) << std::endl; BOOST_CHECK_CLOSE(model.getNodesMap().at(node_names_output.at(i))->getOutput()(0, 0), output_values_test.at(i), 1e-4); } } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE Helloworld test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/core/Helloworld.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(helloworld) BOOST_AUTO_TEST_CASE(constructor) { Helloworld* ptr = nullptr; Helloworld* nullPointer = nullptr; ptr = new Helloworld(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { Helloworld* ptr = nullptr; ptr = new Helloworld(); delete ptr; } BOOST_AUTO_TEST_CASE(addNumbers) { Helloworld hw; double test = hw.addNumbers(2.0, 2.0); BOOST_CHECK_EQUAL(test, 4.0); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELLOGGER_H #define EVONET_MODELLOGGER_H // .h #include <EvoNet/ml/Model.h> #include <EvoNet/io/CSVWriter.h> #include <vector> // .cpp #include <ctime> // time format #include <chrono> // current time #include <set> namespace EvoNet { /** @brief Class to log model training metrics */ template<typename TensorT> class ModelLogger { public: ModelLogger() = default; ///< Default constructor ModelLogger(const bool& log_time_epoch, const bool& log_train_val_metric_epoch = false, const bool& log_expected_epoch = false, const bool& log_weights_epoch = false, const bool& log_node_errors_epoch = false, const bool& log_node_outputs_epoch = false, const bool& log_node_derivatives_epoch = false, const bool& log_node_inputs_epoch = false); ~ModelLogger() = default; ///< Default destructor void setLogTimeEpoch(const bool& log_time_epoch) { log_time_epoch_ = log_time_epoch; } void setLogTrainValMetricEpoch(const bool& log_train_val_metric_epoch) { log_train_val_metric_epoch_ = log_train_val_metric_epoch; } void setLogExpectedEpoch(const bool& log_expected_epoch) { log_expected_epoch_ = log_expected_epoch; } void setLogNodeOutputsEpoch(const bool& log_node_outputs_epoch) { log_node_outputs_epoch_ = log_node_outputs_epoch; } void setLogNodeInputsEpoch(const bool& log_node_inputs_epoch) { log_node_inputs_epoch_ = log_node_inputs_epoch; } void setLogDir(const std::string& log_dir) { log_dir_ = log_dir; } bool getLogTimeEpoch() { return log_time_epoch_; } bool getLogTrainValMetricEpoch() { return log_train_val_metric_epoch_; } bool getLogExpectedEpoch() { return log_expected_epoch_; } bool getLogWeightsEpoch() { return log_weights_epoch_; } bool getLogNodeErrorsEpoch() { return log_node_errors_epoch_; } bool getLogNodeOutputsEpoch() { return log_node_outputs_epoch_; } bool getLogNodeDerivativesEpoch() { return log_node_derivatives_epoch_; } bool getLogNodeInputsEpoch() { return log_node_inputs_epoch_; } std::string getLogDir() { return log_dir_; } CSVWriter getLogTimeEpochCSVWriter() { return log_time_epoch_csvwriter_; } CSVWriter getLogTrainValMetricEpochCSVWriter() { return log_train_val_metric_epoch_csvwriter_; } CSVWriter getLogExpectedEpochCSVWriter() { return log_expected_epoch_csvwriter_; } CSVWriter getLogWeightsEpochCSVWriter() { return log_weights_epoch_csvwriter_; } CSVWriter getLogNodeErrorsEpochCSVWriter() { return log_node_errors_epoch_csvwriter_; } CSVWriter getLogNodeOutputsEpochCSVWriter() { return log_node_outputs_epoch_csvwriter_; } CSVWriter getLogNodeDerivativesEpochCSVWriter() { return log_node_derivatives_epoch_csvwriter_; } CSVWriter getLogNodeInputsEpochCSVWriter() { return log_node_inputs_epoch_csvwriter_; } /** @brief Initialize the log files @param[in] model @returns True for a successfull write operation */ bool initLogs(Model<TensorT> & model); /** @brief Initialize the log files @param[in] model @returns True for a successfull write operation */ bool writeLogs(Model<TensorT> & model, const int& n_epochs, const std::vector<std::string>& training_metric_names, const std::vector<std::string>& validation_metric_names, const std::vector<TensorT>& training_metrics, const std::vector<TensorT>& validation_metrics, const std::vector<std::string>& output_node_names, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& node_error_names, const std::vector<std::string>& node_output_names, const std::vector<std::string>& node_derivative_names, const std::vector<std::string>& node_input_names, const std::vector<std::string>& weight_names); /** @brief Log epoch iteration number vs. time @param[in] n_epoch @param[in] time_stamp @returns True for a successfull write operation */ bool logTimePerEpoch(Model<TensorT> & model, const int& n_epoch); /** @brief Log training/validation metrics per epoch @param[in] model @param[in] training_metric_names ... @param[in] n_epoch @returns True for a successfull write operation */ bool logTrainValMetricsPerEpoch(Model<TensorT>& model, const std::vector<std::string>& training_metric_names, const std::vector<std::string>& validation_metric_names, const std::vector<TensorT>& training_metrics, const std::vector<TensorT>& validation_metrics, const int& n_epoch); /** @brief Model<TensorT> predicted output and expected output for each batch for each time step per epoch @param[in] model @param[in] output_node_names Names of the output nodes @param[in] n_epoch @returns True for a successfull write operation */ bool logExpectedOutputPerEpoch(Model<TensorT>& model, const std::vector<std::string>& output_node_names, const Eigen::Tensor<TensorT, 3>& expected_values, const int& n_epoch); /** @brief Model<TensorT> weight update ratio for each link for each time step per epoch @param[in] model @param[in] n_epoch @returns True for a successfull write operation */ bool logWeightsPerEpoch(Model<TensorT>& model, const int& n_epoch, const std::vector<std::string>& weight_names); /** @brief Model<TensorT> node errors for each time step per epoch @param[in] model @param[in] n_epoch @returns True for a successfull write operation */ bool logNodeErrorsPerEpoch(Model<TensorT>& model, const int& n_epoch, const std::vector<std::string>& node_names); /** @brief Model<TensorT> node outputs for each time step per epoch @param[in] model @param[in] n_epoch @returns True for a successfull write operation */ bool logNodeOutputsPerEpoch(Model<TensorT>& model, const int& n_epoch, const std::vector<std::string>& node_names); /** @brief Model<TensorT> node derivatives for each time step per epoch @param[in] model @param[in] n_epoch @returns True for a successfull write operation */ bool logNodeDerivativesPerEpoch(Model<TensorT>& model, const int& n_epoch, const std::vector<std::string>& node_names); /** @brief Model<TensorT> node inputs for each time step per epoch @param[in] model @param[in] n_epoch @returns True for a successfull write operation */ bool logNodeInputsPerEpoch(Model<TensorT>& model, const int& n_epoch, const std::vector<std::string>& node_names); private: std::string log_dir_ = ""; bool log_time_epoch_ = false; CSVWriter log_time_epoch_csvwriter_; bool log_train_val_metric_epoch_ = false; CSVWriter log_train_val_metric_epoch_csvwriter_; bool log_expected_epoch_ = false; CSVWriter log_expected_epoch_csvwriter_; bool log_weights_epoch_ = false; CSVWriter log_weights_epoch_csvwriter_; bool log_node_errors_epoch_ = false; CSVWriter log_node_errors_epoch_csvwriter_; bool log_node_outputs_epoch_ = false; CSVWriter log_node_outputs_epoch_csvwriter_; bool log_node_derivatives_epoch_ = false; CSVWriter log_node_derivatives_epoch_csvwriter_; bool log_node_inputs_epoch_ = false; CSVWriter log_node_inputs_epoch_csvwriter_; }; template<typename TensorT> ModelLogger<TensorT>::ModelLogger(const bool& log_time_epoch, const bool& log_train_val_metric_epoch, const bool& log_expected_epoch, const bool& log_weights_epoch, const bool& log_node_errors_epoch, const bool& log_node_outputs_epoch, const bool& log_node_derivatives_epoch, const bool& log_node_inputs_epoch) : log_time_epoch_(log_time_epoch), log_train_val_metric_epoch_(log_train_val_metric_epoch), log_expected_epoch_(log_expected_epoch), log_weights_epoch_(log_weights_epoch), log_node_errors_epoch_(log_node_errors_epoch), log_node_outputs_epoch_(log_node_outputs_epoch), log_node_derivatives_epoch_(log_node_derivatives_epoch), log_node_inputs_epoch_(log_node_inputs_epoch) { } template<typename TensorT> bool ModelLogger<TensorT>::initLogs(Model<TensorT>& model) { if (log_time_epoch_) { std::string filename = log_dir_ + model.getName() + "_TimePerEpoch.csv"; CSVWriter csvwriter(filename); log_time_epoch_csvwriter_ = csvwriter; } if (log_train_val_metric_epoch_) { std::string filename = log_dir_ + model.getName() + "_TrainValMetricsPerEpoch.csv"; CSVWriter csvwriter(filename); log_train_val_metric_epoch_csvwriter_ = csvwriter; } if (log_expected_epoch_) { std::string filename = log_dir_ + model.getName() + "_ExpectedPerEpoch.csv"; CSVWriter csvwriter(filename); log_expected_epoch_csvwriter_ = csvwriter; } if (log_weights_epoch_) { std::string filename = log_dir_ + model.getName() + "_WeightsPerEpoch.csv"; CSVWriter csvwriter(filename); log_weights_epoch_csvwriter_ = csvwriter; } if (log_node_errors_epoch_) { std::string filename = log_dir_ + model.getName() + "_NodeErrorsPerEpoch.csv"; CSVWriter csvwriter(filename); log_node_errors_epoch_csvwriter_ = csvwriter; } if (log_node_outputs_epoch_) { std::string filename = log_dir_ + model.getName() + "_NodeOutputsPerEpoch.csv"; CSVWriter csvwriter(filename); log_node_outputs_epoch_csvwriter_ = csvwriter; } if (log_node_derivatives_epoch_) { std::string filename = log_dir_ + model.getName() + "_NodeDerivativesPerEpoch.csv"; CSVWriter csvwriter(filename); log_node_derivatives_epoch_csvwriter_ = csvwriter; } if (log_node_inputs_epoch_) { std::string filename = log_dir_ + model.getName() + "_NodeInputsPerEpoch.csv"; CSVWriter csvwriter(filename); log_node_inputs_epoch_csvwriter_ = csvwriter; } return true; } template<typename TensorT> bool ModelLogger<TensorT>::writeLogs(Model<TensorT>& model, const int & n_epochs, const std::vector<std::string>& training_metric_names, const std::vector<std::string>& validation_metric_names, const std::vector<TensorT>& training_metrics, const std::vector<TensorT>& validation_metrics, const std::vector<std::string>& output_node_names, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& node_error_names, const std::vector<std::string>& node_output_names, const std::vector<std::string>& node_derivative_names, const std::vector<std::string>& node_input_names, const std::vector<std::string>& weight_names) { if (log_time_epoch_) { logTimePerEpoch(model, n_epochs); } if (log_train_val_metric_epoch_) { logTrainValMetricsPerEpoch(model, training_metric_names, validation_metric_names, training_metrics, validation_metrics, n_epochs); } if (log_expected_epoch_) { logExpectedOutputPerEpoch(model, output_node_names, expected_values, n_epochs); } if (log_weights_epoch_) { logWeightsPerEpoch(model, n_epochs, weight_names); } if (log_node_errors_epoch_) { logNodeErrorsPerEpoch(model, n_epochs, node_error_names); } if (log_node_outputs_epoch_) { logNodeOutputsPerEpoch(model, n_epochs, node_output_names); } if (log_node_derivatives_epoch_) { logNodeDerivativesPerEpoch(model, n_epochs, node_derivative_names); } if (log_node_inputs_epoch_) { logNodeInputsPerEpoch(model, n_epochs, node_input_names); } return true; } template<typename TensorT> bool ModelLogger<TensorT>::logTimePerEpoch(Model<TensorT>& model, const int & n_epoch) { // writer header if (log_time_epoch_csvwriter_.getLineCount() == 0) { std::vector<std::string> headers = { "Epoch", "TimeStamp", "Milliseconds" }; log_time_epoch_csvwriter_.writeDataInRow(headers.begin(), headers.end()); } // TimeStamp std::chrono::time_point<std::chrono::system_clock> time_now = std::chrono::system_clock::now(); std::time_t time_now_t = std::chrono::system_clock::to_time_t(time_now); std::tm now_tm = *std::localtime(&time_now_t); char timestamp[64]; std::strftime(timestamp, 64, "%Y-%m-%d-%H-%M-%S", &now_tm); std::string time_stamp(timestamp); // Current time in milliseconds since 1970 auto now = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now().time_since_epoch()).count(); std::string milli_now = std::to_string(now); // write next entry std::vector<std::string> line = { std::to_string(n_epoch), time_stamp, milli_now }; log_time_epoch_csvwriter_.writeDataInRow(line.begin(), line.end()); return true; } template<typename TensorT> bool ModelLogger<TensorT>::logTrainValMetricsPerEpoch(Model<TensorT>& model, const std::vector<std::string>& training_metric_names, const std::vector<std::string>& validation_metric_names, const std::vector<TensorT>& training_metrics, const std::vector<TensorT>& validation_metrics, const int & n_epoch) { // writer header if (log_train_val_metric_epoch_csvwriter_.getLineCount() == 0) { std::vector<std::string> headers = { "Epoch" }; for (const std::string& metric_name : training_metric_names) { std::string metric = "Training_" + metric_name; headers.push_back(metric); } for (const std::string& metric_name : validation_metric_names) { std::string metric = "Validation_" + metric_name; headers.push_back(metric); } log_train_val_metric_epoch_csvwriter_.writeDataInRow(headers.begin(), headers.end()); } // write next entry std::vector<std::string> line = { std::to_string(n_epoch) }; for (const TensorT& metric : training_metrics) { line.push_back(std::to_string(metric)); } for (const TensorT& metric : validation_metrics) { line.push_back(std::to_string(metric)); } log_train_val_metric_epoch_csvwriter_.writeDataInRow(line.begin(), line.end()); return true; } template<typename TensorT> bool ModelLogger<TensorT>::logExpectedOutputPerEpoch(Model<TensorT>& model, const std::vector<std::string>& output_node_names, const Eigen::Tensor<TensorT, 3>& expected_values, const int & n_epoch) { std::pair<int, int> bmsizes = model.getBatchAndMemorySizes(); int batch_size = bmsizes.first; int memory_size = bmsizes.second; assert(output_node_names.size() == expected_values.dimension(2)); // writer header if (log_expected_epoch_csvwriter_.getLineCount() == 0) { std::vector<std::string> headers = { "Epoch" }; for (const std::string& node_name : output_node_names) { for (size_t batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (size_t memory_iter = 0; memory_iter < memory_size; ++memory_iter) { std::string expected = node_name + "_Expected_Batch-" + std::to_string(batch_iter) + "_Memory-" + std::to_string(memory_iter); headers.push_back(expected); } } } log_expected_epoch_csvwriter_.writeDataInRow(headers.begin(), headers.end()); } // write next entry std::vector<std::string> line = { std::to_string(n_epoch) }; int node_cnt = 0; for (const std::string& node_name : output_node_names) { for (size_t batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (size_t memory_iter = 0; memory_iter < memory_size; ++memory_iter) { // NOTE: if an error is thrown here, check that all output nodes have been cached in the model (see `model::setInputAndOutputNodes()`) line.push_back(std::to_string(expected_values(batch_iter, memory_iter, node_cnt))); } } ++node_cnt; } log_expected_epoch_csvwriter_.writeDataInRow(line.begin(), line.end()); return true; } template<typename TensorT> bool ModelLogger<TensorT>::logWeightsPerEpoch(Model<TensorT>& model, const int & n_epoch, const std::vector<std::string>& weight_names) { // write headers if (log_weights_epoch_csvwriter_.getLineCount() == 0) { std::vector<std::string> headers = { "Epoch" }; for (const std::string weight_name : weight_names) { headers.push_back(weight_name); } log_weights_epoch_csvwriter_.writeDataInRow(headers.begin(), headers.end()); } // write the next entry std::vector<std::string> line = { std::to_string(n_epoch) }; for (const std::string weight_name : weight_names) { line.push_back(std::to_string(model.weights_.at(weight_name)->getWeight())); } log_weights_epoch_csvwriter_.writeDataInRow(line.begin(), line.end()); return true; } template<typename TensorT> bool ModelLogger<TensorT>::logNodeErrorsPerEpoch(Model<TensorT>& model, const int & n_epoch, const std::vector<std::string>& node_names) { std::pair<int, int> bmsizes = model.getBatchAndMemorySizes(); int batch_size = bmsizes.first; int memory_size = bmsizes.second; // writer header if (log_node_errors_epoch_csvwriter_.getLineCount() == 0) { std::vector<std::string> headers = { "Epoch" }; for (const std::string& node_name : node_names) { for (size_t batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (size_t memory_iter = 0; memory_iter < memory_size; ++memory_iter) { std::string node_error = node_name + "_Error_Batch-" + std::to_string(batch_iter) + "_Memory-" + std::to_string(memory_iter); headers.push_back(node_error); } } } log_node_errors_epoch_csvwriter_.writeDataInRow(headers.begin(), headers.end()); } // write next entry if (model.nodes_.at(node_names.front())->getError().size() < batch_size * memory_size) return false; std::vector<std::string> line = { std::to_string(n_epoch) }; int node_cnt = 0; for (const std::string& node_name : node_names) { for (size_t batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (size_t memory_iter = 0; memory_iter < memory_size; ++memory_iter) { line.push_back(std::to_string(model.nodes_.at(node_name)->getError()(batch_iter, memory_iter))); } } } log_node_errors_epoch_csvwriter_.writeDataInRow(line.begin(), line.end()); return true; } template<typename TensorT> bool ModelLogger<TensorT>::logNodeOutputsPerEpoch(Model<TensorT>& model, const int & n_epoch, const std::vector<std::string>& node_names) { std::pair<int, int> bmsizes = model.getBatchAndMemorySizes(); int batch_size = bmsizes.first; int memory_size = bmsizes.second; // writer header if (log_node_outputs_epoch_csvwriter_.getLineCount() == 0) { std::vector<std::string> headers = { "Epoch" }; for (const std::string& node_name : node_names) { for (size_t batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (size_t memory_iter = 0; memory_iter < memory_size; ++memory_iter) { std::string node_output = node_name + "_Output_Batch-" + std::to_string(batch_iter) + "_Memory-" + std::to_string(memory_iter); headers.push_back(node_output); } } } log_node_outputs_epoch_csvwriter_.writeDataInRow(headers.begin(), headers.end()); } // write next entry if (model.nodes_.at(node_names.front())->getOutput().size() < batch_size * memory_size) return false; std::vector<std::string> line = { std::to_string(n_epoch) }; int node_cnt = 0; for (const std::string& node_name : node_names) { for (size_t batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (size_t memory_iter = 0; memory_iter < memory_size; ++memory_iter) { line.push_back(std::to_string(model.nodes_.at(node_name)->getOutput()(batch_iter, memory_iter))); } } } log_node_outputs_epoch_csvwriter_.writeDataInRow(line.begin(), line.end()); return true; } template<typename TensorT> bool ModelLogger<TensorT>::logNodeDerivativesPerEpoch(Model<TensorT>& model, const int & n_epoch, const std::vector<std::string>& node_names) { std::pair<int, int> bmsizes = model.getBatchAndMemorySizes(); int batch_size = bmsizes.first; int memory_size = bmsizes.second; // writer header if (log_node_derivatives_epoch_csvwriter_.getLineCount() == 0) { std::vector<std::string> headers = { "Epoch" }; for (const std::string& node_name : node_names) { for (size_t batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (size_t memory_iter = 0; memory_iter < memory_size; ++memory_iter) { std::string node_derivative = node_name + "_Derivative_Batch-" + std::to_string(batch_iter) + "_Memory-" + std::to_string(memory_iter); headers.push_back(node_derivative); } } } log_node_derivatives_epoch_csvwriter_.writeDataInRow(headers.begin(), headers.end()); } // write next entry if (model.nodes_.at(node_names.front())->getDerivative().size() < batch_size * memory_size) return false; std::vector<std::string> line = { std::to_string(n_epoch) }; int node_cnt = 0; for (const std::string& node_name : node_names) { for (size_t batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (size_t memory_iter = 0; memory_iter < memory_size; ++memory_iter) { line.push_back(std::to_string(model.nodes_.at(node_name)->getDerivative()(batch_iter, memory_iter))); } } } log_node_derivatives_epoch_csvwriter_.writeDataInRow(line.begin(), line.end()); return true; } template<typename TensorT> inline bool ModelLogger<TensorT>::logNodeInputsPerEpoch(Model<TensorT>& model, const int & n_epoch, const std::vector<std::string>& node_names) { std::pair<int, int> bmsizes = model.getBatchAndMemorySizes(); int batch_size = bmsizes.first; int memory_size = bmsizes.second; // writer header if (log_node_inputs_epoch_csvwriter_.getLineCount() == 0) { std::vector<std::string> headers = { "Epoch" }; for (const std::string& node_name : node_names) { for (size_t batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (size_t memory_iter = 0; memory_iter < memory_size; ++memory_iter) { std::string node_derivative = node_name + "_Input_Batch-" + std::to_string(batch_iter) + "_Memory-" + std::to_string(memory_iter); headers.push_back(node_derivative); } } } log_node_inputs_epoch_csvwriter_.writeDataInRow(headers.begin(), headers.end()); } // write next entry if (model.nodes_.at(node_names.front())->getInput().size() < batch_size * memory_size) return false; std::vector<std::string> line = { std::to_string(n_epoch) }; int node_cnt = 0; for (const std::string& node_name : node_names) { for (size_t batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (size_t memory_iter = 0; memory_iter < memory_size; ++memory_iter) { line.push_back(std::to_string(model.nodes_.at(node_name)->getInput()(batch_iter, memory_iter))); } } } log_node_inputs_epoch_csvwriter_.writeDataInRow(line.begin(), line.end()); return true; } } #endif //EVONET_MODELLOGGER_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE EMGModel test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/simulator/EMGModel.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(emgmodel) template <typename TensorT> class EMGModel_test: public EMGModel<TensorT> { public: TensorT z_(const TensorT& x_I) const { return EMGModel::z_(x_I); } TensorT EMGPDF1_(const TensorT& x_I) const { return EMGModel::EMGPDF1_(x_I); } TensorT EMGPDF2_(const TensorT& x_I) const { return EMGModel::EMGPDF2_(x_I); } TensorT EMGPDF3_(const TensorT& x_I) const { return EMGModel::EMGPDF3_(x_I); } }; BOOST_AUTO_TEST_CASE(constructor) { EMGModel<double>* ptr = nullptr; EMGModel<double>* nullPointer = nullptr; ptr = new EMGModel<double>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { EMGModel<double>* ptr = nullptr; ptr = new EMGModel<double>(); delete ptr; } BOOST_AUTO_TEST_CASE(constructor2) { EMGModel<double> emg(1.0, 2.0, 3.0, 4.0); BOOST_CHECK_EQUAL(emg.getH(), 1.0); BOOST_CHECK_EQUAL(emg.getTau(), 2.0); BOOST_CHECK_EQUAL(emg.getMu(), 3.0); BOOST_CHECK_EQUAL(emg.getSigma(), 4.0); } BOOST_AUTO_TEST_CASE(gettersAndSetters) { EMGModel<double> emg; emg.setH(1.0); emg.setTau(2.0); emg.setMu(3.0); emg.setSigma(4.0); BOOST_CHECK_EQUAL(emg.getH(), 1.0); BOOST_CHECK_EQUAL(emg.getTau(), 2.0); BOOST_CHECK_EQUAL(emg.getMu(), 3.0); BOOST_CHECK_EQUAL(emg.getSigma(), 4.0); } BOOST_AUTO_TEST_CASE(z_) { EMGModel_test<double> emg; emg.setH(1.0); emg.setTau(0.1); emg.setMu(0.0); emg.setSigma(1.0); BOOST_CHECK_CLOSE(emg.z_(-100), 77.781745930520216, 1e-6); BOOST_CHECK_CLOSE(emg.z_(-10), 14.142135623730949, 1e-6); BOOST_CHECK_CLOSE(emg.z_(0), 7.0710678118654746, 1e-6); BOOST_CHECK_CLOSE(emg.z_(10), 0.0, 1e-6); BOOST_CHECK_CLOSE(emg.z_(100), -63.639610306789272, 1e-6); emg.setTau(1e-12); BOOST_CHECK_CLOSE(emg.z_(0), 707106781186.54749, 1e-6); } BOOST_AUTO_TEST_CASE(emgpdf1_) { EMGModel_test<double> emg; emg.setH(1.0); emg.setTau(0.1); emg.setMu(0.0); emg.setSigma(1.0); BOOST_CHECK_CLOSE(emg.EMGPDF1_(100), 0.0, 1e-6); } BOOST_AUTO_TEST_CASE(emgpdf2_) { EMGModel_test<double> emg; emg.setH(1.0); emg.setTau(0.1); emg.setMu(0.0); emg.setSigma(1.0); BOOST_CHECK_CLOSE(emg.EMGPDF2_(0), 0.99028596471732067, 1e-6); } BOOST_AUTO_TEST_CASE(emgpdf3_) { EMGModel_test<double> emg; emg.setH(1.0); emg.setTau(1e-12); emg.setMu(0.0); emg.setSigma(1.0); BOOST_CHECK_CLOSE(emg.EMGPDF3_(0), 1.0, 1e-6); } BOOST_AUTO_TEST_CASE(emgpdf) { EMGModel<double> emg; emg.setH(1.0); emg.setTau(0.1); emg.setMu(0.0); emg.setSigma(1.0); BOOST_CHECK_CLOSE(emg.PDF(100), 0.0, 1e-6); BOOST_CHECK_CLOSE(emg.PDF(0), 0.99028596471732067, 1e-6); emg.setTau(1e-12); BOOST_CHECK_CLOSE(emg.PDF(0), 1.0, 1e-6); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE Solver test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/Solver.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(solver) /** SGDOp Tests */ BOOST_AUTO_TEST_CASE(constructorSGDOp) { SGDOp<float>* ptrSGD = nullptr; SGDOp<float>* nullPointerSGD = nullptr; BOOST_CHECK_EQUAL(ptrSGD, nullPointerSGD); } BOOST_AUTO_TEST_CASE(destructorSGDOp) { SGDOp<float>* ptrSGD = nullptr; ptrSGD = new SGDOp<float>(); delete ptrSGD; } BOOST_AUTO_TEST_CASE(settersAndGetters) { // Check the default SGD constructor SGDOp<float> sgd_op(0.9f, 0.1f); BOOST_CHECK_EQUAL(sgd_op.getName(), "SGDOp"); BOOST_CHECK_CLOSE(sgd_op.getLearningRate(), 0.9, 1e-3); BOOST_CHECK_CLOSE(sgd_op.getMomentum(), 0.1, 1e-3); BOOST_CHECK_CLOSE(sgd_op.getMomentumPrev(), 0.0, 1e-3); BOOST_CHECK_EQUAL(sgd_op.getParamsAsStr(), "gradient_threshold:1000000.000000;gradient_noise_sigma:0.000000;gradient_noise_gamma:0.550000;learning_rate:0.900000;momentum:0.100000;momentum_prev:0.000000"); BOOST_CHECK_CLOSE(sgd_op.getGradientThreshold(), 1e6, 1e-3); BOOST_CHECK_CLOSE(sgd_op.getGradientNoiseSigma(), 0.0, 1e-3); BOOST_CHECK_CLOSE(sgd_op.getGradientNoiseGamma(), 0.55, 1e-3); // Check the SGD getters/setters sgd_op.setLearningRate(0.8); sgd_op.setMomentum(0.2); sgd_op.setMomentumPrev(0.1); BOOST_CHECK_CLOSE(sgd_op.getLearningRate(), 0.8, 1e-3); BOOST_CHECK_CLOSE(sgd_op.getMomentum(), 0.2, 1e-3); BOOST_CHECK_CLOSE(sgd_op.getMomentumPrev(), 0.1, 1e-3); // Check the SGD constructor SGDOp<float> sgd_op2(0.9f, 0.1f, 10.0f, 1.0f, 0.55); BOOST_CHECK_CLOSE(sgd_op2.getGradientThreshold(), 10.0f, 1e-3); BOOST_CHECK_CLOSE(sgd_op2.getGradientNoiseSigma(), 1.0, 1e-3); BOOST_CHECK_CLOSE(sgd_op2.getGradientNoiseGamma(), 0.55, 1e-3); // Check the default SSD constructor SSDOp<float> ssd_op(0.9f, 0.1f); BOOST_CHECK_EQUAL(ssd_op.getName(), "SSDOp"); BOOST_CHECK_CLOSE(ssd_op.getLearningRate(), 0.9, 1e-3); BOOST_CHECK_CLOSE(ssd_op.getMomentum(), 0.1, 1e-3); BOOST_CHECK_CLOSE(ssd_op.getMomentumPrev(), 0.0, 1e-3); BOOST_CHECK_EQUAL(ssd_op.getParamsAsStr(), "gradient_threshold:1000000.000000;gradient_noise_sigma:0.000000;gradient_noise_gamma:0.550000;learning_rate:0.900000;momentum:0.100000;momentum_prev:0.000000"); BOOST_CHECK_CLOSE(ssd_op.getGradientThreshold(), 1e6, 1e-3); BOOST_CHECK_CLOSE(ssd_op.getGradientNoiseSigma(), 0.0, 1e-3); BOOST_CHECK_CLOSE(ssd_op.getGradientNoiseGamma(), 0.55, 1e-3); // Check the SSD getters/setters ssd_op.setLearningRate(0.8); ssd_op.setMomentum(0.2); ssd_op.setMomentumPrev(0.1); BOOST_CHECK_CLOSE(ssd_op.getLearningRate(), 0.8, 1e-3); BOOST_CHECK_CLOSE(ssd_op.getMomentum(), 0.2, 1e-3); BOOST_CHECK_CLOSE(ssd_op.getMomentumPrev(), 0.1, 1e-3); // Check the SSD constructor SSDOp<float> ssd_op2(0.9f, 0.1f, 10.0f, 1.0f, 0.55); BOOST_CHECK_CLOSE(ssd_op2.getGradientThreshold(), 10.0f, 1e-3); BOOST_CHECK_CLOSE(ssd_op2.getGradientNoiseSigma(), 1.0, 1e-3); BOOST_CHECK_CLOSE(ssd_op2.getGradientNoiseGamma(), 0.55, 1e-3); // Check the default Adam constructor AdamOp<float> adam_op; BOOST_CHECK_EQUAL(adam_op.getName(), "AdamOp"); BOOST_CHECK_EQUAL(adam_op.getParamsAsStr(), "gradient_threshold:1000000.000000;gradient_noise_sigma:0.000000;gradient_noise_gamma:0.550000;learning_rate:0.001000;momentum:0.900000;momentum2:0.999000;delta:0.000000;momentum_prev:0.000000;momentum2_prev:0.000000"); // Check the Adam constructor AdamOp<float> adam_op2(0.1, 0.9, 0.999, 0.001, 10.0, 1.0, 0.55); BOOST_CHECK_CLOSE(adam_op2.getLearningRate(), 0.1, 1e-3); BOOST_CHECK_CLOSE(adam_op2.getMomentum(), 0.9, 1e-3); BOOST_CHECK_CLOSE(adam_op2.getMomentum2(), 0.999, 1e-3); BOOST_CHECK_CLOSE(adam_op2.getMomentumPrev(), 0.0, 1e-3); BOOST_CHECK_CLOSE(adam_op2.getMomentum2Prev(), 0.0, 1e-3); BOOST_CHECK_CLOSE(adam_op2.getDelta(), 0.001, 1e-3); BOOST_CHECK_CLOSE(adam_op2.getGradientThreshold(), 10.0f, 1e-3); BOOST_CHECK_CLOSE(adam_op2.getGradientNoiseSigma(), 1.0, 1e-3); BOOST_CHECK_CLOSE(adam_op2.getGradientNoiseGamma(), 0.55, 1e-3); // Check the default SVAG constructor SVAGOp<float> svag_op; BOOST_CHECK_EQUAL(svag_op.getName(), "SVAGOp"); BOOST_CHECK_EQUAL(svag_op.getParamsAsStr(), "gradient_threshold:1000000.000000;gradient_noise_sigma:0.000000;gradient_noise_gamma:0.550000;learning_rate:0.001000;momentum:0.900000;momentum_prev:0.000000;variance_prev:0.000000"); // Check the SVAG constructor SVAGOp<float> svag_op2(0.1, 0.9, 10.0, 1.0, 0.55); BOOST_CHECK_CLOSE(svag_op2.getLearningRate(), 0.1, 1e-3); BOOST_CHECK_CLOSE(svag_op2.getMomentum(), 0.9, 1e-3); BOOST_CHECK_CLOSE(svag_op2.getMomentumPrev(), 0.0, 1e-3); BOOST_CHECK_CLOSE(svag_op2.getVariancePrev(), 0.0, 1e-3); BOOST_CHECK_CLOSE(svag_op2.getGradientThreshold(), 10.0f, 1e-3); BOOST_CHECK_CLOSE(svag_op2.getGradientNoiseSigma(), 1.0, 1e-3); BOOST_CHECK_CLOSE(svag_op2.getGradientNoiseGamma(), 0.55, 1e-3); // Check the default Dummy constructor DummySolverOp<float> dummy_solver_op; BOOST_CHECK_EQUAL(dummy_solver_op.getName(), "DummySolverOp"); BOOST_CHECK_EQUAL(dummy_solver_op.getParamsAsStr(), ""); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ModelResources test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/ModelResources.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(ModelResources1) BOOST_AUTO_TEST_CASE(ModelResourcesConstructor) { ModelResources* ptr = nullptr; ModelResources* nullPointer = nullptr; ptr = new ModelResources(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(ModelResourcesDestructor) { ModelResources* ptr = nullptr; ptr = new ModelResources(); delete ptr; } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_POPULATIONTRAINEREXPERIMENTALDEFAULTDEVICE_H #define EVONET_POPULATIONTRAINEREXPERIMENTALDEFAULTDEVICE_H // .h #include <EvoNet/ml/PopulationTrainerExperimental.h> #include <EvoNet/ml/ModelInterpreterDefaultDevice.h> // .cpp namespace EvoNet { /** @brief Class to train a vector of models */ template<typename TensorT> class PopulationTrainerExperimentalDefaultDevice : public PopulationTrainerExperimental<TensorT, ModelInterpreterDefaultDevice<TensorT>> { }; } #endif //EVONET_POPULATIONTRAINEREXPERIMENTALDEFAULTDEVICE_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_NODE_H #define EVONET_NODE_H // .h #include <EvoNet/ml/ActivationFunction.h> #include <EvoNet/ml/IntegrationFunction.h> #include <unsupported/Eigen/CXX11/Tensor> #include <memory> #include <vector> #include <cereal/access.hpp> // serialiation of private members #include <cereal/types/memory.hpp> #include <cereal/types/utility.hpp> // std::pair namespace EvoNet { enum class NodeStatus { // TODO: will these be the final set of states a node can be in? deactivated = 0, // Optional: utilized to indicate that there should be no change in node status initialized = 1, // Memory has been allocated for Tensors activated = 2, // Output has been calculated corrected = 3 // Error has been calculated }; enum class NodeType { input = 1, // No activation function bias = 2, // Value of 1 output = 3, hidden = 4, unmodifiable = 5, zero = 6, // value of 0 recursive = 7 // special case of hidden where the node should be treated as the source of any cyclic pair }; /** @brief Network Node */ template<typename TensorT> class Node { public: Node() = default; ///< Default constructor Node(const Node& other); ///< Copy constructor // [TODO: add test] Node(const std::string& name, const EvoNet::NodeType& type, const EvoNet::NodeStatus& status, const std::shared_ptr<ActivationOp<TensorT>>& activation, const std::shared_ptr<ActivationOp<TensorT>>& activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& integration_weight_grad); ///< Explicit constructor ~Node() = default; ///< Default destructor inline bool operator==(const Node& other) const { return std::tie( id_, type_, status_, //activation_->getName(), //activation_grad_->getName(), //integration_->getName(), //integration_error_->getName(), //integration_weight_grad_->getName(), name_, module_id_, module_name_, tensor_index_ ) == std::tie( other.id_, other.type_, other.status_, //other.activation_->getName(), //other.activation_grad_->getName(), //other.integration_->getName(), //other.integration_error_->getName(), //other.integration_weight_grad_->getName(), other.name_, other.module_id_, other.module_name_, other.tensor_index_ ) ; } inline bool operator!=(const Node& other) const { return !(*this == other); } inline Node& operator=(const Node& other) { // [TODO: add test] id_ = other.id_; name_ = other.name_; module_id_ = other.module_id_; module_name_ = other.module_name_; tensor_index_ = other.tensor_index_; layer_name_ = other.layer_name_; type_ = other.type_; activation_ = other.activation_; activation_grad_ = other.activation_grad_; integration_ = other.integration_; integration_error_ = other.integration_error_; integration_weight_grad_ = other.integration_weight_grad_; status_ = other.status_; output_min_ = other.output_min_; output_max_ = other.output_max_; drop_probability_ = other.drop_probability_; drop_ = other.drop_; return *this; } void setId(const int& id); ///< id setter int getId() const; ///< id getter void setName(const std::string& name); ///< naem setter std::string getName() const; ///< name getter void setType(const EvoNet::NodeType& type); ///< type setter EvoNet::NodeType getType() const; ///< type getter void setStatus(const EvoNet::NodeStatus& status); ///< status setter EvoNet::NodeStatus getStatus() const; ///< status getter void setActivation(const std::shared_ptr<ActivationOp<TensorT>>& activation); ///< activation setter std::shared_ptr<ActivationOp<TensorT>> getActivationShared() const; // [TODO: add tests] ActivationOp<TensorT>* getActivation() const; ///< activation getter void setActivationGrad(const std::shared_ptr<ActivationOp<TensorT>>& activation_grad); ///< activation setter std::shared_ptr<ActivationOp<TensorT>> getActivationGradShared() const; // [TODO: add tests] ActivationOp<TensorT>* getActivationGrad() const; ///< activation getter void setIntegration(const std::shared_ptr<IntegrationOp<TensorT>>& integration); ///< integration setter std::shared_ptr<IntegrationOp<TensorT>> getIntegrationShared() const; // [TODO: add tests] IntegrationOp<TensorT>* getIntegration() const; ///< integration getter void setIntegrationError(const std::shared_ptr<IntegrationErrorOp<TensorT>>& integration); ///< integration setter std::shared_ptr<IntegrationErrorOp<TensorT>> getIntegrationErrorShared() const; // [TODO: add tests] IntegrationErrorOp<TensorT>* getIntegrationError() const; ///< integration getter void setIntegrationWeightGrad(const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& integration); ///< integration setter std::shared_ptr<IntegrationWeightGradOp<TensorT>> getIntegrationWeightGradShared() const; // [TODO: add tests] IntegrationWeightGradOp<TensorT>* getIntegrationWeightGrad() const; ///< integration getter void setModuleId(const int& module_id); ///< module id setter int getModuleId() const; ///< module id getter void setTensorIndex(const std::pair<int, int>& layer_id); ///< layer id setter std::pair<int, int> getTensorIndex() const; ///< layer id getter void setModuleName(const std::string& module_name); ///< module name setter std::string getModuleName() const; ///< module name getter void setLayerName(const std::string& layer_name); ///< layer name setter std::string getLayerName() const; ///< layer name getter void setOutputMin(const TensorT& min_output); ///< min output setter void setOutputMax(const TensorT& output_max); ///< max output setter void setDropProbability(const TensorT& drop_probability); ///< drop_probability setter TensorT getDropProbability() const; ///< drop_probability getter void setDrop(const Eigen::Tensor<TensorT, 2>& drop); ///< drop setter Eigen::Tensor<TensorT, 2> getDrop() const; ///< drop copy getter void setInput(const Eigen::Tensor<TensorT, 2>& input); ///< input setter Eigen::Tensor<TensorT, 2> getInput() const; ///< input copy getter void setOutput(const Eigen::Tensor<TensorT, 2>& output); ///< output setter Eigen::Tensor<TensorT, 2> getOutput() const; ///< output copy getter void setError(const Eigen::Tensor<TensorT, 2>& error); ///< error setter Eigen::Tensor<TensorT, 2> getError() const; ///< error copy getter void setDerivative(const Eigen::Tensor<TensorT, 2>& derivative); ///< derivative setter Eigen::Tensor<TensorT, 2> getDerivative() const; ///< derivative copy getter void setDt(const Eigen::Tensor<TensorT, 2>& dt); ///< dt setter Eigen::Tensor<TensorT, 2> getDt() const; ///< dt copy getter Node<TensorT> getNodeCopy(const Node<TensorT>& other) const; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(id_, name_, module_id_, module_name_, layer_name_, output_max_, output_min_, tensor_index_, type_, status_, activation_, activation_grad_, integration_, integration_error_, integration_weight_grad_ ); } int id_ = -1; ///< Node ID (used internally by Model) std::string name_ = ""; ///< Node Name int module_id_ = -1; ///< Module ID (used internally by Model) std::string module_name_ = ""; ///<Module Name std::pair<int, int> tensor_index_ = std::make_pair(-1,-1); ///< Layer ID: pair consisting of OperationsList index and Layer index(used internally by Model) std::string layer_name_ = ""; ///< Layer name EvoNet::NodeType type_; ///< Node Type EvoNet::NodeStatus status_; ///< Node Status std::shared_ptr<ActivationOp<TensorT>> activation_; ///< Node activation function std::shared_ptr<ActivationOp<TensorT>> activation_grad_; ///< Node activation function std::shared_ptr<IntegrationOp<TensorT>> integration_; ///< Node integration function std::shared_ptr<IntegrationErrorOp<TensorT>> integration_error_; ///< Node integration error function std::shared_ptr<IntegrationWeightGradOp<TensorT>> integration_weight_grad_; ///< Node integration weight grad function TensorT output_min_ = (TensorT)-1.0e6; ///< Min Node output TensorT output_max_ = (TensorT)1.0e6; ///< Max Node output TensorT drop_probability_ = (TensorT)0.0; Eigen::Tensor<TensorT, 2> drop_; ///< Node Output drop tensor (initialized once per epoch) /** @brief output, error and derivative have the following dimensions: rows: # of samples, cols: # of time steps where the number of samples spans 0 to n samples and the number of time steps spans m time points to 0 */ Eigen::Tensor<TensorT, 2> input_; ///< Node Net Input (rows: # of samples, cols: # of time steps) Eigen::Tensor<TensorT, 2> output_; ///< Node Output (rows: # of samples, cols: # of time steps) Eigen::Tensor<TensorT, 2> error_; ///< Node Error (rows: # of samples, cols: # of time steps) Eigen::Tensor<TensorT, 2> derivative_; ///< Node Error (rows: # of samples, cols: # of time steps) Eigen::Tensor<TensorT, 2> dt_; ///< Resolution of each time-step (rows: # of samples, cols: # of time steps) }; template<typename TensorT> Node<TensorT>::Node(const Node<TensorT>& other) { id_ = other.id_; name_ = other.name_; module_id_ = other.module_id_; module_name_ = other.module_name_; tensor_index_ = other.tensor_index_; layer_name_ = other.layer_name_; type_ = other.type_; status_ = other.status_; setActivation(std::shared_ptr<ActivationOp<TensorT>>(other.activation_.get()->copy())); setActivationGrad(std::shared_ptr<ActivationOp<TensorT>>(other.activation_grad_.get()->copy())); setIntegration(std::shared_ptr<IntegrationOp<TensorT>>(other.integration_.get()->copy())); setIntegrationError(std::shared_ptr<IntegrationErrorOp<TensorT>>(other.integration_error_.get()->copy())); setIntegrationWeightGrad(std::shared_ptr<IntegrationWeightGradOp<TensorT>>(other.integration_weight_grad_.get()->copy())); output_min_ = other.output_min_; output_max_ = other.output_max_; drop_probability_ = other.drop_probability_; drop_ = other.drop_; } template<typename TensorT> Node<TensorT>::Node(const std::string& name, const EvoNet::NodeType& type, const EvoNet::NodeStatus& status, const std::shared_ptr<ActivationOp<TensorT>>& activation, const std::shared_ptr<ActivationOp<TensorT>>& activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& integration_weight_grad) : name_(name), type_(type), status_(status) { setActivation(activation); setActivationGrad(activation_grad); setIntegration(integration); setIntegrationError(integration_error); setIntegrationWeightGrad(integration_weight_grad); } template<typename TensorT> void Node<TensorT>::setId(const int& id) { id_ = id; if (name_ == "") { name_ = std::to_string(id); } } template<typename TensorT> int Node<TensorT>::getId() const { return id_; } template<typename TensorT> void Node<TensorT>::setName(const std::string& name) { name_ = name; } template<typename TensorT> std::string Node<TensorT>::getName() const { return name_; } template<typename TensorT> void Node<TensorT>::setType(const EvoNet::NodeType& type) { type_ = type; } template<typename TensorT> EvoNet::NodeType Node<TensorT>::getType() const { return type_; } template<typename TensorT> void Node<TensorT>::setStatus(const EvoNet::NodeStatus& status) { status_ = status; } template<typename TensorT> EvoNet::NodeStatus Node<TensorT>::getStatus() const { return status_; } template<typename TensorT> void Node<TensorT>::setActivation(const std::shared_ptr<ActivationOp<TensorT>>& activation) { activation_.reset(); activation_ = std::move(activation); } template<typename TensorT> std::shared_ptr<ActivationOp<TensorT>> Node<TensorT>::getActivationShared() const { return activation_; } template<typename TensorT> ActivationOp<TensorT>* Node<TensorT>::getActivation() const { return activation_.get(); } template<typename TensorT> void Node<TensorT>::setActivationGrad(const std::shared_ptr<ActivationOp<TensorT>>& activation_grad) { activation_grad_.reset(); activation_grad_ = std::move(activation_grad); } template<typename TensorT> std::shared_ptr<ActivationOp<TensorT>> Node<TensorT>::getActivationGradShared() const { return activation_grad_; } template<typename TensorT> ActivationOp<TensorT>* Node<TensorT>::getActivationGrad() const { return activation_grad_.get(); } template<typename TensorT> void Node<TensorT>::setIntegration(const std::shared_ptr<IntegrationOp<TensorT>>& integration) { integration_.reset(); integration_ = std::move(integration); } template<typename TensorT> std::shared_ptr<IntegrationOp<TensorT>> Node<TensorT>::getIntegrationShared() const { return integration_; } template<typename TensorT> IntegrationOp<TensorT>* Node<TensorT>::getIntegration() const { return integration_.get(); } template<typename TensorT> void Node<TensorT>::setIntegrationError(const std::shared_ptr<IntegrationErrorOp<TensorT>>& integration_error) { integration_error_.reset(); integration_error_ = std::move(integration_error); } template<typename TensorT> std::shared_ptr<IntegrationErrorOp<TensorT>> Node<TensorT>::getIntegrationErrorShared() const { return integration_error_; } template<typename TensorT> IntegrationErrorOp<TensorT>* Node<TensorT>::getIntegrationError() const { return integration_error_.get(); } template<typename TensorT> void Node<TensorT>::setIntegrationWeightGrad(const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& integration_weight_grad) { integration_weight_grad_.reset(); integration_weight_grad_ = std::move(integration_weight_grad); } template<typename TensorT> std::shared_ptr<IntegrationWeightGradOp<TensorT>> Node<TensorT>::getIntegrationWeightGradShared() const { return integration_weight_grad_; } template<typename TensorT> IntegrationWeightGradOp<TensorT>* Node<TensorT>::getIntegrationWeightGrad() const { return integration_weight_grad_.get(); } template<typename TensorT> void Node<TensorT>::setModuleId(const int & module_id) { module_id_ = module_id; } template<typename TensorT> int Node<TensorT>::getModuleId() const { return module_id_; } template<typename TensorT> void Node<TensorT>::setTensorIndex(const std::pair<int, int>& layer_id) { tensor_index_ = layer_id; } template<typename TensorT> std::pair<int, int> Node<TensorT>::getTensorIndex() const { return tensor_index_; } template<typename TensorT> void Node<TensorT>::setModuleName(const std::string & module_name) { module_name_ = module_name; } template<typename TensorT> std::string Node<TensorT>::getModuleName() const { return module_name_; } template<typename TensorT> inline void Node<TensorT>::setLayerName(const std::string & layer_name) { layer_name_ = layer_name; } template<typename TensorT> inline std::string Node<TensorT>::getLayerName() const { return layer_name_; } template<typename TensorT> void Node<TensorT>::setOutputMin(const TensorT& output_min) { output_min_ = output_min; } template<typename TensorT> void Node<TensorT>::setOutputMax(const TensorT& output_max) { output_max_ = output_max; } template<typename TensorT> void Node<TensorT>::setDropProbability(const TensorT & drop_probability) { drop_probability_ = drop_probability; } template<typename TensorT> TensorT Node<TensorT>::getDropProbability() const { return drop_probability_; } template<typename TensorT> void Node<TensorT>::setDrop(const Eigen::Tensor<TensorT, 2>& drop) { drop_ = drop; } template<typename TensorT> Eigen::Tensor<TensorT, 2> Node<TensorT>::getDrop() const { return drop_; } template<typename TensorT> void Node<TensorT>::setInput(const Eigen::Tensor<TensorT, 2>& input) { input_ = input; } template<typename TensorT> Eigen::Tensor<TensorT, 2> Node<TensorT>::getInput() const { return input_; } template<typename TensorT> void Node<TensorT>::setOutput(const Eigen::Tensor<TensorT, 2>& output) { output_ = output; } template<typename TensorT> Eigen::Tensor<TensorT, 2> Node<TensorT>::getOutput() const { return output_;// *getDrop(); } template<typename TensorT> void Node<TensorT>::setError(const Eigen::Tensor<TensorT, 2>& error) { error_ = error; } template<typename TensorT> Eigen::Tensor<TensorT, 2> Node<TensorT>::getError() const { return error_; } template<typename TensorT> void Node<TensorT>::setDerivative(const Eigen::Tensor<TensorT, 2>& derivative) { derivative_ = derivative; } template<typename TensorT> Eigen::Tensor<TensorT, 2> Node<TensorT>::getDerivative() const { return derivative_; } template<typename TensorT> void Node<TensorT>::setDt(const Eigen::Tensor<TensorT, 2>& dt) { dt_ = dt; } template<typename TensorT> Eigen::Tensor<TensorT, 2> Node<TensorT>::getDt() const { return dt_; } //template<typename TensorT> //Node<TensorT> Node<TensorT>::getNodeCopy(const Node<TensorT>& other) const { // Node<TensorT> node_copy; // node_copy.setId(other.id_); // name_ = other.name_; // module_id_ = other.module_id_; // module_name_ = other.module_name_; // tensor_index_ = other.tensor_index_; // layer_name_ = other.layer_name_; // type_ = other.type_; // activation_ = std::make_shared<ActivationOp<Tensor>>(*other.activation_); // activation_grad_ = std::make_shared<ActivationOp<Tensor>>(*other.activation_grad_); // integration_ = std::make_shared<IntegrationOp<Tensor>>(*other.integration_); // integration_error_ = std::make_shared<IntegrationErrorOp<Tensor>>(*other.integration_error_); other.integration_error_; // integration_weight_grad_ = std::make_shared<IntegrationWeightGradOp<Tensor>>(*other.integration_weight_grad_); // status_ = other.status_; // output_min_ = other.output_min_; // output_max_ = other.output_max_; // drop_probability_ = other.drop_probability_; // drop_ = other.drop_; // return *this; //} } #endif //EVONET_NODE_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE BiochemicalReaction test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/simulator/BiochemicalReaction.h> #include <EvoNet/test_config.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(biochemicalreaction) BOOST_AUTO_TEST_CASE(constructor) { BiochemicalReactionModel<float>* ptr = nullptr; BiochemicalReactionModel<float>* nullPointer = nullptr; ptr = new BiochemicalReactionModel<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { BiochemicalReactionModel<float>* ptr = nullptr; ptr = new BiochemicalReactionModel<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(BiochemicalReactionUpdateEquation) { BiochemicalReaction biochemReaction; // Create a dummy biochemical reaction biochemReaction.reactants_stoichiometry = {-1, -1, -2}; biochemReaction.products_stoichiometry = { 1, 2 }; biochemReaction.reactants_ids = {"adp", "glc", "h"}; biochemReaction.products_ids = { "atp", "g6p" }; // Test that the reaction string is made correctly biochemReaction.updateEquation(); BOOST_CHECK_EQUAL(biochemReaction.equation, "adp + glc + 2 h = atp + 2 g6p"); // Remove/Add reactants/products biochemReaction.reactants_stoichiometry = { -1, -1 }; biochemReaction.products_stoichiometry = { 1, 1, 1 }; biochemReaction.reactants_ids = { "adp", "glc" }; biochemReaction.products_ids = { "atp", "g6p", "h" }; // Check that the equation string is what it should be biochemReaction.updateEquation(); BOOST_CHECK_EQUAL(biochemReaction.equation, "adp + glc = atp + g6p + h"); } BOOST_AUTO_TEST_CASE(ReadMetabolomicsData) { // Read in the metabolomics data BiochemicalReactionModel<float> biochemReactModel; std::string filename = EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_Metabolomics_train.csv"); biochemReactModel.readMetabolomicsData(filename); BOOST_CHECK_EQUAL(biochemReactModel.metabolomicsData_.size(), 4); BOOST_CHECK_EQUAL(biochemReactModel.metabolomicsData_.at("S01_D01_PLT_25C_0hr").size(), 81); const MetabolomicsDatum& test1 = biochemReactModel.metabolomicsData_.at("S01_D01_PLT_25C_0hr").at("2pg").at(0); BOOST_CHECK_EQUAL(test1.sample_name, "S01_D01_PLT_25C_0hr_Broth-1"); BOOST_CHECK_EQUAL(test1.sample_group_name, "S01_D01_PLT_25C_0hr"); BOOST_CHECK_EQUAL(test1.component_name, "Pool_2pg_3pg.Pool_2pg_3pg_1.Light"); BOOST_CHECK_EQUAL(test1.component_group_name, "2pg"); BOOST_CHECK_EQUAL(test1.calculated_concentration_units, "uM"); BOOST_CHECK_EQUAL(test1.time_point, 0); BOOST_CHECK_CLOSE(test1.calculated_concentration, 0.926902, 1e-4); BOOST_CHECK(test1.used); const MetabolomicsDatum& test2 = biochemReactModel.metabolomicsData_.at("S01_D01_PLT_25C_6.5hr").at("utp").at(5); BOOST_CHECK_EQUAL(test2.sample_name, "S01_D01_PLT_25C_6.5hr_Broth-6"); BOOST_CHECK_EQUAL(test2.sample_group_name, "S01_D01_PLT_25C_6.5hr"); BOOST_CHECK_EQUAL(test2.component_name, "utp.utp_1.Light"); BOOST_CHECK_EQUAL(test2.component_group_name, "utp"); BOOST_CHECK_EQUAL(test2.calculated_concentration_units, "uM"); BOOST_CHECK_EQUAL(test2.time_point, 6.5); BOOST_CHECK_CLOSE(test2.calculated_concentration, 2.105641, 1e-4); BOOST_CHECK(test1.used); //for (const auto& sample_group_map : biochemReactModel.metabolomicsData_) { // for (const auto& component_group_map : sample_group_map.second) { // for (const auto& met_datum : component_group_map.second) { // std::cout << met_datum.print() << std::endl; // } // } //} } BOOST_AUTO_TEST_CASE(ReadBiochemicalReactions) { // Read in the biochemical model BiochemicalReactionModel<float> biochemReactModel; std::string filename = EVONET_GET_TEST_DATA_PATH("RBCGlycolysis.csv"); biochemReactModel.readBiochemicalReactions(filename); BOOST_CHECK_EQUAL(biochemReactModel.biochemicalReactions_.size(), 26); const BiochemicalReaction& test1 = biochemReactModel.biochemicalReactions_.at("ADK1"); BOOST_CHECK_EQUAL(test1.model_id, ""); BOOST_CHECK_EQUAL(test1.reaction_id, "ADK1"); BOOST_CHECK_EQUAL(test1.reaction_name, "Adenylate kinase"); BOOST_CHECK_EQUAL(test1.equation, "amp_c + atp_c <=> 2.0 adp_c"); BOOST_CHECK_EQUAL(test1.subsystem, ""); BOOST_CHECK_EQUAL(test1.gpr, "Ak1_AT1"); BOOST_CHECK_EQUAL(test1.reactants_stoichiometry.at(0), -1); BOOST_CHECK_EQUAL(test1.reactants_stoichiometry.at(1), -1); BOOST_CHECK_EQUAL(test1.products_stoichiometry.at(0), 2); BOOST_CHECK_EQUAL(test1.reactants_ids.at(0), "amp_c"); BOOST_CHECK_EQUAL(test1.reactants_ids.at(1), "atp_c"); BOOST_CHECK_EQUAL(test1.products_ids.at(0), "adp_c"); BOOST_CHECK(test1.reversibility); BOOST_CHECK(test1.used); const BiochemicalReaction& test2 = biochemReactModel.biochemicalReactions_.at("TPI"); BOOST_CHECK_EQUAL(test2.model_id, ""); BOOST_CHECK_EQUAL(test2.reaction_id, "TPI"); BOOST_CHECK_EQUAL(test2.reaction_name, "Triose-phosphate isomerase"); BOOST_CHECK_EQUAL(test2.equation, "dhap_c <=> g3p_c"); BOOST_CHECK_EQUAL(test2.subsystem, ""); BOOST_CHECK_EQUAL(test2.gpr, "Tpi1_AT1"); BOOST_CHECK_EQUAL(test2.reactants_stoichiometry.at(0), -1); BOOST_CHECK_EQUAL(test2.products_stoichiometry.at(0), 1); BOOST_CHECK_EQUAL(test2.reactants_ids.at(0), "dhap_c"); BOOST_CHECK_EQUAL(test2.products_ids.at(0), "g3p_c"); BOOST_CHECK(test2.reversibility); BOOST_CHECK(test1.used); //for (const auto& reaction : biochemReactModel.biochemicalReactions_) { // std::cout << reaction.second.print() << std::endl; //} } BOOST_AUTO_TEST_CASE(ReadMetaData) { BiochemicalReactionModel<float> biochemReactModel; std::string filename = EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_MetaData_train.csv"); biochemReactModel.readMetaData(filename); BOOST_CHECK_EQUAL(biochemReactModel.metaData_.size(), 4); const MetaDatum& test1 = biochemReactModel.metaData_.at("S01_D01_PLT_25C_0hr"); BOOST_CHECK_EQUAL(test1.sample_group_name, "S01_D01_PLT_25C_0hr"); BOOST_CHECK_EQUAL(test1.condition, "D01"); BOOST_CHECK_EQUAL(test1.time, "0"); BOOST_CHECK_EQUAL(test1.subject, "S01"); BOOST_CHECK_EQUAL(test1.temperature, "25C"); //for (const auto& metadatum : biochemReactModel.metaData_) { // std::cout << metadatum.second.print() << std::endl; //} } BOOST_AUTO_TEST_CASE(findComponentGroupNames) { BiochemicalReactionModel<float> biochemReactModel; biochemReactModel.readMetabolomicsData(EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_Metabolomics_train.csv")); biochemReactModel.readBiochemicalReactions(EVONET_GET_TEST_DATA_PATH("RBCGlycolysis.csv")); biochemReactModel.readMetaData(EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_MetaData_train.csv")); BOOST_CHECK_EQUAL(biochemReactModel.component_group_names_.size(), 0); biochemReactModel.findComponentGroupNames(); BOOST_CHECK_EQUAL(biochemReactModel.component_group_names_.size(), 81); BOOST_CHECK_EQUAL(biochemReactModel.component_group_names_.at(0), "2pg"); //for (const std::string& react : biochemReactModel.component_group_names_) { // std::cout << react << "; "; //} //std::cout << std::endl; } BOOST_AUTO_TEST_CASE(findMARs) { BiochemicalReactionModel<float> biochemReactModel; biochemReactModel.readMetabolomicsData(EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_Metabolomics_train.csv")); biochemReactModel.readBiochemicalReactions(EVONET_GET_TEST_DATA_PATH("RBCGlycolysis.csv"), true); biochemReactModel.readMetaData(EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_MetaData_train.csv")); biochemReactModel.findComponentGroupNames(); BOOST_CHECK_EQUAL(biochemReactModel.reaction_ids_.size(), 0); biochemReactModel.findMARs(); biochemReactModel.findMARs(false, true); BOOST_CHECK_EQUAL(biochemReactModel.reaction_ids_.size(), 10); BOOST_CHECK_EQUAL(biochemReactModel.reaction_ids_.at(0), "ADK1"); biochemReactModel.removeRedundantMARs(); BOOST_CHECK_EQUAL(biochemReactModel.reaction_ids_.size(), 9); BOOST_CHECK_EQUAL(biochemReactModel.reaction_ids_.at(0), "ADK1"); //for (const std::string& react : biochemReactModel.reaction_ids_) { // std::cout << react << "; "; //} //std::cout << std::endl; } BOOST_AUTO_TEST_CASE(findLabels) { BiochemicalReactionModel<float> biochemReactModel; biochemReactModel.readMetabolomicsData(EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_Metabolomics_train.csv")); biochemReactModel.readBiochemicalReactions(EVONET_GET_TEST_DATA_PATH("RBCGlycolysis.csv")); biochemReactModel.readMetaData(EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_MetaData_train.csv")); BOOST_CHECK_EQUAL(biochemReactModel.labels_.size(), 0); BOOST_CHECK_EQUAL(biochemReactModel.sample_group_names_.size(), 0); BOOST_CHECK_EQUAL(biochemReactModel.sample_group_name_to_label_.size(), 0); biochemReactModel.findLabels("condition"); BOOST_CHECK_EQUAL(biochemReactModel.labels_.size(), 1); BOOST_CHECK_EQUAL(biochemReactModel.labels_.at(0), "D01"); BOOST_CHECK_EQUAL(biochemReactModel.sample_group_names_.size(), 4); BOOST_CHECK_EQUAL(biochemReactModel.sample_group_names_.at(0), "S01_D01_PLT_25C_0hr"); BOOST_CHECK_EQUAL(biochemReactModel.sample_group_name_to_label_.size(), 4); BOOST_CHECK_EQUAL(biochemReactModel.sample_group_name_to_label_.at("S01_D01_PLT_25C_0hr"), "D01"); biochemReactModel.findLabels("subject"); BOOST_CHECK_EQUAL(biochemReactModel.labels_.size(), 1); BOOST_CHECK_EQUAL(biochemReactModel.labels_.at(0), "S01"); BOOST_CHECK_EQUAL(biochemReactModel.sample_group_names_.size(), 4); BOOST_CHECK_EQUAL(biochemReactModel.sample_group_names_.at(0), "S01_D01_PLT_25C_0hr"); BOOST_CHECK_EQUAL(biochemReactModel.sample_group_names_.size(), 4); BOOST_CHECK_EQUAL(biochemReactModel.sample_group_name_to_label_.at("S01_D01_PLT_25C_0hr"), "S01"); //for (const std::string& react : biochemReactModel.labels_) { // std::cout << react << "; "; //} //std::cout << std::endl; //for (const std::string& react : biochemReactModel.sample_group_names_) { // std::cout << react << "; "; //} //std::cout << std::endl; } BOOST_AUTO_TEST_CASE(makeDefaultMetabolomicsData) { BiochemicalReactionModel<float> biochemReactModel; // NO TEST } BOOST_AUTO_TEST_CASE(calculateMAR) { BiochemicalReactionModel<float> biochemReactModel; // TODO } BOOST_AUTO_TEST_CASE(getRandomConcentration) { BiochemicalReactionModel<float> biochemReactModel; // NO TEST } BOOST_AUTO_TEST_CASE(getDefaultMets) { BiochemicalReactionModel<float> biochemReactModel; // NO TEST } BOOST_AUTO_TEST_CASE(getCurrencyMets) { BiochemicalReactionModel<float> biochemReactModel; // NO TEST } BOOST_AUTO_TEST_CASE(getMaxReplicates) { BiochemicalReactionModel<float> biochemReactModel; biochemReactModel.readMetabolomicsData(EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_Metabolomics_train.csv")); biochemReactModel.readBiochemicalReactions(EVONET_GET_TEST_DATA_PATH("RBCGlycolysis.csv"), true); biochemReactModel.readMetaData(EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_MetaData_train.csv")); biochemReactModel.findComponentGroupNames(); biochemReactModel.findMARs(); biochemReactModel.findLabels("condition"); std::map<std::string, int> sample_group_name_to_reps; std::pair<int, int> max_reps_n_labels = biochemReactModel.getMaxReplicatesAndNLabels(sample_group_name_to_reps, biochemReactModel.sample_group_names_, biochemReactModel.component_group_names_); BOOST_CHECK_EQUAL(max_reps_n_labels.first, 6); BOOST_CHECK_EQUAL(max_reps_n_labels.second, 24); for (const auto& rep_map : sample_group_name_to_reps) { BOOST_CHECK_EQUAL(rep_map.second, 6); } } BOOST_AUTO_TEST_CASE(getMetDataAsTensors) { BiochemicalReactionModel<float> biochemReactModel; biochemReactModel.readMetabolomicsData(EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_Metabolomics_train.csv")); biochemReactModel.readBiochemicalReactions(EVONET_GET_TEST_DATA_PATH("RBCGlycolysis.csv"), true); biochemReactModel.readMetaData(EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_MetaData_train.csv")); biochemReactModel.findComponentGroupNames(); biochemReactModel.findMARs(); biochemReactModel.findLabels("condition"); // determine the dimensions of the Tensors std::map<std::string, int> sample_group_name_to_reps; std::pair<int, int> max_reps_n_labels = biochemReactModel.getMaxReplicatesAndNLabels(sample_group_name_to_reps, biochemReactModel.sample_group_names_, biochemReactModel.component_group_names_); Eigen::Tensor<float, 2> metabo_concs(int(biochemReactModel.component_group_names_.size()), max_reps_n_labels.second); Eigen::Tensor<float, 2> mars_values(int(biochemReactModel.reaction_ids_.size()), max_reps_n_labels.second); std::vector<std::string> metabo_labels; metabo_labels.reserve(max_reps_n_labels.second); // use_concentrations, iter_values, fill_zero biochemReactModel.getMetDataAsTensors(metabo_concs, metabo_labels, biochemReactModel.sample_group_names_, biochemReactModel.component_group_names_, biochemReactModel.sample_group_name_to_label_, sample_group_name_to_reps, true, false, false, true, false, false, true, false, "S01_D01_PLT_25C_0hr", 10, false); BOOST_CHECK_CLOSE(metabo_concs(24, 6), 0, 1e-4); // component_group_name dctp is missing from sample_group_name S01_D01_PLT_25C_22hr BOOST_CHECK_CLOSE(metabo_concs(0, 0), 0.926901623, 1e-4); // 2pg and S01_D01_PLT_25C_0hr BOOST_CHECK_CLOSE(metabo_concs(int(biochemReactModel.component_group_names_.size()) - 1, max_reps_n_labels.second - 1), 2.105641075, 1e-4); // utp and S01_D01_PLT_25C_6.5hr BOOST_CHECK_CLOSE(metabo_concs(19, 11), 1e-6, 1e-4); // cmp and S01_D01_PLT_25C_22hr BOOST_CHECK_EQUAL(metabo_labels.size(), max_reps_n_labels.second); BOOST_CHECK_EQUAL(metabo_labels.at(0), "D01"); // use_concentrations, iter_values, fill_mean biochemReactModel.getMetDataAsTensors(metabo_concs, metabo_labels, biochemReactModel.sample_group_names_, biochemReactModel.component_group_names_, biochemReactModel.sample_group_name_to_label_, sample_group_name_to_reps, true, false, false, true, false, true, false, false, "S01_D01_PLT_25C_0hr", 10, false); BOOST_CHECK_CLOSE(metabo_concs(24, 6), 0, 1e-4); // component_group_name dctp is missing from sample_group_name S01_D01_PLT_25C_22hr BOOST_CHECK_CLOSE(metabo_concs(0, 0), 0.926901623, 1e-4); // 2pg and S01_D01_PLT_25C_0hr BOOST_CHECK_CLOSE(metabo_concs(int(biochemReactModel.component_group_names_.size()) - 1, max_reps_n_labels.second - 1), 2.105641075, 1e-4); // utp and S01_D01_PLT_25C_6.5hr BOOST_CHECK_CLOSE(metabo_concs(19, 11), 0.0314822569, 1e-4); // cmp and S01_D01_PLT_25C_22hr BOOST_CHECK_EQUAL(metabo_labels.size(), max_reps_n_labels.second); BOOST_CHECK_EQUAL(metabo_labels.at(0), "D01"); // use_concentrations, iter_values, fill_sampling biochemReactModel.getMetDataAsTensors(metabo_concs, metabo_labels, biochemReactModel.sample_group_names_, biochemReactModel.component_group_names_, biochemReactModel.sample_group_name_to_label_, sample_group_name_to_reps, true, false, false, true, true, false, false, false, "S01_D01_PLT_25C_0hr", 10, false); BOOST_CHECK_CLOSE(metabo_concs(24, 6), 0, 1e-4); // component_group_name dctp is missing from sample_group_name S01_D01_PLT_25C_22hr BOOST_CHECK_CLOSE(metabo_concs(0, 0), 0.926901623, 1e-4); // 2pg and S01_D01_PLT_25C_0hr BOOST_CHECK_CLOSE(metabo_concs(int(biochemReactModel.component_group_names_.size()) - 1, max_reps_n_labels.second - 1), 2.105641075, 1e-4); // utp and S01_D01_PLT_25C_6.5hr BOOST_CHECK_GE(metabo_concs(19, 11), 0.018); // cmp and S01_D01_PLT_25C_22hr BOOST_CHECK_LE(metabo_concs(19, 11), 0.072); // cmp and S01_D01_PLT_25C_22hr BOOST_CHECK_EQUAL(metabo_labels.size(), max_reps_n_labels.second); BOOST_CHECK_EQUAL(metabo_labels.at(0), "D01"); // use_concentrations, sample_values biochemReactModel.getMetDataAsTensors(metabo_concs, metabo_labels, biochemReactModel.sample_group_names_, biochemReactModel.component_group_names_, biochemReactModel.sample_group_name_to_label_, sample_group_name_to_reps, true, false, true, false, false, false, false, false, "S01_D01_PLT_25C_0hr", 10, false); BOOST_CHECK_CLOSE(metabo_concs(24, 6), 0, 1e-4); // component_group_name dctp is missing from sample_group_name S01_D01_PLT_25C_22hr BOOST_CHECK_GE(metabo_concs(0, 0), 0.92); // 2pg and S01_D01_PLT_25C_0hr BOOST_CHECK_LE(metabo_concs(0, 0), 1.04); // 2pg and S01_D01_PLT_25C_0hr BOOST_CHECK_GE(metabo_concs(int(biochemReactModel.component_group_names_.size()) - 1, max_reps_n_labels.second - 1), 2.07); // utp and S01_D01_PLT_25C_6.5hr BOOST_CHECK_LE(metabo_concs(int(biochemReactModel.component_group_names_.size()) - 1, max_reps_n_labels.second - 1), 2.32); // utp and S01_D01_PLT_25C_6.5hr BOOST_CHECK_GE(metabo_concs(19, 11), 0.018); // cmp and S01_D01_PLT_25C_22hr BOOST_CHECK_LE(metabo_concs(19, 11), 0.072); // cmp and S01_D01_PLT_25C_22hr BOOST_CHECK_EQUAL(metabo_labels.size(), max_reps_n_labels.second); BOOST_CHECK_EQUAL(metabo_labels.at(0), "D01"); // use_MARs, sample_values biochemReactModel.getMetDataAsTensors(mars_values, metabo_labels, biochemReactModel.sample_group_names_, biochemReactModel.reaction_ids_, biochemReactModel.sample_group_name_to_label_, sample_group_name_to_reps, false, true, true, false, false, false, false, false, "S01_D01_PLT_25C_0hr", 10, false); BOOST_CHECK_GE(mars_values(0, 0), 0.03); BOOST_CHECK_LE(mars_values(0, 0), 0.06); BOOST_CHECK_GE(mars_values(int(biochemReactModel.reaction_ids_.size()) - 1, max_reps_n_labels.second - 1), 600); BOOST_CHECK_LE(mars_values(int(biochemReactModel.reaction_ids_.size()) - 1, max_reps_n_labels.second - 1), 1000); BOOST_CHECK_EQUAL(metabo_labels.size(), max_reps_n_labels.second); BOOST_CHECK_EQUAL(metabo_labels.at(0), "D01"); // use_concentrations, iter_values, fill_zero, apply_fold_change biochemReactModel.getMetDataAsTensors(metabo_concs, metabo_labels, biochemReactModel.sample_group_names_, biochemReactModel.component_group_names_, biochemReactModel.sample_group_name_to_label_, sample_group_name_to_reps, true, false, false, true, false, false, true, true, "S01_D01_PLT_25C_0hr", 10, false); BOOST_CHECK_CLOSE(metabo_concs(24, 6), 0, 1e-4); // component_group_name dctp is missing from sample_group_name S01_D01_PLT_25C_22hr BOOST_CHECK_CLOSE(metabo_concs(0, 0), 0, 1e-4); // 2pg and S01_D01_PLT_25C_0hr BOOST_CHECK_CLOSE(metabo_concs(int(biochemReactModel.component_group_names_.size()) - 1, max_reps_n_labels.second - 1), -0.0343664847, 1e-4); // utp and S01_D01_PLT_25C_6.5hr BOOST_CHECK_CLOSE(metabo_concs(19, 11), -1, 1e-4); // cmp and S01_D01_PLT_25C_22hr BOOST_CHECK_EQUAL(metabo_labels.size(), max_reps_n_labels.second); BOOST_CHECK_EQUAL(metabo_labels.at(0), "D01"); // use_concentrations, iter_values, fill_mean, apply_fold_change biochemReactModel.getMetDataAsTensors(metabo_concs, metabo_labels, biochemReactModel.sample_group_names_, biochemReactModel.component_group_names_, biochemReactModel.sample_group_name_to_label_, sample_group_name_to_reps, true, false, false, true, false, true, false, true, "S01_D01_PLT_25C_0hr", 10, false); BOOST_CHECK_CLOSE(metabo_concs(24, 6), 0, 1e-4); // component_group_name dctp is missing from sample_group_name S01_D01_PLT_25C_22hr BOOST_CHECK_CLOSE(metabo_concs(0, 0), 0, 1e-4); // 2pg and S01_D01_PLT_25C_0hr BOOST_CHECK_CLOSE(metabo_concs(int(biochemReactModel.component_group_names_.size()) - 1, max_reps_n_labels.second - 1), -0.0343664847, 1e-4); // utp and S01_D01_PLT_25C_6.5hr BOOST_CHECK_CLOSE(metabo_concs(19, 11), -0.404844701, 1e-4); // cmp and S01_D01_PLT_25C_22hr BOOST_CHECK_EQUAL(metabo_labels.size(), max_reps_n_labels.second); BOOST_CHECK_EQUAL(metabo_labels.at(0), "D01"); // use_concentrations, iter_values, fill_sampling, apply_fold_change biochemReactModel.getMetDataAsTensors(metabo_concs, metabo_labels, biochemReactModel.sample_group_names_, biochemReactModel.component_group_names_, biochemReactModel.sample_group_name_to_label_, sample_group_name_to_reps, true, false, false, true, true, false, false, true, "S01_D01_PLT_25C_0hr", 10, false); BOOST_CHECK_CLOSE(metabo_concs(24, 6), 0, 1e-4); // component_group_name dctp is missing from sample_group_name S01_D01_PLT_25C_22hr BOOST_CHECK_CLOSE(metabo_concs(0, 0), 0, 1e-4); // 2pg and S01_D01_PLT_25C_0hr BOOST_CHECK_CLOSE(metabo_concs(int(biochemReactModel.component_group_names_.size()) - 1, max_reps_n_labels.second - 1), -0.0343664847, 1e-4); // utp and S01_D01_PLT_25C_6.5hr BOOST_CHECK_GE(metabo_concs(19, 11), -0.7); // cmp and S01_D01_PLT_25C_22hr BOOST_CHECK_LE(metabo_concs(19, 11), -0.3); // cmp and S01_D01_PLT_25C_22hr BOOST_CHECK_EQUAL(metabo_labels.size(), max_reps_n_labels.second); BOOST_CHECK_EQUAL(metabo_labels.at(0), "D01"); // use_concentrations, sample_values, apply_fold_change biochemReactModel.getMetDataAsTensors(metabo_concs, metabo_labels, biochemReactModel.sample_group_names_, biochemReactModel.component_group_names_, biochemReactModel.sample_group_name_to_label_, sample_group_name_to_reps, true, false, true, false, false, false, false, true, "S01_D01_PLT_25C_0hr", 10, false); BOOST_CHECK_CLOSE(metabo_concs(24, 6), 0, 1e-4); // component_group_name dctp is missing from sample_group_name S01_D01_PLT_25C_22hr BOOST_CHECK_GE(metabo_concs(0, 0), -0.1); // 2pg and S01_D01_PLT_25C_0hr BOOST_CHECK_LE(metabo_concs(0, 0), 0.1); // 2pg and S01_D01_PLT_25C_0hr BOOST_CHECK_GE(metabo_concs(int(biochemReactModel.component_group_names_.size()) - 1, max_reps_n_labels.second - 1), -0.3); // utp and S01_D01_PLT_25C_6.5hr BOOST_CHECK_LE(metabo_concs(int(biochemReactModel.component_group_names_.size()) - 1, max_reps_n_labels.second - 1), 0.3); // utp and S01_D01_PLT_25C_6.5hr BOOST_CHECK_GE(metabo_concs(19, 11), -0.8); // cmp and S01_D01_PLT_25C_22hr BOOST_CHECK_LE(metabo_concs(19, 11), -0.4); // cmp and S01_D01_PLT_25C_22hr BOOST_CHECK_EQUAL(metabo_labels.size(), max_reps_n_labels.second); BOOST_CHECK_EQUAL(metabo_labels.at(0), "D01"); // use_MARs, sample_values, apply_fold_change biochemReactModel.getMetDataAsTensors(mars_values, metabo_labels, biochemReactModel.sample_group_names_, biochemReactModel.reaction_ids_, biochemReactModel.sample_group_name_to_label_, sample_group_name_to_reps, false, true, true, false, false, false, false, true, "S01_D01_PLT_25C_0hr", 10, false); BOOST_CHECK_GE(mars_values(0, 0), -0.5); BOOST_CHECK_LE(mars_values(0, 0), 0.1); BOOST_CHECK_GE(mars_values(int(biochemReactModel.reaction_ids_.size()) - 1, max_reps_n_labels.second - 1), 0.6); BOOST_CHECK_LE(mars_values(int(biochemReactModel.reaction_ids_.size()) - 1, max_reps_n_labels.second - 1), 1.0); BOOST_CHECK_EQUAL(metabo_labels.size(), max_reps_n_labels.second); BOOST_CHECK_EQUAL(metabo_labels.at(0), "D01"); // use_concentrations, iter_values, fill_zero, randomized_sample_group_names biochemReactModel.getMetDataAsTensors(metabo_concs, metabo_labels, biochemReactModel.sample_group_names_, biochemReactModel.component_group_names_, biochemReactModel.sample_group_name_to_label_, sample_group_name_to_reps, true, false, false, true, false, false, true, false, "S01_D01_PLT_25C_0hr", 10, true); BOOST_CHECK_NE(metabo_concs(24, 6), 0); // component_group_name dctp is missing from sample_group_name S01_D01_PLT_25C_22hr BOOST_CHECK_NE(metabo_concs(0, 0), 0.926901623); // 2pg and S01_D01_PLT_25C_0hr BOOST_CHECK_NE(metabo_concs(int(biochemReactModel.component_group_names_.size()) - 1, max_reps_n_labels.second - 1), 2.105641075); // utp and S01_D01_PLT_25C_6.5hr BOOST_CHECK_NE(metabo_concs(19, 11), 1e-6); // cmp and S01_D01_PLT_25C_22hr BOOST_CHECK_EQUAL(metabo_labels.size(), max_reps_n_labels.second); BOOST_CHECK_EQUAL(metabo_labels.at(0), "D01"); } BOOST_AUTO_TEST_CASE(clear) { BiochemicalReactionModel<float> biochemReactModel; // TODO } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_POPULATIONTRAINEREXPERIMENTALGPU_H #define EVONET_POPULATIONTRAINEREXPERIMENTALGPU_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> // .h #include <EvoNet/ml/PopulationTrainerExperimental.h> #include <EvoNet/ml/ModelInterpreterGpu.h> // .cpp namespace EvoNet { /** @brief Class to train a vector of models */ template<typename TensorT> class PopulationTrainerExperimentalGpu : public PopulationTrainerExperimental<TensorT, ModelInterpreterGpu<TensorT>> { }; } #endif #endif //EVONET_POPULATIONTRAINEREXPERIMENTALGPU_H<file_sep>/**TODO: Add copyright*/ #include <EvoNet/io/CSVWriter.h> #include <fstream> namespace EvoNet { CSVWriter::CSVWriter(){} CSVWriter::~CSVWriter(){} CSVWriter::CSVWriter(const std::string& filename, std::string delm) : filename_(filename), delimeter_(delm), line_count_(0) {} void CSVWriter::setFilename(const std::string& filename) { filename_ = filename; } std::string CSVWriter::getFilename() const { return filename_; } void CSVWriter::setDelimeter(const std::string& delimeter) { delimeter_ = delimeter; } std::string CSVWriter::getDelimeter() const { return delimeter_; } void CSVWriter::setLineCount(const int& line_count) { line_count_ = line_count; } int CSVWriter::getLineCount() const { return line_count_; } }<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE Weight<float> test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/Weight.h> #include <EvoNet/ml/Solver.h> #include <EvoNet/ml/WeightInit.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(weight1) BOOST_AUTO_TEST_CASE(constructor) { Weight<float>* ptr = nullptr; Weight<float>* nullPointer = nullptr; ptr = new Weight<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { Weight<float>* ptr = nullptr; ptr = new Weight<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(constructor2) { Weight<float> weight; // ID constructor weight = Weight<float>(1); BOOST_CHECK_EQUAL(weight.getId(), 1); BOOST_CHECK_EQUAL(weight.getName(), "1"); // ID and attributes std::shared_ptr<WeightInitOp<float>> weight_init(new ConstWeightInitOp<float>(2.0)); std::shared_ptr<SolverOp<float>> solver(new SGDOp<float>(0.01, 0.9)); weight = Weight<float>(1, weight_init, solver); BOOST_CHECK_EQUAL(weight.getWeightInitOp(), weight_init.get()); BOOST_CHECK_EQUAL(weight.getSolverOp(), solver.get()); BOOST_CHECK_EQUAL(weight.getWeightInitOp()->operator()(), 2.0); } BOOST_AUTO_TEST_CASE(comparison) { Weight<float> weight, weight_test; weight = Weight<float>(1); weight_test = Weight<float>(1); BOOST_CHECK(weight == weight_test); weight = Weight<float>(2); BOOST_CHECK(weight != weight_test); } BOOST_AUTO_TEST_CASE(gettersAndSetters) { Weight<float> weight; weight.setId(1); weight.setModuleId(2); weight.setModuleName("2"); weight.setDropProbability(1.0f); // Check getters BOOST_CHECK_EQUAL(weight.getId(), 1); BOOST_CHECK_EQUAL(weight.getName(), "1"); BOOST_CHECK_EQUAL(weight.getModuleId(), 2); BOOST_CHECK_EQUAL(weight.getModuleName(), "2"); BOOST_CHECK_EQUAL(weight.getDropProbability(), 1.0f); BOOST_CHECK_EQUAL(weight.getDrop(), 1.0f); // Check name getter weight.setName("weight1"); BOOST_CHECK_EQUAL(weight.getName(), "weight1"); // Check shared_ptr setters and getters std::shared_ptr<WeightInitOp<float>> weight_init(new ConstWeightInitOp<float>(2.0)); std::shared_ptr<SolverOp<float>> solver(new SGDOp<float>(0.01, 0.9)); weight.setWeightInitOp(weight_init); weight.setSolverOp(solver); BOOST_CHECK_EQUAL(weight.getWeightInitOp(), weight_init.get()); BOOST_CHECK_EQUAL(weight.getSolverOp(), solver.get()); BOOST_CHECK_EQUAL(weight.getWeightInitOp()->operator()(), 2.0); // Check weight after initialization weight.initWeight(); weight.setWeight(4.0); BOOST_CHECK_EQUAL(weight.getWeight(), 4.0); // Check drop probability mask weight.setDropProbability(1.0f); BOOST_CHECK_EQUAL(weight.getDrop(), 1.0f); } BOOST_AUTO_TEST_CASE(initWeight) { Weight<float> weight; weight.setId(1); std::shared_ptr<WeightInitOp<float>> weight_init(new ConstWeightInitOp<float>(2.0)); weight.setWeightInitOp(weight_init); weight.initWeight(); BOOST_CHECK_EQUAL(weight.getWeight(), 2.0); } BOOST_AUTO_TEST_CASE(assignment) { Weight<float> weight; weight.setId(1); weight.setName("1"); weight.setModuleId(1); weight.setModuleName("Mod1"); weight.setDropProbability(0.0f); weight.setWeightInitOp(std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(2.0))); weight.setSolverOp(std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9))); Weight<float> weight2(weight); BOOST_CHECK_EQUAL(weight.getId(), weight2.getId()); BOOST_CHECK_EQUAL(weight.getName(), weight2.getName()); BOOST_CHECK_EQUAL(weight.getModuleId(), weight2.getModuleId()); BOOST_CHECK_EQUAL(weight.getModuleName(), weight2.getModuleName()); BOOST_CHECK_EQUAL(weight.getDropProbability(), weight2.getDropProbability()); BOOST_CHECK_EQUAL(weight.getDrop(), weight2.getDrop()); BOOST_CHECK_NE(weight.getWeightInitOp(), weight2.getWeightInitOp()); BOOST_CHECK_NE(weight.getSolverOp(), weight2.getSolverOp()); Weight<float> weight3 = weight; BOOST_CHECK_EQUAL(weight.getId(), weight3.getId()); BOOST_CHECK_EQUAL(weight.getName(), weight3.getName()); BOOST_CHECK_EQUAL(weight.getModuleId(), weight3.getModuleId()); BOOST_CHECK_EQUAL(weight.getModuleName(), weight3.getModuleName()); BOOST_CHECK_EQUAL(weight.getDropProbability(), weight3.getDropProbability()); BOOST_CHECK_EQUAL(weight.getDrop(), weight3.getDrop()); BOOST_CHECK_NE(weight.getWeightInitOp(), weight3.getWeightInitOp()); BOOST_CHECK_NE(weight.getSolverOp(), weight3.getSolverOp()); } // Broke when adding nodeData //BOOST_AUTO_TEST_CASE(updateWeightWithDropConnection) //{ // Weight<float> weight; // weight.setId(1); // std::shared_ptr<WeightInitOp<float>> weight_init(new ConstWeightInitOp<float>(2.0)); // weight.setWeightInitOp(weight_init); // weight.initWeight(); // weight.setWeight(1.0); // weight.setDropProbability(1.0f); // std::shared_ptr<SolverOp<float>> solver(new SGDOp<float>(0.01, 0.9)); // weight.setSolverOp(solver); // weight.updateWeight(2.0); // // // No weight update due to mask // // [TODO: re-implement drop connection] // //BOOST_CHECK_CLOSE(weight.getWeight(), 0.0f, 1e-3); // //BOOST_CHECK_CLOSE(weight.getWeightView(), 0.0f, 1e-3); //} BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODEL_H #define EVONET_MODEL_H // .h #include <EvoNet/ml/Link.h> #include <EvoNet/ml/Node.h> #include <EvoNet/ml/Weight.h> #include <unsupported/Eigen/CXX11/Tensor> #include <vector> #include <map> #include <tuple> #include <list> #include <set> // .cpp #include <EvoNet/graph/CircuitFinder.h> #include <iostream> #include <cereal/access.hpp> // serialiation of private members #include <cereal/types/memory.hpp> #include <cereal/types/map.hpp> #include <cereal/types/utility.hpp> // std::pair #include <cereal/types/vector.hpp> namespace EvoNet { /** @brief Directed Network Model Assumptions about the model structure: 1. Inputs can only be sources 2. Outputs can only be sinks */ template<typename TensorT> class Model { public: Model() = default; ///< Default constructor Model(const Model& other); ///< Copy constructor that does not create a shared memory address between model nodes/links/weights Model(const int& id); ///< Explicit constructor ~Model() = default; ///< Default destructor inline bool operator==(const Model& other) const { return std::tie( id_, name_, links_, nodes_, weights_, input_nodes_, output_nodes_, cyclic_pairs_ ) == std::tie( other.id_, other.name_, other.links_, other.nodes_, other.weights_, other.input_nodes_, other.output_nodes_, other.cyclic_pairs_ ) ; } inline bool operator!=(const Model& other) const { return !(*this == other); } /** @brief Copy assignment operator that creates a new model with different memory addresses */ inline Model& operator=(const Model& other) { id_ = other.id_; name_ = other.name_; links_ = other.links_; nodes_ = other.nodes_; weights_ = other.weights_; cyclic_pairs_ = other.cyclic_pairs_; return *this; } void setId(const int& id); ///< id setter int getId() const; ///< id getter void setName(const std::string& name); ///< name setter std::string getName() const; ///< name getter void setInputAndOutputNodes(); ///< iterate through the model nodes and record the input and output nodes std::vector<std::shared_ptr<Node<TensorT>>> getInputNodes(); ///< input_node getter std::vector<std::shared_ptr<Node<TensorT>>> getOutputNodes(); ///< output_node getter std::vector<std::string> getOutputNodeNames() const; /** @brief Add new links to the model. @param[in] links Links to add to the model */ void addLinks(const std::vector<Link>& links); Link getLink(const std::string& link_name) const; ///< link getter std::vector<Link> getLinks() const; ///< links getter std::map<std::string, std::shared_ptr<Link>>& getLinksMap(); ///< return a modifiable version of weights /** @brief Remove existing links from the model. @param[in] Link_names Links to remove from the model */ void removeLinks(const std::vector<std::string>& link_names); /** @brief Add new nodes to the model. @param[in] nodes Nodes to add to the model */ void addNodes(const std::vector<Node<TensorT>>& nodes); Node<TensorT> getNode(const std::string& node_name) const; ///< node getter std::vector<Node<TensorT>> getNodes() const; ///< nodes getter std::map<std::string, std::shared_ptr<Node<TensorT>>>& getNodesMap(); ///< return a modifiable version of weights std::map<std::string, std::vector<std::string>> getModuleNodeNameMap() const; ///< return a map of modules to a vector of node names [TODO: test!] /** @brief Remove existing nodes from the model. @param[in] node_names Nodes to remove from the model */ void removeNodes(const std::vector<std::string>& node_names); /** @brief Add new weights to the model. @param[in] weights Weights to add to the model */ void addWeights(const std::vector<Weight<TensorT>>& weights); Weight<TensorT> getWeight(const std::string& weight_name) const; ///< weight getter std::vector<Weight<TensorT>> getWeights() const; ///< weights getter std::map<std::string, std::shared_ptr<Weight<TensorT>>>& getWeightsMap(); ///< return a modifiable version of weights_ /** @brief Remove existing weights from the model. @param[in] weight_names Weights to remove from the model */ void removeWeights(const std::vector<std::string>& weight_names); /** @brief Removes nodes from the model that no longer have an associated link. @returns True if nodes were removed, False otherwise */ bool pruneNodes(); /** @brief Removes links from the model that no longer have associated nodes. @returns True if links were removed, False otherwise */ bool pruneLinks(); /** @brief Removes weights from the model that no longer have associated links. @returns True if weights were removed, False otherwise */ bool pruneWeights(); /** @brief Removes dangling links, weights, and nodes recursively until there are no more dangling model components or the number of user specified iterations has been reached. @param[in] iterations The number of recursive iterations to prune */ void pruneModel(int iterations = 1e3); /** @brief Check to ensure that the nodes are in the model @param[in] node_names */ bool checkNodeNames(const std::vector<std::string> node_names); /** @brief Check to ensure that the links are in the model @param[in] link_names */ bool checkLinkNames(const std::vector<std::string> link_names); /** @brief Check to ensure that the weights are in the model @param[in] weight_names */ bool checkWeightNames(const std::vector<std::string> weight_names); /** @brief Check that the path from input to output is not broken Note: The method will modify the model weights, nodes, and errors It is recommended to first create a copy of the model that will be later discarded Or re-initialize the model after. [DEPRECATED: params no longer needed] @param[in] input_nodes @param[out] output_nodes */ bool checkCompleteInputToOutput(); void checkCompleteInputToOutput_(const std::string & node_cur, std::set<std::string>& found_nodes, std::set<std::string>& output_nodes, std::set<std::string>& found_output_nodes); void checkCompleteOutputToInput_(const std::string& node_cur, std::set<std::string>& found_nodes, std::set<std::string>& input_nodes, std::set<std::string>& found_input_nodes); /** @brief Check model link node and weight names [TODO: add tests...] @param[out] nodes_not_found @param[out] weights_not_found */ bool checkLinksNodeAndWeightNames( std::vector<std::string>& nodes_not_found, std::vector<std::string>& weights_not_found); /** @brief Remove hidden nodes that have either only 1 source and no sink connection or 1 sink and no source connection */ bool removeIsolatedNodes(); /** @brief Convert model to adjacency list TODO: Implement tests @param[out] node_id_map Map of node id to node name @param[out] node_cnt the number of vertices in the adjacency list @returns An adjacency list representation of a graph */ std::list<int>* convertToAdjacencyList(std::map<int, std::string>& node_id_map, int& node_cnt); void findCycles(); void addCyclicPairs(const std::pair<std::string, std::string>& cyclic_pair); std::set<std::pair<std::string, std::string>> getCyclicPairs() const; void setError(const Eigen::Tensor<TensorT, 2> model_error); ///< model_error setter Eigen::Tensor<TensorT, 2> getError() const; ///< model_error getter void setMetric(const Eigen::Tensor<TensorT, 2> model_metrics); ///< model_metrics setter Eigen::Tensor<TensorT, 2> getMetric() const; ///< model_metrics getter /** @brief Re-initialize all node tensor indices [TODO: add tests!] */ void initNodeTensorIndices(); /** @brief Re-initialize all weight tensor indices [TODO: add tests!] */ void initWeightTensorIndices(); void setBatchAndMemorySizes(const int& batch_size, const int& memory_size); ///< batch and memory sizes setter std::pair<int, int> getBatchAndMemorySizes() const; ///< batch and memory sizes getter (non-padded sizes) void clear(); ///< clear all member data std::map<std::string, std::shared_ptr<Link>> links_; ///< Model links std::map<std::string, std::shared_ptr<Node<TensorT>>> nodes_; ///< Model nodes std::map<std::string, std::shared_ptr<Weight<TensorT>>> weights_; ///< Model nodes private: int id_ = -1; ///< Model ID std::string name_ = ""; ///< Model Name std::set<std::pair<std::string, std::string>> cyclic_pairs_; std::vector<std::shared_ptr<Node<TensorT>>> input_nodes_; std::vector<std::shared_ptr<Node<TensorT>>> output_nodes_; Eigen::Tensor<TensorT, 2> model_error_; Eigen::Tensor<TensorT, 2> model_metric_; int batch_size_ = 0; int memory_size_ = 0; friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(id_, name_, batch_size_, memory_size_, input_nodes_, output_nodes_, cyclic_pairs_, links_, nodes_, weights_ ); } }; template<typename TensorT> inline Model<TensorT>::Model(const Model<TensorT>& other) { id_ = other.id_; name_ = other.name_; // TODO: find a more efficient way to copy the links, nodes, and weights addLinks(other.getLinks()); addNodes(other.getNodes()); addWeights(other.getWeights()); cyclic_pairs_ = other.cyclic_pairs_; setInputAndOutputNodes(); // re-initialize the input/output nodes } template<typename TensorT> inline Model<TensorT>::Model(const int& id) : id_(id) { } template<typename TensorT> inline void Model<TensorT>::setId(const int& id) { id_ = id; } template<typename TensorT> inline int Model<TensorT>::getId() const { return id_; } template<typename TensorT> inline void Model<TensorT>::setName(const std::string& name) { name_ = name; } template<typename TensorT> inline std::string Model<TensorT>::getName() const { return name_; } template<typename TensorT> inline std::vector<std::shared_ptr<Node<TensorT>>> Model<TensorT>::getInputNodes() { return input_nodes_; } template<typename TensorT> inline void Model<TensorT>::setInputAndOutputNodes() { for (auto& node : nodes_) { if (node.second->getType() == NodeType::input && std::count(input_nodes_.begin(), input_nodes_.end(), node.second) == 0) { std::shared_ptr<Node<TensorT>> node_ptr_cpy = node.second; input_nodes_.push_back(node_ptr_cpy); } else if (node.second->getType() == NodeType::output && std::count(output_nodes_.begin(), output_nodes_.end(), node.second) == 0) { std::shared_ptr<Node<TensorT>> node_ptr_cpy = node.second; output_nodes_.push_back(node_ptr_cpy); } } } template<typename TensorT> inline std::vector<std::shared_ptr<Node<TensorT>>> Model<TensorT>::getOutputNodes() { return output_nodes_; } template<typename TensorT> inline std::vector<std::string> Model<TensorT>::getOutputNodeNames() const { std::vector<std::string> nodes; for (const auto& node : output_nodes_) { nodes.push_back(node->getName()); } return nodes; } template<typename TensorT> inline void Model<TensorT>::addNodes(const std::vector<Node<TensorT>>& nodes) { for (const Node<TensorT>& node : nodes) { std::shared_ptr<Node<TensorT>> node_ptr; node_ptr.reset(new Node<TensorT>(node)); auto found = nodes_.emplace(node.getName(), node_ptr); if (!found.second) { // TODO: move to debug log std::cout << "Node name " << node.getName() << " already exists!" << std::endl; } else { if (node.getType() == NodeType::input) { // Add layer_name of "Input" to ensure they are allocated to seperate layers? std::shared_ptr<Node<TensorT>> node_ptr_cpy = node_ptr; input_nodes_.push_back(node_ptr_cpy); } else if (node.getType() == NodeType::output) { // Add layer_name of "Output" to ensure they are allocated to seperate layers? std::shared_ptr<Node<TensorT>> node_ptr_cpy = node_ptr; output_nodes_.push_back(node_ptr_cpy); } } } } template<typename TensorT> inline Node<TensorT> Model<TensorT>::getNode(const std::string& node_name) const { if (!nodes_.empty() && nodes_.count(node_name) != 0) { return *nodes_.at(node_name); } else { // TODO: move to debug log std::cout << "Node name " << node_name << " not found!" << std::endl; } } template<typename TensorT> inline std::vector<Node<TensorT>> Model<TensorT>::getNodes() const { std::vector<Node<TensorT>> nodes; for (const auto& node : nodes_) { Node<TensorT> node_cpy(*node.second); nodes.push_back(node_cpy); } return nodes; } template<typename TensorT> inline std::map<std::string, std::shared_ptr<Node<TensorT>>>& Model<TensorT>::getNodesMap() { return nodes_; } template<typename TensorT> inline std::map<std::string, std::vector<std::string>> Model<TensorT>::getModuleNodeNameMap() const { std::map<std::string, std::vector<std::string>> module_to_node_names; for (const auto& node_map : nodes_) { std::vector<std::string> node_names = { node_map.first }; auto found = module_to_node_names.emplace(node_map.second->getModuleName(), node_names); if (!found.second) { module_to_node_names.at(node_map.second->getModuleName()).push_back(node_map.first); } } return module_to_node_names; } template<typename TensorT> inline void Model<TensorT>::removeNodes(const std::vector<std::string>& node_names) { for (const std::string& node_name : node_names) { // check for duplicate nodes (by id) if (nodes_.count(node_name) != 0) { nodes_.erase(node_name); } } // pruneLinks(); // Allow for dangling links } template<typename TensorT> inline void Model<TensorT>::addWeights(const std::vector<Weight<TensorT>>& weights) { for (const Weight<TensorT>& weight : weights) { std::shared_ptr<Weight<TensorT>> weight_ptr; weight_ptr.reset(new Weight<TensorT>(weight)); auto found = weights_.emplace(weight.getName(), weight_ptr); if (!found.second) { // TODO: move to debug log std::cout << "Weight name " << weight.getName() << " already exists!" << std::endl; } } } template<typename TensorT> inline Weight<TensorT> Model<TensorT>::getWeight(const std::string& weight_name) const { if (!weights_.empty() && weights_.count(weight_name) != 0) { //return *std::move(weights_.at(weight_name)); return *weights_.at(weight_name); } else { // TODO: move to debug log std::cout << "Weight name " << weight_name << " not found!" << std::endl; } } template<typename TensorT> inline std::vector<Weight<TensorT>> Model<TensorT>::getWeights() const { std::vector<Weight<TensorT>> weights; for (const auto& weight : weights_) { Weight<TensorT> weight_cpy(*weight.second); weights.push_back(weight_cpy); } return weights; } template<typename TensorT> inline std::map<std::string, std::shared_ptr<Weight<TensorT>>>& Model<TensorT>::getWeightsMap() { return weights_; } template<typename TensorT> inline void Model<TensorT>::removeWeights(const std::vector<std::string>& weight_names) { for (std::string const& weight_name : weight_names) { // check for duplicate weights (by id) if (weights_.count(weight_name) != 0) { weights_.erase(weight_name); } } pruneLinks(); } template<typename TensorT> inline void Model<TensorT>::addLinks(const std::vector<Link>& links) { for (const Link& link : links) { std::shared_ptr<Link> link_ptr; link_ptr.reset(new Link(link)); auto found = links_.emplace(link.getName(), link_ptr); if (!found.second) { // TODO: move to debug log std::cout << "Link name " << link.getName() << " already exists!" << std::endl; } } } template<typename TensorT> inline void Model<TensorT>::removeLinks(const std::vector<std::string>& link_names) { for (const std::string& link_name : link_names) { // check for duplicate links (by id) if (links_.count(link_name) != 0) { links_.erase(link_name); } } // pruneNodes(); // Allow dangling nodes to exist //pruneWeights(); // testing } template<typename TensorT> inline Link Model<TensorT>::getLink(const std::string& link_name) const { if (!links_.empty() && links_.count(link_name) != 0) { return *links_.at(link_name); } else { // TODO: move to debug log std::cout << "Link name " << link_name << " not found!" << std::endl; } } template<typename TensorT> inline std::vector<Link> Model<TensorT>::getLinks() const { std::vector<Link> links; for (const auto& link : links_) { Link link_cpy(*link.second); links.push_back(link_cpy); } return links; } template<typename TensorT> inline std::map<std::string, std::shared_ptr<Link>>& Model<TensorT>::getLinksMap() { return links_; } template<typename TensorT> inline bool Model<TensorT>::pruneNodes() { std::vector<std::string> node_names; if (nodes_.empty()) { return false; } for (const auto& node : nodes_) { bool found = false; // if (links_.empty()) { found = true; } for (const auto& link : links_) { if (node.second->getName() == link.second->getSourceNodeName() || node.second->getName() == link.second->getSinkNodeName()) { found = true; break; } } if (!found) { node_names.push_back(node.first); } } if (node_names.size() != 0) { removeNodes(node_names); return true; } else return false; } template<typename TensorT> inline bool Model<TensorT>::pruneWeights() { std::vector<std::string> weight_names; if (weights_.empty()) { return false; } for (const auto& weight : weights_) { bool found = false; // if (links_.empty()) { found = true; } for (const auto& link : links_) { if (weight.second->getName() == link.second->getWeightName()) { found = true; break; } } if (!found) { weight_names.push_back(weight.first); } } if (weight_names.size() != 0) { removeWeights(weight_names); return true; } else return false; } template<typename TensorT> inline bool Model<TensorT>::pruneLinks() { std::vector<std::string> link_names; if (links_.empty()) { return false; } for (const auto& link : links_) { bool source_node_found = false; bool sink_node_found = false; // if (nodes_.empty()) // { // source_node_found = true; // sink_node_found = true; // } for (const auto& node : nodes_) { if (node.second->getName() == link.second->getSourceNodeName()) source_node_found = true; if (node.second->getName() == link.second->getSinkNodeName()) sink_node_found = true; if (source_node_found && sink_node_found) break; } bool weight_found = false; // if (weights_.empty()) { weight_found = true; } for (const auto& weight : weights_) { if (weight.second->getName() == link.second->getWeightName()) { weight_found = true; break; } } if (!source_node_found || !sink_node_found) { link_names.push_back(link.first); } } if (link_names.size() != 0) { removeLinks(link_names); return true; } else return false; } template<typename TensorT> inline void Model<TensorT>::pruneModel(int iterations) { try { int cnt = 0; while (pruneLinks() || pruneWeights() || pruneNodes()) { if (cnt >= iterations) { break; } // std::cout<<"Pruning model iteration: "<<cnt<<std::endl; cnt += 1; } } catch (std::exception& e) { printf("Exception: %s", e.what()); } } template<typename TensorT> inline bool Model<TensorT>::checkNodeNames(const std::vector<std::string> node_names) { bool nodes_found = true; for (const std::string& node_name : node_names) { if (nodes_.empty() || nodes_.count(node_name) == 0) { nodes_found = false; std::cout << "Node name " << node_name << " not found!" << std::endl; } } return nodes_found; } template<typename TensorT> inline bool Model<TensorT>::checkLinkNames(const std::vector<std::string> link_names) { bool links_found = true; for (const std::string& link_name : link_names) { if (links_.empty() || links_.count(link_name) == 0) { links_found = false; std::cout << "Link name " << link_name << " not found!" << std::endl; } } return links_found; } template<typename TensorT> inline bool Model<TensorT>::checkWeightNames(const std::vector<std::string> weight_names) { bool weights_found = true; for (const std::string& weight_name : weight_names) { if (weights_.empty() || weights_.count(weight_name) == 0) { weights_found = false; std::cout << "Weight name " << weight_name << " not found!" << std::endl; } } return weights_found; } template<typename TensorT> inline void Model<TensorT>::checkCompleteInputToOutput_(const std::string& node_cur, std::set<std::string>& found_nodes, std::set<std::string>& output_nodes, std::set<std::string>& found_output_nodes) { for (auto& link_map : links_) { if (link_map.second->getSourceNodeName() == node_cur) { std::string node_next = link_map.second->getSinkNodeName(); if (output_nodes.count(node_next)) { found_output_nodes.insert(node_next); if (found_output_nodes.size() == output_nodes.size()) return; } else if (found_nodes.count(node_next)) { continue; // found a loop } else { found_nodes.insert(node_next); checkCompleteInputToOutput_(node_next, found_nodes, output_nodes, found_output_nodes); if (found_output_nodes.size() == output_nodes.size()) return; } } } }; template<typename TensorT> inline void Model<TensorT>::checkCompleteOutputToInput_(const std::string& node_cur, std::set<std::string>& found_nodes, std::set<std::string>& output_nodes, std::set<std::string>& found_output_nodes) { for (auto& link_map : links_) { if (link_map.second->getSinkNodeName() == node_cur) { std::string node_next = link_map.second->getSourceNodeName(); if (output_nodes.count(node_next)) { found_output_nodes.insert(node_next); if (found_output_nodes.size() == output_nodes.size()) return; } else if (found_nodes.count(node_next)) { continue; // found a loop } else { found_nodes.insert(node_next); checkCompleteOutputToInput_(node_next, found_nodes, output_nodes, found_output_nodes); if (found_output_nodes.size() == output_nodes.size()) return; } } } }; template<typename TensorT> inline bool Model<TensorT>::checkCompleteInputToOutput() { // Create a set of target output nodes std::set<std::string> output_nodes, found_output_nodes; for (auto& node : nodes_) if (node.second->getType() == NodeType::output) output_nodes.insert(node.second->getName()); // Create a set of target output nodes std::set<std::string> input_nodes, found_input_nodes; for (auto& node : nodes_) if (node.second->getType() == NodeType::input) input_nodes.insert(node.second->getName()); // Start at each input node and walk through the graph until // a previous node is reached or an output node is reached for (const std::string& node : input_nodes) { std::set<std::string> found_nodes; found_nodes.insert(node); checkCompleteInputToOutput_(node, found_nodes, output_nodes, found_output_nodes); if (found_output_nodes.size() == output_nodes.size()) break; } // Start at each output node and walk through the graph until // a previous node is reached or an input node is reached for (const std::string& node : output_nodes) { std::set<std::string> found_nodes; found_nodes.insert(node); checkCompleteOutputToInput_(node, found_nodes, input_nodes, found_input_nodes); if (found_input_nodes.size() == input_nodes.size()) break; } if (found_input_nodes.size() == input_nodes.size() && found_output_nodes.size() == output_nodes.size()) return true; else return false; } template<typename TensorT> inline bool Model<TensorT>::checkLinksNodeAndWeightNames(std::vector<std::string>& nodes_not_found, std::vector<std::string>& weights_not_found) { bool link_names_check = true; for (const auto& link_map : links_) { if (!checkNodeNames({ link_map.second->getSourceNodeName() })) { link_names_check = false; nodes_not_found.push_back(link_map.second->getSourceNodeName()); } if (!checkNodeNames({ link_map.second->getSinkNodeName() })) { link_names_check = false; nodes_not_found.push_back(link_map.second->getSinkNodeName()); } if (!checkWeightNames({ link_map.second->getWeightName() })) { link_names_check = false; weights_not_found.push_back(link_map.second->getWeightName()); } } return link_names_check; } template<typename TensorT> inline bool Model<TensorT>::removeIsolatedNodes() { // key/value pair of node name and source/sink count pair std::map<std::string, std::pair<int, int>> node_counts; // count all sink/source connections for each node for (const auto& link_map : links_) { // source if (nodes_.at(link_map.second->getSourceNodeName())->getType() == NodeType::hidden) { auto found = node_counts.emplace(link_map.second->getSourceNodeName(), std::make_pair(1, 0)); if (!found.second) { node_counts[link_map.second->getSourceNodeName()].first += 1; } } // sink if (nodes_.at(link_map.second->getSinkNodeName())->getType() == NodeType::hidden && nodes_.at(link_map.second->getSourceNodeName())->getType() != NodeType::bias) { auto found = node_counts.emplace(link_map.second->getSinkNodeName(), std::make_pair(0, 1)); if (!found.second) { node_counts[link_map.second->getSinkNodeName()].second += 1; } } } bool dead_end_node_found = false; for (const auto& node_count : node_counts) { if (node_count.second.first == 0 || node_count.second.second == 0) { removeNodes({ node_count.first }); dead_end_node_found = true; } } return dead_end_node_found; } template<typename TensorT> inline std::list<int>* Model<TensorT>::convertToAdjacencyList(std::map<int, std::string>& node_id_map, int& node_cnt) { // create a map of node id to node name (excluding bias nodes) node_id_map.clear(); node_cnt = 0; for (auto& node_map : nodes_) { if (node_map.second->getType() != NodeType::bias) { ++node_cnt; node_map.second->setId(node_cnt); node_id_map.emplace(node_cnt, node_map.first); } else { node_map.second->setId(-1); } } // create the DFS trees (excluding bias nodes) std::list<int> *adj; adj = new std::list<int>[node_cnt]; // add the actual nodes for (auto& link_map : links_) if (nodes_.at(link_map.second->getSourceNodeName())->getType() != NodeType::bias) adj[nodes_.at(link_map.second->getSourceNodeName())->getId() - 1].push_back(nodes_.at(link_map.second->getSinkNodeName())->getId()); return adj; } template<typename TensorT> inline void Model<TensorT>::findCycles() { std::map<int, std::string> node_id_map; int node_cnt; std::list<int> *adj = convertToAdjacencyList(node_id_map, node_cnt); CircuitFinder CF(adj, node_cnt); CF.run(); cyclic_pairs_.clear(); for (const auto& source_sink : CF.getCycles()) { if (nodes_.at(node_id_map.at(source_sink.second))->getType() == NodeType::recursive) // enforce order of recursive nodes cyclic_pairs_.insert(std::make_pair(node_id_map.at(source_sink.second), node_id_map.at(source_sink.first))); else cyclic_pairs_.insert(std::make_pair(node_id_map.at(source_sink.first), node_id_map.at(source_sink.second))); } } template<typename TensorT> inline void Model<TensorT>::addCyclicPairs(const std::pair<std::string, std::string>& cyclic_pair) { cyclic_pairs_.insert(cyclic_pair); } template<typename TensorT> inline std::set<std::pair<std::string, std::string>> Model<TensorT>::getCyclicPairs() const { return cyclic_pairs_; } template<typename TensorT> inline void Model<TensorT>::setError(const Eigen::Tensor<TensorT, 2> model_error) { model_error_ = model_error; } template<typename TensorT> inline Eigen::Tensor<TensorT, 2> Model<TensorT>::getError() const { return model_error_; }; template<typename TensorT> inline void Model<TensorT>::setMetric(const Eigen::Tensor<TensorT, 2> model_metric) { model_metric_ = model_metric; } template<typename TensorT> inline Eigen::Tensor<TensorT, 2> Model<TensorT>::getMetric() const { return model_metric_; }; template<typename TensorT> inline void Model<TensorT>::initNodeTensorIndices() { for (auto& node_map : nodes_) { node_map.second->setTensorIndex(std::make_pair(-1, -1)); } } template<typename TensorT> inline void Model<TensorT>::initWeightTensorIndices() { for (auto& weight_map : weights_) { weight_map.second->clearTensorIndex(); } } template<typename TensorT> inline void Model<TensorT>::setBatchAndMemorySizes(const int & batch_size, const int & memory_size) { batch_size_ = batch_size; memory_size_ = memory_size; } template<typename TensorT> inline std::pair<int, int> Model<TensorT>::getBatchAndMemorySizes() const { return std::pair<int, int>(batch_size_,memory_size_); } template<typename TensorT> inline void Model<TensorT>::clear() { id_ = -1; name_ = ""; cyclic_pairs_.clear(); input_nodes_.clear(); output_nodes_.clear(); batch_size_ = 0; memory_size_ = 0; weights_.clear(); nodes_.clear(); links_.clear(); } } #endif //EVONET_MODEL_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_METRICFUNCTIONTENSOR_H #define EVONET_METRICFUNCTIONTENSOR_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> #endif #include <unsupported/Eigen/CXX11/Tensor> namespace EvoNet { /** @brief Base class for all model metric functions. NOTE: Unlike LossFunctions that return the results on a per batch basis, model metric functions return a single value across all batch results */ template<typename TensorT, typename DeviceT> class MetricFunctionTensorOp { public: MetricFunctionTensorOp() = default; MetricFunctionTensorOp(std::string& reduction_func) : reduction_func_(reduction_func) {}; ///< Options are Sum, Mean, Var virtual ~MetricFunctionTensorOp() = default; virtual std::string getName() = 0; virtual void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const = 0; void setReductionFunc(std::string& reduction_func) { reduction_func_ = reduction_func; } std::string getReductionFunc() { return reduction_func_; } protected: TensorT threshold_positive_ = 0.9; TensorT threshold_negative_ = 0.1; std::string reduction_func_ = "Sum"; }; /** @brief Accuracy metric function for binary classification. The class returns the average classification accuracy across all batches where an expected true value > 0.9 and an expected false value < 0.9 Where classification accuracy = (TP + TN)/(TP + TN + FP + FN) */ template<typename TensorT, typename DeviceT> class AccuracyBCTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: AccuracyBCTensorOp() = default; AccuracyBCTensorOp(const TensorT& classification_threshold) : classification_threshold_(classification_threshold) {}; std::string getName() override { return "AccuracyBCTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); // calculate the confusion matrix auto predicted_chip = predicted_tensor.chip(time_step, 1); auto tp = (predicted_chip >= expected_tensor.constant(TensorT(this->classification_threshold_)) && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); auto tn = (predicted_chip < expected_tensor.constant(TensorT(this->classification_threshold_)) && expected_tensor < expected_tensor.constant(TensorT(this->threshold_negative_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); auto fp = (predicted_chip >= expected_tensor.constant(TensorT(this->classification_threshold_)) && expected_tensor < expected_tensor.constant(TensorT(this->threshold_negative_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); auto fn = (predicted_chip < expected_tensor.constant(TensorT(this->classification_threshold_)) && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); // calculate the accuracy auto accuracy = (tp.sum() + tn.sum()) / (tp.sum() + tn.sum() + fp.sum() + fn.sum()); error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += accuracy; }; TensorT getClassificationThreshold() const { return this->classification_threshold_; } protected: TensorT classification_threshold_ = 0.5; }; /** @brief Accuracy metric function for multiclass classification. The class returns the micro average classification accuracy across all batches where an expected true value > 0.9 and an expected false value < 0.9 Where classification accuracy = (TP + TN)/(TP + TN + FP + FN) */ template<typename TensorT, typename DeviceT> class AccuracyMCMicroTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: using MetricFunctionTensorOp<TensorT, DeviceT>::MetricFunctionTensorOp; std::string getName() override { return "AccuracyMCMicroTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); // allocate temporary memory TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size*layer_size]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * layer_size * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif // find the maximum value for each batch auto predicted_chip = predicted_tensor.chip(time_step, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> max_tensor(tmp_data, batch_size, layer_size); max_tensor.device(device) = predicted_chip.maximum(Eigen::array<int, 1>({ 1 })).broadcast(Eigen::array<int, 2>({ 1, layer_size })); // calculate the confusion matrix auto tp = (predicted_chip.chip(0, 2) >= (max_tensor - max_tensor.constant(TensorT(1e-6))) && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); auto tn = (predicted_chip.chip(0, 2) < max_tensor && expected_tensor < expected_tensor.constant(TensorT(this->threshold_negative_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); auto fp = (predicted_chip.chip(0, 2) >= (max_tensor - max_tensor.constant(TensorT(1e-6))) && expected_tensor < expected_tensor.constant(TensorT(this->threshold_negative_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); auto fn = (predicted_chip.chip(0, 2) < max_tensor && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); // calculate the accuracy auto accuracy = (tp.sum() + tn.sum()) / (tp.sum() + tn.sum() + fp.sum() + fn.sum()); error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += accuracy; // deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif //// DEBUG //std::cout << "TP: " << tp << std::endl; //std::cout << "TN: " << tn << std::endl; //std::cout << "FP: " << fp << std::endl; //std::cout << "FN: " << fn << std::endl; //std::cout << "Accuracy: " << accuracy << std::endl; }; }; /** @brief Accuracy metric function for multiclass classification. The class returns the macro average classification accuracy across all batches where an expected true value > 0.9 and an expected false value < 0.9 Where classification accuracy = (TP + TN)/(TP + TN + FP + FN) */ template<typename TensorT, typename DeviceT> class AccuracyMCMacroTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: using MetricFunctionTensorOp<TensorT, DeviceT>::MetricFunctionTensorOp; std::string getName() override { return "AccuracyMCMacroTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); // find the maximum value for each batch auto predicted_chip = predicted_tensor.chip(time_step, 1); // TODO... // Sum on the per batch level and then average e.g. / accuracy.constant(TensorT(batch_size)); }; }; /** @brief Precision metric function for binary classification. The class returns the average classification precision across all batches where an expected true value > 0.9 and an expected false value < 0.9 Where classification precision = TP/(TP + FP) */ template<typename TensorT, typename DeviceT> class PrecisionBCTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: PrecisionBCTensorOp() = default; PrecisionBCTensorOp(const TensorT& classification_threshold) : classification_threshold_(classification_threshold) {}; std::string getName() override { return "PrecisionBCTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); // calculate the confusion matrix auto predicted_chip = predicted_tensor.chip(time_step, 1); auto tp = (predicted_chip >= expected_tensor.constant(TensorT(this->classification_threshold_)) && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); auto fp = (predicted_chip >= expected_tensor.constant(TensorT(this->classification_threshold_)) && expected_tensor < expected_tensor.constant(TensorT(this->threshold_negative_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); // calculate the precision auto precision = tp.sum() / (tp.sum() + fp.sum()); error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += precision; }; TensorT getClassificationThreshold() const { return this->classification_threshold_; } protected: TensorT classification_threshold_ = 0.5; }; /** @brief Precision metric function for multiclass classification. The class returns the micro average classification precision across all batches where an expected true value > 0.9 and an expected false value < 0.9 Where classification precision = TP/(TP + FP) */ template<typename TensorT, typename DeviceT> class PrecisionMCMicroTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: using MetricFunctionTensorOp<TensorT, DeviceT>::MetricFunctionTensorOp; std::string getName() override { return "PrecisionMCMicroTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); // find the maximum value for each batch auto predicted_chip = predicted_tensor.chip(time_step, 1); auto max_tensor = predicted_chip.maximum(Eigen::array<int, 1>({ 1 })).broadcast(Eigen::array<int, 2>({ 1, layer_size })); // calculate the confusion matrix auto tp = (predicted_chip.chip(0, 2) >= (max_tensor - max_tensor.constant(TensorT(1e-6))) && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); auto fp = (predicted_chip.chip(0, 2) >= (max_tensor - max_tensor.constant(TensorT(1e-6))) && expected_tensor < expected_tensor.constant(TensorT(this->threshold_negative_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); // calculate the precision auto precision = tp.sum() / (tp.sum() + fp.sum()); error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += precision; //// DEBUG //std::cout << "predicted_chip.chip(0, 1): " << predicted_chip << std::endl; //std::cout << "max_tensor: " << max_tensor << std::endl; //std::cout << "expected_tensor: " << expected_tensor << std::endl; //std::cout << "TP: " << tp << std::endl; //std::cout << "FP: " << fp << std::endl; //std::cout << "precision: " << precision << std::endl; }; }; /** @brief Precision metric function for multiclass classification. The class returns the macro average classification precision across all batches where an expected true value > 0.9 and an expected false value < 0.9 Where classification precision = TP/(TP + FP) */ template<typename TensorT, typename DeviceT> class PrecisionMCMacroTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: using MetricFunctionTensorOp<TensorT, DeviceT>::MetricFunctionTensorOp; std::string getName() override { return "PrecisionMCMacroTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); // find the maximum value for each batch auto predicted_chip = predicted_tensor.chip(time_step, 1); // TODO... // Sum on the per batch level and then average e.g. / precision.constant(TensorT(batch_size)); }; }; /** @brief Recall metric function for binary classification. The class returns the average classification recall across all batches where an expected true value > 0.9 and an expected false value < 0.9 Where classification recall = TP /(TP + FN) */ template<typename TensorT, typename DeviceT> class RecallBCTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: RecallBCTensorOp() = default; RecallBCTensorOp(const TensorT& classification_threshold) : classification_threshold_(classification_threshold) {}; std::string getName() override { return "RecallBCTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); // calculate the confusion matrix auto predicted_chip = predicted_tensor.chip(time_step, 1); auto tp = (predicted_chip >= expected_tensor.constant(TensorT(this->classification_threshold_)) && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); auto fn = (predicted_chip < expected_tensor.constant(TensorT(this->classification_threshold_)) && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); // calculate the recall auto recall = tp.sum() / (tp.sum() + fn.sum()); error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += recall; }; TensorT getClassificationThreshold() const { return this->classification_threshold_; } protected: TensorT classification_threshold_ = 0.5; }; /** @brief Recall metric function for multiclass classification. The class returns the micro average classification recall across all batches where an expected true value > 0.9 and an expected false value < 0.9 Where classification recall = TP /(TP + FN) */ template<typename TensorT, typename DeviceT> class RecallMCMicroTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: using MetricFunctionTensorOp<TensorT, DeviceT>::MetricFunctionTensorOp; std::string getName() override { return "RecallMCMicroTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); // find the maximum value for each batch auto predicted_chip = predicted_tensor.chip(time_step, 1); auto max_tensor = predicted_chip.maximum(Eigen::array<int, 1>({ 1 })).broadcast(Eigen::array<int, 2>({ 1, layer_size })); // calculate the confusion matrix auto tp = (predicted_chip.chip(0, 2) >= (max_tensor - max_tensor.constant(TensorT(1e-6))) && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); auto fn = (predicted_chip.chip(0, 2) < max_tensor && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); // calculate the recall auto recall = tp.sum() / (tp.sum() + fn.sum()); error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += recall; }; }; /** @brief Recall metric function for multiclass classification. The class returns the macro average classification recall across all batches where an expected true value > 0.9 and an expected false value < 0.9 Where classification recall = TP /(TP + FN) */ template<typename TensorT, typename DeviceT> class RecallMCMacroTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: using MetricFunctionTensorOp<TensorT, DeviceT>::MetricFunctionTensorOp; std::string getName() override { return "RecallMCMacroTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); // find the maximum value for each batch auto predicted_chip = predicted_tensor.chip(time_step, 1); // TODO... // Sum on the per batch level and then average e.g. / recall.constant(TensorT(batch_size)); }; }; /** @brief PredictionBias metric function. Where Prediction bias = average of predictions - average of labels in the data set */ template<typename TensorT, typename DeviceT> class PredictionBiasTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: using MetricFunctionTensorOp<TensorT, DeviceT>::MetricFunctionTensorOp; std::string getName() { return "PredictionBiasTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); // TODO... }; }; /** @brief F1 score metric function for binary classification. The class returns the average F1 score across all batches Where F1 score = 2*precision*recall/(precision + recall) and precision = TP/(TP + FP) and recall = TP/(TP + FN) */ template<typename TensorT, typename DeviceT> class F1ScoreBCTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: F1ScoreBCTensorOp() = default; F1ScoreBCTensorOp(const TensorT& classification_threshold) : classification_threshold_(classification_threshold) {}; std::string getName() { return "F1ScoreBCTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); //// calculate the confusion matrix //auto predicted_chip = predicted_tensor.chip(time_step, 1); //auto tp = (predicted_chip >= expected_tensor.constant(TensorT(this->classification_threshold_)) && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); //auto fp = (predicted_chip >= expected_tensor.constant(TensorT(this->classification_threshold_)) && expected_tensor < expected_tensor.constant(TensorT(this->threshold_negative_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); //auto fn = (predicted_chip < expected_tensor.constant(TensorT(this->classification_threshold_)) && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); //// calculate the F1 score //auto precision = tp.sum()/(tp.sum() + fp.sum()); //auto recall = tp.sum() / (tp.sum() + fn.sum()); //auto f1score = precision.constant(TensorT(2))*precision*recall / (precision + recall); //error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += f1score; }; TensorT getClassificationThreshold() const { return this->classification_threshold_; } protected: TensorT classification_threshold_ = 0.5; }; /** @brief F1 score metric function for multiclass classification. The class returns the micro average F1 score across all batches Where F1 score = 2*precision*recall/(precision + recall) and precision = TP/(TP + FP) and recall = TP/(TP + FN) */ template<typename TensorT, typename DeviceT> class F1ScoreMCMicroTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: using MetricFunctionTensorOp<TensorT, DeviceT>::MetricFunctionTensorOp; std::string getName() { return "F1ScoreMCMicroTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); // // allocate temporary memory // TensorT* tmp_data; // if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { // tmp_data = new TensorT[batch_size*layer_size]; // } //#if COMPILE_WITH_CUDA // else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { // size_t bytes = batch_size * layer_size * sizeof(TensorT); // assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); // } //#endif // // find the maximum value for each batch // auto predicted_chip = predicted_tensor.chip(time_step, 1); // Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> max_tensor(tmp_data, batch_size, layer_size); // max_tensor = predicted_chip.maximum(Eigen::array<int, 1>({ 1 })).broadcast(Eigen::array<int, 2>({ 1, layer_size })); // // // calculate the confusion matrix // auto tp = (predicted_chip.chip(0, 2) >= (max_tensor - max_tensor.constant(TensorT(1e-6))) && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); // auto fp = (predicted_chip.chip(0, 2) >= (max_tensor - max_tensor.constant(TensorT(1e-6))) && expected_tensor < expected_tensor.constant(TensorT(this->threshold_negative_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); // auto fn = (predicted_chip.chip(0, 2) < max_tensor && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); // // // calculate the F1 score // auto precision = tp.sum() / (tp.sum() + fp.sum()); // auto recall = tp.sum() / (tp.sum() + fn.sum()); // auto f1score = precision.constant(TensorT(2))*precision*recall / (precision + recall); // error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += f1score; // // // deallocate temporary memory // if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { // delete[] tmp_data; // } //#if COMPILE_WITH_CUDA // else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { // assert(cudaFree(tmp_data) == cudaSuccess); // } //#endif //// DEBUG //std::cout << "precision: " << precision << std::endl; //std::cout << "recall: " << recall << std::endl; //std::cout << "f1score: " << f1score << std::endl; }; }; /** @brief F1 score metric function for multiclass classification. The class returns the macro average F1 score across all batches Where F1 score = 2*precision*recall/(precision + recall) and precision = TP/(TP + FP) and recall = TP/(TP + FN) */ template<typename TensorT, typename DeviceT> class F1ScoreMCMacroTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: using MetricFunctionTensorOp<TensorT, DeviceT>::MetricFunctionTensorOp; std::string getName() { return "F1ScoreMCMacroTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); //// calculate the confusion matrix //auto predicted_chip = predicted_tensor.chip(time_step, 1); //auto tp = (predicted_chip >= expected_tensor.constant(TensorT(this->classification_threshold_)) && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); //auto fp = (predicted_chip >= expected_tensor.constant(TensorT(this->classification_threshold_)) && expected_tensor < expected_tensor.constant(TensorT(this->threshold_negative_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); //auto fn = (predicted_chip < expected_tensor.constant(TensorT(this->classification_threshold_)) && expected_tensor > expected_tensor.constant(TensorT(this->threshold_positive_))).select(expected_tensor.constant(TensorT(1)), expected_tensor.constant(TensorT(0))); //// calculate the F1 score [TODO: update as this is not correct...] //auto precision = tp.sum() / (tp.sum() + fp.sum()); //auto recall = tp.sum() / (tp.sum() + fn.sum()); //auto f1score = precision.constant(TensorT(2))*precision*recall / (precision + recall); //error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += f1score; }; }; /** @brief AUROC metric function. Where ROC point per batch = sensitivity/FPR and sensitivity = recall = TP/(TP + FN) and FPR = FP/(FP + TN) And AUROC = area under the curve of sensitivity vs. FPR */ template<typename TensorT, typename DeviceT> class AUROCTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: using MetricFunctionTensorOp<TensorT, DeviceT>::MetricFunctionTensorOp; std::string getName() { return "AUROCTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); }; }; /** @brief Matthews correlation coefficient (binary 2 class problems) metric function. The class retuns the average metthews correlation coefficient across all batches Where MCC = TP*TN-FP*FN/sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)) */ template<typename TensorT, typename DeviceT> class MCCBCTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: using MetricFunctionTensorOp<TensorT, DeviceT>::MetricFunctionTensorOp; std::string getName() { return "MCCBCTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); // TODO... }; }; /** @brief Matthews correlation coefficient metric function for multiclass classification. The class retuns the micro average metthews correlation coefficient across all batches Where MCC = TP*TN-FP*FN/sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)) */ template<typename TensorT, typename DeviceT> class MCCMCMicroTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: using MetricFunctionTensorOp<TensorT, DeviceT>::MetricFunctionTensorOp; std::string getName() { return "MCCMCMicroTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); // TODO... }; }; /** @brief MAE Mean Absolute Error metric function. Where MAE = 1/N * Sum[ abs(xi-xhat) ] */ template<typename TensorT, typename DeviceT> class MAETensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: using MetricFunctionTensorOp<TensorT, DeviceT>::MetricFunctionTensorOp; std::string getName() { return "MAETensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += ((expected_tensor - predicted_chip).pow(TensorT(2)).pow(TensorT(0.5)) / expected_tensor.constant(TensorT(layer_size) * TensorT(batch_size))).sum(); }; }; /** @brief CosineSimilarity metric function. Where CosineSimilarity = A*B/(||A||*||B||) Note: need to divide by the batch size if the average value over all batches is needed */ template<typename TensorT, typename DeviceT> class CosineSimilarityTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: CosineSimilarityTensorOp() = default; CosineSimilarityTensorOp(std::string& reduction_func) : MetricFunctionTensorOp<TensorT, DeviceT>(reduction_func) {}; std::string getName() { return "CosineSimilarityTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> expected_tensor(expected, batch_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto dot_prod = (predicted_chip * expected_tensor).sum(Eigen::array<Eigen::Index, 1>({1})); // dim 1 batch_size auto predicted_unit = (predicted_chip.pow(TensorT(2)).sum(Eigen::array<Eigen::Index, 1>({ 1 }))).pow(TensorT(0.5)); // dim 1 batch_size auto expected_unit = (expected_tensor.pow(TensorT(2)).sum(Eigen::array<Eigen::Index, 1>({ 1 }))).pow(TensorT(0.5)); // dim 1 batch_size // allocate temporary memory TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size * 1]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * 1 * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> cosine_similarity(tmp_data, batch_size, 1); cosine_similarity.device(device) = dot_prod / (predicted_unit * expected_unit); if (this->reduction_func_ == "Sum") error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += cosine_similarity.sum(); else if (this->reduction_func_ == "Mean") error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += (cosine_similarity / cosine_similarity.constant(TensorT(batch_size))).sum(); else if (this->reduction_func_ == "Var") { auto mean = (cosine_similarity / cosine_similarity.constant(TensorT(batch_size))).sum(Eigen::array<int, 1>({ 0 })).broadcast(Eigen::array<int, 1>({ batch_size })); auto var = ((mean - cosine_similarity.chip(0, 1)).pow(TensorT(2)) / mean.constant(TensorT(batch_size) - 1)).sum(); error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += var; } // deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; }; /** @brief PearsonR metric function. Where PearsonR = Rxy = Sum(i=1 to n)[(xi-xhat)(yi-yhat)]/(sqrt(Sum(i=1 to n)[(xi-xhat)^2]) * sqrt(Sum(i=1 to n)[(yi-yhat)^2])) Note: need to divide by the batch size if the average value over all batches is needed */ template<typename TensorT, typename DeviceT> class PearsonRTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: PearsonRTensorOp() = default; PearsonRTensorOp(std::string& reduction_func) : MetricFunctionTensorOp<TensorT, DeviceT>(reduction_func) {}; std::string getName() { return "PearsonRTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> expected_tensor(expected, batch_size, layer_size, 1, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 5>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto cov = ((predicted_chip.chip(0, 2) - predicted_chip.mean(Eigen::array<Eigen::Index, 1>({ 1 })).broadcast(Eigen::array<Eigen::Index, 3>({ 1, layer_size, 1 }))) * (expected_tensor.chip(0, 2) - expected_tensor.mean(Eigen::array<Eigen::Index, 1>({ 1 })).broadcast(Eigen::array<Eigen::Index, 3>({ 1, layer_size, 1 }))) ).sum(Eigen::array<Eigen::Index, 1>({ 1 })); // Dim 1 batch_size auto predicted_stdev = ((predicted_chip.chip(0, 2) - predicted_chip.mean(Eigen::array<Eigen::Index, 1>({ 1 })).broadcast(Eigen::array<Eigen::Index, 3>({ 1, layer_size, 1 })) ).pow(TensorT(2)).sum(Eigen::array<Eigen::Index, 1>({ 1 })).pow(TensorT(0.5))); // Dim 1 batch_size auto expected_stdev = ((expected_tensor.chip(0, 2) - expected_tensor.mean(Eigen::array<Eigen::Index, 1>({ 1 })).broadcast(Eigen::array<Eigen::Index, 3>({ 1, layer_size, 1 })) ).pow(TensorT(2)).sum(Eigen::array<Eigen::Index, 1>({ 1 })).pow(TensorT(0.5))); // Dim 1 batch_size // allocate temporary memory TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size * 1]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * 1 * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> PearsonR(tmp_data, batch_size, 1); PearsonR.device(device) = cov / (predicted_stdev * expected_stdev); if (this->reduction_func_ == "Sum") error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += PearsonR.sum(); else if (this->reduction_func_ == "Mean") error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += (PearsonR / PearsonR.constant(TensorT(batch_size))).sum(); else if (this->reduction_func_ == "Var") { auto mean = (PearsonR / PearsonR.constant(TensorT(batch_size))).sum(Eigen::array<int, 1>({ 0 })).broadcast(Eigen::array<int, 1>({ batch_size })); auto var = ((mean - PearsonR.chip(0, 1)).pow(TensorT(2)) / mean.constant(TensorT(batch_size) - 1)).sum(); error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += var; } // deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; }; /** @brief EuclideanDist metric function. NOTE: useful for data in the range of (-inf, inf) */ template<typename TensorT, typename DeviceT> class EuclideanDistTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: EuclideanDistTensorOp() = default; EuclideanDistTensorOp(std::string& reduction_func) : MetricFunctionTensorOp<TensorT, DeviceT>(reduction_func) {}; std::string getName() { return "EuclideanDistTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> expected_tensor(expected, batch_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); // allocate temporary memory TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size * 1]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * 1 * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> euclidean_dist(tmp_data, batch_size, 1); euclidean_dist.device(device) = ((expected_tensor - predicted_chip).pow(TensorT(2))).sum(Eigen::array<int, 1>({ 1 })).sqrt(); if (this->reduction_func_ == "Sum") error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += euclidean_dist.sum(); else if (this->reduction_func_ == "Mean") error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += (euclidean_dist / euclidean_dist.constant(TensorT(batch_size))).sum(); else if (this->reduction_func_ == "Var") { auto mean = (euclidean_dist / euclidean_dist.constant(TensorT(batch_size))).sum(Eigen::array<int, 1>({ 0 })).broadcast(Eigen::array<int, 1>({ batch_size })); auto var = ((mean - euclidean_dist.chip(0, 1)).pow(TensorT(2)) / mean.constant(TensorT(batch_size) - 1)).sum(); error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += var; } // deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; }; /** @brief ManhattanDist metric function. NOTE: useful for data in the range of (-inf, inf) */ template<typename TensorT, typename DeviceT> class ManhattanDistTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: ManhattanDistTensorOp() = default; ManhattanDistTensorOp(std::string& reduction_func) : MetricFunctionTensorOp<TensorT, DeviceT>(reduction_func) {}; std::string getName() { return "ManhattanDistTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> expected_tensor(expected, batch_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); // allocate temporary memory TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size * 1]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * 1 * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> euclidean_dist(tmp_data, batch_size, 1); euclidean_dist.device(device) = ((expected_tensor - predicted_chip).pow(TensorT(2)).sqrt()).sum(Eigen::array<int, 1>({ 1 })); if (this->reduction_func_ == "Sum") error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += euclidean_dist.sum(); else if (this->reduction_func_ == "Mean") error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += (euclidean_dist / euclidean_dist.constant(TensorT(batch_size))).sum(); else if (this->reduction_func_ == "Var") { auto mean = (euclidean_dist / euclidean_dist.constant(TensorT(batch_size))).sum(Eigen::array<int, 1>({ 0 })).broadcast(Eigen::array<int, 1>({ batch_size })); auto var = ((mean - euclidean_dist.chip(0, 1)).pow(TensorT(2)) / mean.constant(TensorT(batch_size) - 1)).sum(); error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += var; } // deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; }; /** @brief JeffreysAndMatusitaDist metric function. NOTE: only useful for data in the range of [0, inf) */ template<typename TensorT, typename DeviceT> class JeffreysAndMatusitaDistTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: JeffreysAndMatusitaDistTensorOp() = default; JeffreysAndMatusitaDistTensorOp(std::string& reduction_func) : MetricFunctionTensorOp<TensorT, DeviceT>(reduction_func) {}; std::string getName() { return "JeffreysAndMatusitaDistTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> expected_tensor(expected, batch_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); // allocate temporary memory TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size * 1]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * 1 * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> euclidean_dist(tmp_data, batch_size, 1); euclidean_dist.device(device) = ((expected_tensor.cwiseMax(expected_tensor.constant(TensorT(0))).sqrt() - predicted_chip.cwiseMax(predicted_chip.constant(TensorT(0))).sqrt()).pow(TensorT(2))).sum(Eigen::array<int, 1>({ 1 })).sqrt(); if (this->reduction_func_ == "Sum") error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += euclidean_dist.sum(); else if (this->reduction_func_ == "Mean") error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += (euclidean_dist / euclidean_dist.constant(TensorT(batch_size))).sum(); else if (this->reduction_func_ == "Var") { auto mean = (euclidean_dist / euclidean_dist.constant(TensorT(batch_size))).sum(Eigen::array<int, 1>({ 0 })).broadcast(Eigen::array<int, 1>({ batch_size })); auto var = ((mean - euclidean_dist.chip(0, 1)).pow(TensorT(2)) / mean.constant(TensorT(batch_size) - 1)).sum(); error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += var; } // deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; }; /** @brief Logarithmic Distance metric function. NOTE: only useful for data in the range of [0, inf) */ template<typename TensorT, typename DeviceT> class LogarithmicDistTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: LogarithmicDistTensorOp() = default; LogarithmicDistTensorOp(std::string& reduction_func) : MetricFunctionTensorOp<TensorT, DeviceT>(reduction_func) {}; std::string getName() { return "LogarithmicDistTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> expected_tensor(expected, batch_size, layer_size, 1, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 5>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto diff = expected_tensor - predicted_chip; auto min_offset = diff.chip(0, 2) - diff.minimum(Eigen::array<Eigen::Index, 1>({ 1 })).broadcast(Eigen::array<Eigen::Index, 3>({ 1, layer_size, 1 })) + diff.chip(0, 2).constant(TensorT(1)); // allocate temporary memory TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size * 1]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * 1 * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> euclidean_dist(tmp_data, batch_size, 1); euclidean_dist.device(device) = min_offset.log().sum(Eigen::array<int, 1>({ 1 })); if (this->reduction_func_ == "Sum") error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += euclidean_dist.sum(); else if (this->reduction_func_ == "Mean") error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += (euclidean_dist / euclidean_dist.constant(TensorT(batch_size))).sum(); else if (this->reduction_func_ == "Var") { auto mean = (euclidean_dist / euclidean_dist.constant(TensorT(batch_size))).sum(Eigen::array<int, 1>({ 0 })).broadcast(Eigen::array<int, 1>({ batch_size })); auto var = ((mean - euclidean_dist.chip(0, 1)).pow(TensorT(2)) / mean.constant(TensorT(batch_size) - 1)).sum(); error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += var; } // deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; }; /** @brief PercentDifference metric function. NOTE: useful for data in the range of (-inf, inf) */ template<typename TensorT, typename DeviceT> class PercentDifferenceTensorOp : public MetricFunctionTensorOp<TensorT, DeviceT> { public: PercentDifferenceTensorOp() = default; PercentDifferenceTensorOp(std::string& reduction_func) : MetricFunctionTensorOp<TensorT, DeviceT>(reduction_func) {}; std::string getName() { return "PercentDifferenceTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& n_metrics, const int& time_step, const int& metric_index, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> expected_tensor(expected, batch_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, n_metrics, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto perc_diff_selected = (expected_tensor == expected_tensor.constant(TensorT(0))).select(expected_tensor.constant(TensorT(0)), ((expected_tensor - predicted_chip) / expected_tensor).pow(TensorT(2)).sqrt() ); // allocate temporary memory TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size * 1]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * 1 * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> perce_diff(tmp_data, batch_size, 1); perce_diff.device(device) = perc_diff_selected.sum(Eigen::array<int, 1>({ 1 })); if (this->reduction_func_ == "Sum") error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += perce_diff.sum(); else if (this->reduction_func_ == "Mean") error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += (perce_diff / perce_diff.constant(TensorT(batch_size))).sum(); else if (this->reduction_func_ == "Var") { auto mean = (perce_diff / perce_diff.constant(TensorT(batch_size))).sum(Eigen::array<int, 1>({ 0 })).broadcast(Eigen::array<int, 1>({ batch_size })); auto var = ((mean - perce_diff.chip(0, 1)).pow(TensorT(2)) / mean.constant(TensorT(batch_size) - 1)).sum(); error_tensor.chip(metric_index, 0).chip(time_step, 0).device(device) += var; } // deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; }; } #endif //EVONET_METRICFUNCTIONTENSOR_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE Solver3 test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/SolverTensor.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(solver3) BOOST_AUTO_TEST_CASE(constructorSGDOp) { SGDTensorOp<float, Eigen::DefaultDevice>* ptrSGD = nullptr; SGDTensorOp<float, Eigen::DefaultDevice>* nullPointerSGD = nullptr; BOOST_CHECK_EQUAL(ptrSGD, nullPointerSGD); } BOOST_AUTO_TEST_CASE(destructorSGDOp) { SGDTensorOp<float, Eigen::DefaultDevice>* ptrSGD = nullptr; ptrSGD = new SGDTensorOp<float, Eigen::DefaultDevice>(); delete ptrSGD; } BOOST_AUTO_TEST_CASE(settersAndGetters) { SGDTensorOp<float, Eigen::DefaultDevice> operation(10.0f, 1.0f, 0.55f); BOOST_CHECK_EQUAL(operation.getName(), "SGDTensorOp"); BOOST_CHECK_CLOSE(operation.getGradientThreshold(), 10.0, 1e4); BOOST_CHECK_CLOSE(operation.getGradientNoiseSigma(), 1.0, 1e4); BOOST_CHECK_CLOSE(operation.getGradientNoiseGamma(), 0.55, 1e4); BOOST_CHECK_CLOSE(operation.getEps(), 1e-24, 1e4); //BOOST_CHECK_EQUAL(operation.getParameters(), "gradient_threshold:1000000.000000;gradient_noise_sigma:1.000000;gradient_noise_gamma:0.550000;learning_rate:0.900000;momentum:0.100000;momentum_prev:0.000000"); SSDTensorOp<float, Eigen::DefaultDevice> ssd_op(10.0f, 1.0f, 0.55f); BOOST_CHECK_EQUAL(ssd_op.getName(), "SSDTensorOp"); BOOST_CHECK_CLOSE(ssd_op.getGradientThreshold(), 10.0, 1e4); BOOST_CHECK_CLOSE(ssd_op.getGradientNoiseSigma(), 1.0, 1e4); BOOST_CHECK_CLOSE(ssd_op.getGradientNoiseGamma(), 0.55, 1e4); BOOST_CHECK_CLOSE(ssd_op.getEps(), 1e-24, 1e4); //BOOST_CHECK_EQUAL(ssd_op.getParameters(), "gradient_threshold:1000000.000000;gradient_noise_sigma:1.000000;gradient_noise_gamma:0.550000;learning_rate:0.900000;momentum:0.100000;momentum_prev:0.000000"); AdamTensorOp<float, Eigen::DefaultDevice> adam_op(10.0f, 1.0f, 0.55f); BOOST_CHECK_EQUAL(adam_op.getName(), "AdamTensorOp"); BOOST_CHECK_CLOSE(adam_op.getGradientThreshold(), 10.0, 1e4); BOOST_CHECK_CLOSE(adam_op.getGradientNoiseSigma(), 1.0, 1e4); BOOST_CHECK_CLOSE(adam_op.getGradientNoiseGamma(), 0.55, 1e4); BOOST_CHECK_CLOSE(adam_op.getEps(), 1e-24, 1e4); //BOOST_CHECK_EQUAL(adam_op.getParameters(), "gradient_threshold:1000000.000000;gradient_noise_sigma:1.000000;gradient_noise_gamma:0.550000;learning_rate:0.010000;momentum:0.900000;momentum2:0.999000;delta:0.000000;momentum_prev:0.000000;momentum2_prev:0.000000"); SVAGTensorOp<float, Eigen::DefaultDevice> svag_op(10.0f, 1.0f, 0.55f); BOOST_CHECK_EQUAL(svag_op.getName(), "SVAGTensorOp"); BOOST_CHECK_CLOSE(svag_op.getGradientThreshold(), 10.0, 1e4); BOOST_CHECK_CLOSE(svag_op.getGradientNoiseSigma(), 1.0, 1e4); BOOST_CHECK_CLOSE(svag_op.getGradientNoiseGamma(), 0.55, 1e4); BOOST_CHECK_CLOSE(svag_op.getEps(), 1e-24, 1e4); //BOOST_CHECK_EQUAL(svag_op.getParameters(), "gradient_threshold:1000000.000000;gradient_noise_sigma:1.000000;gradient_noise_gamma:0.550000;learning_rate:0.010000;momentum:0.900000;momentum2:0.999000;delta:0.000000;momentum_prev:0.000000;momentum2_prev:0.000000"); DummySolverTensorOp<float, Eigen::DefaultDevice> dummy_solver_op(10.0f, 1.0f, 0.55f); BOOST_CHECK_EQUAL(dummy_solver_op.getName(), "DummySolverTensorOp"); BOOST_CHECK_CLOSE(dummy_solver_op.getGradientThreshold(), 10.0, 1e4); BOOST_CHECK_CLOSE(dummy_solver_op.getGradientNoiseSigma(), 1.0, 1e4); BOOST_CHECK_CLOSE(dummy_solver_op.getGradientNoiseGamma(), 0.55, 1e4); BOOST_CHECK_CLOSE(dummy_solver_op.getEps(), 1e-24, 1e4); //BOOST_CHECK_EQUAL(dummy_solver_op.getParameters(), ""); } BOOST_AUTO_TEST_CASE(annealGradientNoiseSigma) { SGDTensorOp<float, Eigen::DefaultDevice> operation(10.0f, 1.0f, 0.55f); BOOST_CHECK_CLOSE(operation.annealGradientNoiseSigma(1), 0.683020115, 1e-4); BOOST_CHECK_CLOSE(operation.annealGradientNoiseSigma(100), 0.0789992958, 1e-4); } BOOST_AUTO_TEST_CASE(operationfunctionSGDOp) { SGDTensorOp<float, Eigen::DefaultDevice> operation; const int sink_layer_size = 1; const int source_layer_size = 2; const int iter = 0; Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 2> errors(source_layer_size, sink_layer_size); errors.setValues({ {0.1}, {10} }); Eigen::Tensor<float, 3> solver_params(source_layer_size, sink_layer_size, 3); solver_params.setValues({ {{0.01, 0.9, 0.0}}, {{0.01, 0.9, 0.0}} }); Eigen::DefaultDevice device; // Test operator operation(weights.data(), errors.data(), solver_params.data(), source_layer_size, sink_layer_size, iter, device); BOOST_CHECK_CLOSE(weights(0, 0), 0.999000013, 1e-4); BOOST_CHECK_CLOSE(weights(1, 0), 0.899999976, 1e-4); BOOST_CHECK_CLOSE(errors(0, 0), 0.1, 1e-4); BOOST_CHECK_CLOSE(errors(1, 0), 10.0, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 2), 0.0100000026, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 2), 1.00000024, 1e-4); Eigen::Tensor<float, 2> weights1(source_layer_size, sink_layer_size); weights1.setConstant(1); Eigen::Tensor<float, 3> solver_params1(source_layer_size, sink_layer_size, 3); solver_params1.setValues({ {{0.01, 0.9, 0.0}}, {{0.01, 0.9, 0.0}} }); operation.setGradientThreshold(1.0); // Test second operator call operation(weights1.data(), errors.data(), solver_params1.data(), source_layer_size, sink_layer_size, iter, device); BOOST_CHECK_CLOSE(weights1(0, 0), 0.999000013, 1e-4); BOOST_CHECK_CLOSE(weights1(1, 0), 0.99000001, 1e-4); BOOST_CHECK_CLOSE(errors(0, 0), 0.1, 1e-4); BOOST_CHECK_CLOSE(errors(1, 0), 10.0, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 2), 0.0100000026, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 2), 0.100000024, 1e-4); // Test operator call with noise operation.setGradientNoiseSigma(10.0f); weights.setConstant(1); errors.setValues({ {0.1}, {10} }); solver_params.setValues({ {{0.01, 0.9, 0.0}}, {{0.01, 0.9, 0.0}} }); operation(weights.data(), errors.data(), solver_params.data(), source_layer_size, sink_layer_size, iter, device); BOOST_CHECK(weights(0, 0) != 0.999000013, 1e-4); BOOST_CHECK(weights(1, 0)!= 0.899999976, 1e-4); BOOST_CHECK_CLOSE(errors(0, 0), 0.1, 1e-4); BOOST_CHECK_CLOSE(errors(1, 0), 10.0, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 1), 0.9, 1e-4); BOOST_CHECK(solver_params(0, 0, 2)!= 0.0100000026, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 1), 0.9, 1e-4); BOOST_CHECK(solver_params(1, 0, 2)!= 1.00000024, 1e-4); } BOOST_AUTO_TEST_CASE(operationfunctionSSDOp) { SSDTensorOp<float, Eigen::DefaultDevice> operation; const int sink_layer_size = 1; const int source_layer_size = 2; const int iter = 0; Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 2> errors(source_layer_size, sink_layer_size); errors.setValues({ {0.1}, {10} }); Eigen::Tensor<float, 3> solver_params(source_layer_size, sink_layer_size, 3); solver_params.setValues({ {{0.01, 0.9, 0.0}}, {{0.01, 0.9, 0.0}} }); Eigen::DefaultDevice device; // Test operator operation(weights.data(), errors.data(), solver_params.data(), source_layer_size, sink_layer_size, iter, device); BOOST_CHECK_CLOSE(weights(0, 0), 0.999000013, 1e-4); BOOST_CHECK_CLOSE(weights(1, 0), 0.999000013, 1e-4); BOOST_CHECK_CLOSE(errors(0, 0), 0.1, 1e-4); BOOST_CHECK_CLOSE(errors(1, 0), 10.0, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 2), 0.100000024, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 2), 0.100000024, 1e-4); Eigen::Tensor<float, 2> weights1(source_layer_size, sink_layer_size); weights1.setConstant(1); Eigen::Tensor<float, 3> solver_params1(source_layer_size, sink_layer_size, 3); solver_params1.setValues({ {{0.01, 0.9, 0.0}}, {{0.01, 0.9, 0.0}} }); operation.setGradientThreshold(1.0); // Test second operator call operation(weights1.data(), errors.data(), solver_params1.data(), source_layer_size, sink_layer_size, iter, device); BOOST_CHECK_CLOSE(weights1(0, 0), 0.999000013, 1e-4); BOOST_CHECK_CLOSE(weights1(1, 0), 0.999000013, 1e-4); BOOST_CHECK_CLOSE(errors(0, 0), 0.1, 1e-4); BOOST_CHECK_CLOSE(errors(1, 0), 10.0, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 2), 0.100000024, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 2), 0.100000024, 1e-4); // Test operator call with noise operation.setGradientNoiseSigma(10.0f); weights.setConstant(1); errors.setValues({ {0.1}, {10} }); solver_params.setValues({ {{0.01, 0.9, 0.0}}, {{0.01, 0.9, 0.0}} }); operation(weights.data(), errors.data(), solver_params.data(), source_layer_size, sink_layer_size, iter, device); BOOST_CHECK(weights(0, 0) != 0.999000013, 1e-4); BOOST_CHECK(weights(1, 0) != 0.999000013, 1e-4); BOOST_CHECK_CLOSE(errors(0, 0), 0.1, 1e-4); BOOST_CHECK_CLOSE(errors(1, 0), 10.0, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 1), 0.9, 1e-4); BOOST_CHECK(solver_params(0, 0, 2) != 0.100000024, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 1), 0.9, 1e-4); BOOST_CHECK(solver_params(1, 0, 2) != 0.100000024, 1e-4); } BOOST_AUTO_TEST_CASE(operationfunctionAdamOp) { AdamTensorOp<float, Eigen::DefaultDevice> operation; const int sink_layer_size = 1; const int source_layer_size = 2; const int iter = 0; Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 2> errors(source_layer_size, sink_layer_size); errors.setValues({ {0.1}, {10} }); Eigen::Tensor<float, 3> solver_params(source_layer_size, sink_layer_size, 6); solver_params.setValues({ {{0.01, 0.9, 0.999, 1e-8, 0.0, 0.0}}, {{0.01, 0.9, 0.999, 1e-8, 0.0, 0.0}} }); Eigen::DefaultDevice device; // Test operator call operation(weights.data(), errors.data(), solver_params.data(), source_layer_size, sink_layer_size, iter, device); BOOST_CHECK_CLOSE(weights(0, 0), 0.99000001, 1e-4); BOOST_CHECK_CLOSE(weights(1, 0), 0.99000001, 1e-4); BOOST_CHECK_CLOSE(errors(0, 0), 0.1, 1e-4); BOOST_CHECK_CLOSE(errors(1, 0), 10.0, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 2), 0.999, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 3), 1e-8, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 4), 0.0100000026, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 5), 9.99987151e-06, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 2), 0.999, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 3), 1e-8, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 4), 1.00000024, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 5), 0.0999987125, 1e-4); // Test second operator call Eigen::Tensor<float, 2> weights1(source_layer_size, sink_layer_size); weights1.setConstant(1); Eigen::Tensor<float, 3> solver_params1(source_layer_size, sink_layer_size, 6); solver_params1.setValues({ {{0.01, 0.9, 0.999, 1e-8, 0.0, 0.0}}, {{0.01, 0.9, 0.999, 1e-8, 0.0, 0.0}} }); operation.setGradientThreshold(1.0); operation(weights1.data(), errors.data(), solver_params1.data(), source_layer_size, sink_layer_size, iter, device); BOOST_CHECK_CLOSE(weights1(0, 0), 0.99000001, 1e-4); BOOST_CHECK_CLOSE(weights1(1, 0), 0.99000001, 1e-4); BOOST_CHECK_CLOSE(errors(0, 0), 0.1, 1e-4); BOOST_CHECK_CLOSE(errors(1, 0), 10.0, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 2), 0.999, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 3), 1e-8, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 4), 0.0100000026, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 5), 9.99987151e-06, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 2), 0.999, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 3), 1e-8, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 4), 0.1000, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 5), 0.000999987125, 1e-4); // Test operator call with noise operation.setGradientNoiseSigma(10.0f); weights.setConstant(1); errors.setValues({ {0.1}, {10} }); solver_params.setValues({ {{0.01, 0.9, 0.999, 1e-8, 0.0, 0.0}}, {{0.01, 0.9, 0.999, 1e-8, 0.0, 0.0}} }); operation(weights.data(), errors.data(), solver_params.data(), source_layer_size, sink_layer_size, iter, device); BOOST_CHECK(weights(0, 0) != 0.99000001, 1e-4); BOOST_CHECK(weights(1, 0) != 0.99000001, 1e-4); BOOST_CHECK_CLOSE(errors(0, 0), 0.1, 1e-4); BOOST_CHECK_CLOSE(errors(1, 0), 10.0, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 2), 0.999, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 3), 1e-8, 1e-4); BOOST_CHECK(solver_params(0, 0, 4) != 0.0100000026, 1e-4); BOOST_CHECK(solver_params(0, 0, 5) != 9.99987151e-06, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 2), 0.999, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 3), 1e-8, 1e-4); BOOST_CHECK(solver_params(1, 0, 4) != 1.00000024, 1e-4); BOOST_CHECK(solver_params(1, 0, 5) != 0.0999987125, 1e-4); } BOOST_AUTO_TEST_CASE(operationfunctionSVAGOp) { SVAGTensorOp<float, Eigen::DefaultDevice> operation; const int sink_layer_size = 1; const int source_layer_size = 2; const int iter = 0; Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 2> errors(source_layer_size, sink_layer_size); errors.setValues({ {0.1}, {10} }); Eigen::Tensor<float, 3> solver_params(source_layer_size, sink_layer_size, 4); solver_params.setValues({ {{0.01, 0.9, 0.0, 0.0}}, {{0.01, 0.9, 0.0, 0.0}} }); Eigen::DefaultDevice device; // Test operator call operation(weights.data(), errors.data(), solver_params.data(), source_layer_size, sink_layer_size, iter, device); BOOST_CHECK_CLOSE(weights(0, 0), 0.999000013, 1e-4); BOOST_CHECK_CLOSE(weights(1, 0), 0.899999976, 1e-4); BOOST_CHECK_CLOSE(errors(0, 0), 0.1, 1e-4); BOOST_CHECK_CLOSE(errors(1, 0), 10.0, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 2), 0.01000000, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 3), 0.001000000, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 2), 1.000000, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 3), 10.00000, 1e-4); // Test second operator call Eigen::Tensor<float, 2> weights1(source_layer_size, sink_layer_size); weights1.setConstant(1); Eigen::Tensor<float, 3> solver_params1(source_layer_size, sink_layer_size, 4); solver_params1.setValues({ {{0.01, 0.9, 0.0, 0.0}}, {{0.01, 0.9, 0.0, 0.0}} }); operation.setGradientThreshold(1.0); operation(weights1.data(), errors.data(), solver_params1.data(), source_layer_size, sink_layer_size, iter, device); BOOST_CHECK_CLOSE(weights1(0, 0), 0.9990000, 1e-4); BOOST_CHECK_CLOSE(weights1(1, 0), 0.9900000, 1e-4); BOOST_CHECK_CLOSE(errors(0, 0), 0.1, 1e-4); BOOST_CHECK_CLOSE(errors(1, 0), 10.0, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 2), 0.0100000026, 1e-4); BOOST_CHECK_CLOSE(solver_params1(0, 0, 3), 0.00100000028, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 1), 0.9, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 2), 0.1000, 1e-4); BOOST_CHECK_CLOSE(solver_params1(1, 0, 3), 0.1000000, 1e-4); // Test operator call with noise operation.setGradientNoiseSigma(10.0f); weights.setConstant(1); errors.setValues({ {0.1}, {10} }); solver_params.setValues({ {{0.01, 0.9, 0.0, 0.0}}, {{0.01, 0.9, 0.0, 0.0}} }); operation(weights.data(), errors.data(), solver_params.data(), source_layer_size, sink_layer_size, iter, device); BOOST_CHECK(weights(0, 0) != 0.999000013, 1e-4); BOOST_CHECK(weights(1, 0) != 0.899999976, 1e-4); BOOST_CHECK_CLOSE(errors(0, 0), 0.1, 1e-4); BOOST_CHECK_CLOSE(errors(1, 0), 10.0, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 1), 0.9, 1e-4); BOOST_CHECK(solver_params(0, 0, 2) != 0.01000000); BOOST_CHECK(solver_params(0, 0, 3) != 0.001000000); BOOST_CHECK_CLOSE(solver_params(1, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 1), 0.9, 1e-4); BOOST_CHECK(solver_params(1, 0, 2) != 1.000000, 1e-4); BOOST_CHECK(solver_params(1, 0, 3) != 10.00000, 1e-4); } BOOST_AUTO_TEST_CASE(operationfunctionDummySolverOp) { DummySolverTensorOp<float, Eigen::DefaultDevice> operation; const int sink_layer_size = 1; const int source_layer_size = 2; const int iter = 0; Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 2> errors(source_layer_size, sink_layer_size); errors.setValues({ {0.1}, {10} }); Eigen::Tensor<float, 3> solver_params(source_layer_size, sink_layer_size, 3); solver_params.setValues({ {{0.01, 0.99, 0.0}}, {{0.01, 0.99, 0.0}} }); Eigen::DefaultDevice device; operation(weights.data(), errors.data(), solver_params.data(), source_layer_size, sink_layer_size, iter, device); BOOST_CHECK_CLOSE(weights(0, 0), 1, 1e-4); BOOST_CHECK_CLOSE(weights(1, 0), 1, 1e-4); BOOST_CHECK_CLOSE(errors(0, 0), 0.1, 1e-4); BOOST_CHECK_CLOSE(errors(1, 0), 10.0, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 1), 0.99, 1e-4); BOOST_CHECK_CLOSE(solver_params(0, 0, 2), 0, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 0), 0.01, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 1), 0.99, 1e-4); BOOST_CHECK_CLOSE(solver_params(1, 0, 2), 0, 1e-4); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELINTERPRETERFILEGPU_H #define EVONET_MODELINTERPRETERFILEGPU_H // .h #include <EvoNet/io/ModelInterpreterFile.h> #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <EvoNet/ml/ModelInterpreterGpu.h> #include <cuda.h> #include <cuda_runtime.h> namespace EvoNet { /** @brief ModelInterpreterFileGpu */ template<typename TensorT> class ModelInterpreterFileGpu : public ModelInterpreterFile<TensorT, ModelInterpreterGpu<TensorT>> { public: ModelInterpreterFileGpu() = default; ///< Default constructor ~ModelInterpreterFileGpu() = default; ///< Default destructor private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ModelInterpreterFile<TensorT, ModelInterpreterGpu<TensorT>>>(this)); } }; } #endif #endif //EVONET_MODELINTERPRETERFILEGPU_H<file_sep>Getting started with EvoNet =========================== .. include:: ../../README.rst :start-after: begin_introduction :end-before: end_introduction Quick Start ----------- Download SmartPeak from `GitHub <https://github.com/dmccloskey/EvoNet/releases>`_. <file_sep>cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) project("EvoNet_src") #------------------------------------------------------------------------------ # EvoNet core lib #------------------------------------------------------------------------------ add_subdirectory(evonet) #------------------------------------------------------------------------------ # Tests #------------------------------------------------------------------------------ option (BUILD_TESTS "Whether or not build unit tests" ON) if(BUILD_TESTS) add_subdirectory(tests) endif() #------------------------------------------------------------------------------ # Examples #------------------------------------------------------------------------------ option (BUILD_EXAMPLES "Whether or not build the examples" ON) if(BUILD_EXAMPLES) add_subdirectory(examples) endif()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_INTEGRATIONFUNCTION_H #define EVONET_INTEGRATIONFUNCTION_H #include <unsupported/Eigen/CXX11/Tensor> #include <cereal/access.hpp> // serialiation of private members #undef min // clashes with std::limit on windows in polymorphic.hpp #undef max // clashes with std::limit on windows in polymorphic.hpp #include <cereal/types/polymorphic.hpp> namespace EvoNet { /** @brief Base class for all integration functions. */ template<typename T> class IntegrationOp { public: IntegrationOp() = default; ~IntegrationOp() = default; void setEps(const T& eps) { eps_ = eps; } T getEps() const { return eps_; }; virtual std::string getName() const = 0; virtual IntegrationOp<T>* copy() const = 0; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(eps_); } T eps_ = (T)1e-6; }; /** @brief Sum integration function */ template<typename T> class SumOp: public IntegrationOp<T> { public: using IntegrationOp<T>::IntegrationOp; std::string getName() const{return "SumOp";}; IntegrationOp<T>* copy() const { return new SumOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationOp<T>>(this)); } }; /** @brief Product integration function */ template<typename T> class ProdOp : public IntegrationOp<T> { public: using IntegrationOp<T>::IntegrationOp; std::string getName() const { return "ProdOp"; }; IntegrationOp<T>* copy() const { return new ProdOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationOp<T>>(this)); } }; /** @brief Product singly connected integration function */ template<typename T> class ProdSCOp : public IntegrationOp<T> { public: using IntegrationOp<T>::IntegrationOp; std::string getName() const { return "ProdSCOp"; }; IntegrationOp<T>* copy() const { return new ProdSCOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationOp<T>>(this)); } }; /** @brief Max integration function */ template<typename T> class MaxOp : public IntegrationOp<T> { public: using IntegrationOp<T>::IntegrationOp; std::string getName() const { return "MaxOp"; }; IntegrationOp<T>* copy() const { return new MaxOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationOp<T>>(this)); } }; /** @brief Max integration function */ template<typename T> class MinOp : public IntegrationOp<T> { public: using IntegrationOp<T>::IntegrationOp; std::string getName() const { return "MinOp"; }; IntegrationOp<T>* copy() const { return new MinOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationOp<T>>(this)); } }; /** @brief Mean integration function */ template<typename T> class MeanOp : public IntegrationOp<T> { public: using IntegrationOp<T>::IntegrationOp; std::string getName() const { return "MeanOp"; }; IntegrationOp<T>* copy() const { return new MeanOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationOp<T>>(this)); } }; /** @brief Variance integration function References: T.F.Chan, <NAME> and <NAME> (1983). ""Algorithms for computing the sample variance: Analysis and recommendations", The American Statistician, 37": 242–247. */ template<typename T> class VarianceOp : public IntegrationOp<T> { public: using IntegrationOp<T>::IntegrationOp; //Eigen::Tensor<T, 1> getNetNodeInput() const { // Eigen::Tensor<T, 1> n(this->net_node_input_.dimension(0)); // n.setConstant(this->n_); // return (this->net_node_input_ - (ex_ * ex_)/ n)/n; } //void operator()(const Eigen::Tensor<T, 1>& weight, const Eigen::Tensor<T, 1>&source_output) { // auto input = weight * source_output; // if (this->n_ == 0) // k_ = input; // auto input_k = input - k_; // ex_ += input_k; // this->n_ += 1; // this->net_node_input_ += (input_k * input_k); //}; std::string getName() const { return "VarianceOp"; }; IntegrationOp<T>* copy() const { return new VarianceOp<T>(*this); } //private: // Eigen::Tensor<T, 1> k_ = 0; // Eigen::Tensor<T, 1> ex_ = 0; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationOp<T>>(this)); } }; /** @brief VarMod integration function Modified variance integration function: 1/n Sum[0 to n](Xi)^2 where Xi = xi - u (u: mean, xi: single sample) */ template<typename T> class VarModOp : public IntegrationOp<T> { public: using IntegrationOp<T>::IntegrationOp; std::string getName() const { return "VarModOp"; }; IntegrationOp<T>* copy() const { return new VarModOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationOp<T>>(this)); } }; /** @brief Count integration function */ template<typename T> class CountOp : public IntegrationOp<T> { public: using IntegrationOp<T>::IntegrationOp; std::string getName() const { return "CountOp"; }; IntegrationOp<T>* copy() const { return new CountOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationOp<T>>(this)); } }; /** @brief Base class for all integration error functions. */ template<typename T> class IntegrationErrorOp { public: IntegrationErrorOp() = default; ~IntegrationErrorOp() = default; void setEps(const T& eps) { eps_ = eps; } T getEps() const { return eps_; }; virtual std::string getName() const = 0; virtual IntegrationErrorOp<T>* copy() const = 0; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(eps_); } T eps_ = (T)1e-6; }; /** @brief Sum integration error function */ template<typename T> class SumErrorOp : public IntegrationErrorOp<T> { public: using IntegrationErrorOp<T>::IntegrationErrorOp; std::string getName() const { return "SumErrorOp"; }; IntegrationErrorOp<T>* copy() const { return new SumErrorOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationErrorOp<T>>(this)); } }; /** @brief Product integration error function */ template<typename T> class ProdErrorOp : public IntegrationErrorOp<T> { public: using IntegrationErrorOp<T>::IntegrationErrorOp; std::string getName() const { return "ProdErrorOp"; }; IntegrationErrorOp<T>* copy() const { return new ProdErrorOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationErrorOp<T>>(this)); } }; /** @brief Max integration error function */ template<typename T> class MaxErrorOp : public IntegrationErrorOp<T> { public: using IntegrationErrorOp<T>::IntegrationErrorOp; std::string getName() const { return "MaxErrorOp"; }; IntegrationErrorOp<T>* copy() const { return new MaxErrorOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationErrorOp<T>>(this)); } }; /** @brief Min integration error function */ template<typename T> class MinErrorOp : public IntegrationErrorOp<T> { public: using IntegrationErrorOp<T>::IntegrationErrorOp; std::string getName() const { return "MinErrorOp"; }; IntegrationErrorOp<T>* copy() const { return new MinErrorOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationErrorOp<T>>(this)); } }; /** @brief Mean integration error function */ template<typename T> class MeanErrorOp : public IntegrationErrorOp<T> { public: using IntegrationErrorOp<T>::IntegrationErrorOp; std::string getName() const { return "MeanErrorOp"; }; IntegrationErrorOp<T>* copy() const { return new MeanErrorOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationErrorOp<T>>(this)); } }; /** @brief VarMod integration error function */ template<typename T> class VarModErrorOp : public IntegrationErrorOp<T> { public: using IntegrationErrorOp<T>::IntegrationErrorOp; std::string getName() const { return "VarModErrorOp"; }; IntegrationErrorOp<T>* copy() const { return new VarModErrorOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationErrorOp<T>>(this)); } }; /** @brief Count integration error function */ template<typename T> class CountErrorOp : public IntegrationErrorOp<T> { public: using IntegrationErrorOp<T>::IntegrationErrorOp; std::string getName() const { return "CountErrorOp"; }; IntegrationErrorOp<T>* copy() const { return new CountErrorOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationErrorOp<T>>(this)); } }; /** @brief Base class for all integration error functions. */ template<typename T> class IntegrationWeightGradOp { public: IntegrationWeightGradOp() = default; ~IntegrationWeightGradOp() = default; void setEps(const T& eps) { eps_ = eps; } T getEps() const { return eps_; }; virtual std::string getName() const = 0; virtual IntegrationWeightGradOp<T>* copy() const = 0; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(eps_); } T eps_ = (T)1e-6; }; /** @brief Sum integration error function */ template<typename T> class SumWeightGradOp : public IntegrationWeightGradOp<T> { public: using IntegrationWeightGradOp<T>::IntegrationWeightGradOp; std::string getName() const { return "SumWeightGradOp"; }; IntegrationWeightGradOp<T>* copy() const { return new SumWeightGradOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationWeightGradOp<T>>(this)); } }; /** @brief Product integration error function */ template<typename T> class ProdWeightGradOp : public IntegrationWeightGradOp<T> { public: using IntegrationWeightGradOp<T>::IntegrationWeightGradOp; std::string getName() const { return "ProdWeightGradOp"; }; IntegrationWeightGradOp<T>* copy() const { return new ProdWeightGradOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationWeightGradOp<T>>(this)); } }; /** @brief Max integration error function */ template<typename T> class MaxWeightGradOp : public IntegrationWeightGradOp<T> { public: using IntegrationWeightGradOp<T>::IntegrationWeightGradOp; std::string getName() const { return "MaxWeightGradOp"; }; IntegrationWeightGradOp<T>* copy() const { return new MaxWeightGradOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationWeightGradOp<T>>(this)); } }; /** @brief Min integration error function */ template<typename T> class MinWeightGradOp : public IntegrationWeightGradOp<T> { public: using IntegrationWeightGradOp<T>::IntegrationWeightGradOp; std::string getName() const { return "MinWeightGradOp"; }; IntegrationWeightGradOp<T>* copy() const { return new MinWeightGradOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationWeightGradOp<T>>(this)); } }; /** @brief Count integration error function */ template<typename T> class CountWeightGradOp : public IntegrationWeightGradOp<T> { public: using IntegrationWeightGradOp<T>::IntegrationWeightGradOp; std::string getName() const { return "CountWeightGradOp"; }; IntegrationWeightGradOp<T>* copy() const { return new CountWeightGradOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationWeightGradOp<T>>(this)); } }; /** @brief Mean integration error function */ template<typename T> class MeanWeightGradOp : public IntegrationWeightGradOp<T> { public: using IntegrationWeightGradOp<T>::IntegrationWeightGradOp; std::string getName() const { return "MeanWeightGradOp"; }; IntegrationWeightGradOp<T>* copy() const { return new MeanWeightGradOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationWeightGradOp<T>>(this)); } }; /** @brief VarMod integration error function */ template<typename T> class VarModWeightGradOp : public IntegrationWeightGradOp<T> { public: using IntegrationWeightGradOp<T>::IntegrationWeightGradOp; std::string getName() const { return "VarModWeightGradOp"; }; IntegrationWeightGradOp<T>* copy() const { return new VarModWeightGradOp<T>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<IntegrationWeightGradOp<T>>(this)); } }; } CEREAL_REGISTER_TYPE(EvoNet::SumOp<float>); CEREAL_REGISTER_TYPE(EvoNet::ProdOp<float>); CEREAL_REGISTER_TYPE(EvoNet::ProdSCOp<float>); CEREAL_REGISTER_TYPE(EvoNet::MaxOp<float>); CEREAL_REGISTER_TYPE(EvoNet::MinOp<float>); CEREAL_REGISTER_TYPE(EvoNet::MeanOp<float>); CEREAL_REGISTER_TYPE(EvoNet::VarianceOp<float>); CEREAL_REGISTER_TYPE(EvoNet::VarModOp<float>); CEREAL_REGISTER_TYPE(EvoNet::CountOp<float>); CEREAL_REGISTER_TYPE(EvoNet::SumErrorOp<float>); CEREAL_REGISTER_TYPE(EvoNet::ProdErrorOp<float>); CEREAL_REGISTER_TYPE(EvoNet::MaxErrorOp<float>); CEREAL_REGISTER_TYPE(EvoNet::MinErrorOp<float>); CEREAL_REGISTER_TYPE(EvoNet::MeanErrorOp<float>); CEREAL_REGISTER_TYPE(EvoNet::VarModErrorOp<float>); CEREAL_REGISTER_TYPE(EvoNet::CountErrorOp<float>); CEREAL_REGISTER_TYPE(EvoNet::SumWeightGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::ProdWeightGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::MaxWeightGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::MinWeightGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::CountWeightGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::MeanWeightGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::VarModWeightGradOp<float>); //CEREAL_REGISTER_TYPE(EvoNet::SumOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::ProdOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::ProdSCOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::MaxOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::MinOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::MeanOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::VarianceOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::VarModOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::CountOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::SumErrorOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::ProdErrorOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::MaxErrorOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::MinErrorOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::MeanErrorOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::VarModErrorOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::CountErrorOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::SumWeightGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::ProdWeightGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::MaxWeightGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::MinWeightGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::CountWeightGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::MeanWeightGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::VarModWeightGradOp<double>); // //CEREAL_REGISTER_TYPE(EvoNet::SumOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::ProdOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::ProdSCOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::MaxOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::MinOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::MeanOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::VarianceOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::VarModOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::CountOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::SumErrorOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::ProdErrorOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::MaxErrorOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::MinErrorOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::MeanErrorOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::VarModErrorOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::CountErrorOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::SumWeightGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::ProdWeightGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::MaxWeightGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::MinWeightGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::CountWeightGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::MeanWeightGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::VarModWeightGradOp<int>); #endif //EVONET_INTEGRATIONFUNCTION_H<file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/ModelInterpreterDefaultDevice.h> #include <EvoNet/io/ModelInterpreterFileDefaultDevice.h> #include <EvoNet/simulator/MetabolomicsClassificationDataSimulator.h> #include <EvoNet/models/CVAEFullyConnDefaultDevice.h> #include <unsupported/Eigen/CXX11/Tensor> #include "Metabolomics_CVAE.h" using namespace EvoNet; using namespace EvoNetMetabolomics; template<class ...ParameterTypes> void main_(const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); // define the data simulator std::cout << "Making the training and validation data..." << std::endl; MetabolomicsClassificationDataSimulator<float> data_simulator; const int n_features = makeDataSimulator(data_simulator, args...); // Make the input nodes std::vector<std::string> input_nodes; makeInputNodes(input_nodes, n_features); // Make the output nodes std::vector<std::string> encoding_nodes_alpha = makeAlphaEncodingNodes(args...); // define the model trainer CVAEFullyConnDefaultDevice<float> model_trainer; makeModelTrainer<float>(model_trainer, std::vector<std::string>(), std::vector<std::string>(), std::vector<std::string>(), std::vector<std::string>(), encoding_nodes_alpha, args...); // define the model and resources Model<float> model; std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; ModelInterpreterFileDefaultDevice<float> model_interpreter_file; makeModelAndInterpreters(model, model_trainer, model_interpreters, model_interpreter_file, n_features, args...); // define the model logger ModelLogger<float> model_logger(true, true, true, false, false, false, false); // Train the model std::pair<std::vector<float>, std::vector<float>> model_errors = model_trainer.trainModel(model, data_simulator, input_nodes, model_logger, model_interpreters.front()); } // Main int main(int argc, char** argv) { // Parse the user commands int id_int = -1; std::string parameters_filename = ""; parseCommandLineArguments(argc, argv, id_int, parameters_filename); // Set the parameter names and defaults EvoNetParameters::General::ID id("id", -1); EvoNetParameters::General::DataDir data_dir("data_dir", std::string("")); EvoNetParameters::General::OutputDir output_dir("output_dir", std::string("")); EvoNetParameters::Main::DeviceId device_id("device_id", 0); EvoNetParameters::Main::ModelName model_name("model_name", ""); EvoNetParameters::Main::MakeModel make_model("make_model", true); EvoNetParameters::Main::LoadModelCsv load_model_csv("load_model_csv", false); EvoNetParameters::Main::LoadModelBinary load_model_binary("load_model_binary", false); EvoNetParameters::Main::TrainModel train_model("train_model", true); EvoNetParameters::Main::EvolveModel evolve_model("evolve_model", false); EvoNetParameters::Main::EvaluateModel evaluate_model("evaluate_model", false); EvoNetParameters::Main::EvaluateModels evaluate_models("evaluate_models", false); EvoNetParameters::Examples::ModelType model_type("model_type", "EncDec"); // Options include EncDec, Enc, Dec EvoNetParameters::Examples::SimulationType simulation_type("simulation_type", ""); EvoNetParameters::Examples::BiochemicalRxnsFilename biochemical_rxns_filename("biochemical_rxns_filename", ""); EvoNetParameters::Examples::MetaboDataTrainFilename metabo_data_train_filename("metabo_data_train_filename", ""); EvoNetParameters::Examples::MetaboDataTestFilename metabo_data_test_filename("metabo_data_test_filename", ""); EvoNetParameters::Examples::MetaDataTrainFilename meta_data_train_filename("meta_data_train_filename", ""); EvoNetParameters::Examples::MetaDataTestFilename meta_data_test_filename("meta_data_test_filename", ""); EvoNetParameters::Examples::UseConcentrations use_concentrations("use_concentrations", true); EvoNetParameters::Examples::UseMARs use_MARs("use_MARs", false); EvoNetParameters::Examples::SampleValues sample_values("sample_values", true); EvoNetParameters::Examples::IterValues iter_values("iter_values", false); EvoNetParameters::Examples::FillSampling fill_sampling("fill_sampling", false); EvoNetParameters::Examples::FillMean fill_mean("fill_mean", false); EvoNetParameters::Examples::FillZero fill_zero("fill_zero", false); EvoNetParameters::Examples::ApplyFoldChange apply_fold_change("apply_fold_change", false); EvoNetParameters::Examples::FoldChangeRef fold_change_ref("fold_change_ref", "Evo04"); EvoNetParameters::Examples::FoldChangeLogBase fold_change_log_base("fold_change_log_base", 10); EvoNetParameters::Examples::OfflineLinearScaleInput offline_linear_scale_input("offline_linear_scale_input", true); EvoNetParameters::Examples::OfflineLogTransformInput offline_log_transform_input("offline_log_transform_input", false); EvoNetParameters::Examples::OfflineStandardizeInput offline_standardize_input("offline_standardize_input", false); EvoNetParameters::Examples::OnlineLinearScaleInput online_linear_scale_input("online_linear_scale_input", false); EvoNetParameters::Examples::OnlineLogTransformInput online_log_transform_input("online_log_transform_input", false); EvoNetParameters::Examples::OnlineStandardizeInput online_standardize_input("online_standardize_input", false); EvoNetParameters::Examples::SupervisionWarmup supervision_warmup("supervision_warmup", false); EvoNetParameters::Examples::SupervisionPercent supervision_percent("supervision_percent", 0); EvoNetParameters::PopulationTrainer::PopulationName population_name("population_name", ""); EvoNetParameters::PopulationTrainer::NGenerations n_generations("n_generations", 1); EvoNetParameters::PopulationTrainer::NInterpreters n_interpreters("n_interpreters", 1); EvoNetParameters::PopulationTrainer::PruneModelNum prune_model_num("prune_model_num", 10); EvoNetParameters::PopulationTrainer::RemoveIsolatedNodes remove_isolated_nodes("remove_isolated_nodes", true); EvoNetParameters::PopulationTrainer::CheckCompleteModelInputToOutput check_complete_model_input_to_output("check_complete_model_input_to_output", true); EvoNetParameters::PopulationTrainer::PopulationSize population_size("population_size", 128); EvoNetParameters::PopulationTrainer::NTop n_top("n_top", 8); EvoNetParameters::PopulationTrainer::NRandom n_random("n_random", 8); EvoNetParameters::PopulationTrainer::NReplicatesPerModel n_replicates_per_model("n_replicates_per_model", 1); EvoNetParameters::PopulationTrainer::ResetModelCopyWeights reset_model_copy_weights("reset_model_copy_weights", true); EvoNetParameters::PopulationTrainer::ResetModelTemplateWeights reset_model_template_weights("reset_model_template_weights", true); EvoNetParameters::PopulationTrainer::Logging population_logging("population_logging", true); EvoNetParameters::PopulationTrainer::SetPopulationSizeFixed set_population_size_fixed("set_population_size_fixed", false); EvoNetParameters::PopulationTrainer::SetPopulationSizeDoubling set_population_size_doubling("set_population_size_doubling", true); EvoNetParameters::PopulationTrainer::SetTrainingStepsByModelSize set_training_steps_by_model_size("set_training_steps_by_model_size", false); EvoNetParameters::ModelTrainer::BatchSize batch_size("batch_size", 32); EvoNetParameters::ModelTrainer::MemorySize memory_size("memory_size", 64); EvoNetParameters::ModelTrainer::NEpochsTraining n_epochs_training("n_epochs_training", 1000); EvoNetParameters::ModelTrainer::NEpochsValidation n_epochs_validation("n_epochs_validation", 25); EvoNetParameters::ModelTrainer::NEpochsEvaluation n_epochs_evaluation("n_epochs_evaluation", 10); EvoNetParameters::ModelTrainer::NTBTTSteps n_tbtt_steps("n_tbtt_steps", 64); EvoNetParameters::ModelTrainer::NTETTSteps n_tett_steps("n_tett_steps", 64); EvoNetParameters::ModelTrainer::Verbosity verbosity("verbosity", 1); EvoNetParameters::ModelTrainer::LoggingTraining logging_training("logging_training", true); EvoNetParameters::ModelTrainer::LoggingValidation logging_validation("logging_validation", false); EvoNetParameters::ModelTrainer::LoggingEvaluation logging_evaluation("logging_evaluation", true); EvoNetParameters::ModelTrainer::FindCycles find_cycles("find_cycles", true); EvoNetParameters::ModelTrainer::FastInterpreter fast_interpreter("fast_interpreter", true); EvoNetParameters::ModelTrainer::PreserveOoO preserve_ooo("preserve_ooo", true); EvoNetParameters::ModelTrainer::InterpretModel interpret_model("interpret_model", true); EvoNetParameters::ModelTrainer::ResetModel reset_model("reset_model", false); EvoNetParameters::ModelTrainer::NHidden0 n_hidden_0("n_hidden_0", 16); EvoNetParameters::ModelTrainer::NHidden1 n_hidden_1("n_hidden_1", 0); EvoNetParameters::ModelTrainer::NHidden2 n_hidden_2("n_hidden_2", 0); EvoNetParameters::ModelTrainer::LossFncWeight0 loss_fnc_weight_0("loss_fnc_weight_0", 1); // Classification loss EvoNetParameters::ModelTrainer::LossFncWeight1 loss_fnc_weight_1("loss_fnc_weight_1", 1); // Reconstruction loss EvoNetParameters::ModelTrainer::LossFncWeight2 loss_fnc_weight_2("loss_fnc_weight_2", 0); EvoNetParameters::ModelTrainer::LearningRate learning_rate("learning_rate", 1e-5); EvoNetParameters::ModelTrainer::GradientClipping gradient_clipping("gradient_clipping", 10); EvoNetParameters::ModelTrainer::ResetInterpreter reset_interpreter("reset_interpreter", true); EvoNetParameters::ModelTrainer::LossFunction loss_function("loss_function", "MSE"); EvoNetParameters::ModelTrainer::KLDivergenceWarmup KL_divergence_warmup("KL_divergence_warmup", true); EvoNetParameters::ModelTrainer::NEncodingsContinuous n_encodings_continuous("n_encodings_continuous", 8); EvoNetParameters::ModelTrainer::NEncodingsCategorical n_encodings_categorical("n_encodings_categorical", 8); EvoNetParameters::ModelTrainer::BetaC beta_c("beta_c", 30); EvoNetParameters::ModelTrainer::BetaD beta_d("beta_d", 30); EvoNetParameters::ModelTrainer::CapacityC capacity_c("capacity_c", 5); EvoNetParameters::ModelTrainer::CapacityD capacity_d("capacity_d", 5); EvoNetParameters::ModelReplicator::NNodeDownAdditionsLB n_node_down_additions_lb("n_node_down_additions_lb", 0); EvoNetParameters::ModelReplicator::NNodeRightAdditionsLB n_node_right_additions_lb("n_node_right_additions_lb", 0); EvoNetParameters::ModelReplicator::NNodeDownCopiesLB n_node_down_copies_lb("n_node_down_copies_lb", 0); EvoNetParameters::ModelReplicator::NNodeRightCopiesLB n_node_right_copies_lb("n_node_right_copies_lb", 0); EvoNetParameters::ModelReplicator::NLinkAdditionsLB n_link_additons_lb("n_link_additons_lb", 0); EvoNetParameters::ModelReplicator::NLinkCopiesLB n_link_copies_lb("n_link_copies_lb", 0); EvoNetParameters::ModelReplicator::NNodeDeletionsLB n_node_deletions_lb("n_node_deletions_lb", 0); EvoNetParameters::ModelReplicator::NLinkDeletionsLB n_link_deletions_lb("n_link_deletions_lb", 0); EvoNetParameters::ModelReplicator::NNodeActivationChangesLB n_node_activation_changes_lb("n_node_activation_changes_lb", 0); EvoNetParameters::ModelReplicator::NNodeIntegrationChangesLB n_node_integration_changes_lb("n_node_integration_changes_lb", 0); EvoNetParameters::ModelReplicator::NModuleAdditionsLB n_module_additions_lb("n_module_additions_lb", 0); EvoNetParameters::ModelReplicator::NModuleCopiesLB n_module_copies_lb("n_module_copies_lb", 0); EvoNetParameters::ModelReplicator::NModuleDeletionsLB n_module_deletions_lb("n_module_deletions_lb", 0); EvoNetParameters::ModelReplicator::NNodeDownAdditionsUB n_node_down_additions_ub("n_node_down_additions_ub", 0); EvoNetParameters::ModelReplicator::NNodeRightAdditionsUB n_node_right_additions_ub("n_node_right_additions_ub", 0); EvoNetParameters::ModelReplicator::NNodeDownCopiesUB n_node_down_copies_ub("n_node_down_copies_ub", 0); EvoNetParameters::ModelReplicator::NNodeRightCopiesUB n_node_right_copies_ub("n_node_right_copies_ub", 0); EvoNetParameters::ModelReplicator::NLinkAdditionsUB n_link_additons_ub("n_link_additons_ub", 0); EvoNetParameters::ModelReplicator::NLinkCopiesUB n_link_copies_ub("n_link_copies_ub", 0); EvoNetParameters::ModelReplicator::NNodeDeletionsUB n_node_deletions_ub("n_node_deletions_ub", 0); EvoNetParameters::ModelReplicator::NLinkDeletionsUB n_link_deletions_ub("n_link_deletions_ub", 0); EvoNetParameters::ModelReplicator::NNodeActivationChangesUB n_node_activation_changes_ub("n_node_activation_changes_ub", 0); EvoNetParameters::ModelReplicator::NNodeIntegrationChangesUB n_node_integration_changes_ub("n_node_integration_changes_ub", 0); EvoNetParameters::ModelReplicator::NModuleAdditionsUB n_module_additions_ub("n_module_additions_ub", 0); EvoNetParameters::ModelReplicator::NModuleCopiesUB n_module_copies_ub("n_module_copies_ub", 0); EvoNetParameters::ModelReplicator::NModuleDeletionsUB n_module_deletions_ub("n_module_deletions_ub", 0); EvoNetParameters::ModelReplicator::SetModificationRateFixed set_modification_rate_fixed("set_modification_rate_fixed", false); EvoNetParameters::ModelReplicator::SetModificationRateByPrevError set_modification_rate_by_prev_error("set_modification_rate_by_prev_error", false); auto parameters = std::make_tuple(id, data_dir, output_dir, device_id, model_name, make_model, load_model_csv, load_model_binary, train_model, evolve_model, evaluate_model, evaluate_models, model_type, simulation_type, biochemical_rxns_filename, metabo_data_train_filename, metabo_data_test_filename, meta_data_train_filename, meta_data_test_filename, use_concentrations, use_MARs, sample_values, iter_values, fill_sampling, fill_mean, fill_zero, apply_fold_change, fold_change_ref, fold_change_log_base, offline_linear_scale_input, offline_log_transform_input, offline_standardize_input, online_linear_scale_input, online_log_transform_input, online_standardize_input, supervision_warmup, supervision_percent, population_name, n_generations, n_interpreters, /*prune_model_num, remove_isolated_nodes, check_complete_model_input_to_output, population_size, n_top, n_random, n_replicates_per_model, reset_model_copy_weights, reset_model_template_weights, population_logging, set_population_size_fixed, set_population_size_doubling, set_training_steps_by_model_size,*/ batch_size, memory_size, n_epochs_training, n_epochs_validation, n_epochs_evaluation, n_tbtt_steps, n_tett_steps, verbosity, logging_training, logging_validation, logging_evaluation, find_cycles, fast_interpreter, preserve_ooo, interpret_model, reset_model, n_hidden_0, n_hidden_1, n_hidden_2, loss_fnc_weight_0, loss_fnc_weight_1, loss_fnc_weight_2, learning_rate, gradient_clipping, reset_interpreter, loss_function, KL_divergence_warmup, n_encodings_continuous, n_encodings_categorical, beta_c, beta_d, capacity_c, capacity_d/*, n_node_down_additions_lb, n_node_right_additions_lb, n_node_down_copies_lb, n_node_right_copies_lb, n_link_additons_lb, n_link_copies_lb, n_node_deletions_lb, n_link_deletions_lb, n_node_activation_changes_lb, n_node_integration_changes_lb, n_module_additions_lb, n_module_copies_lb, n_module_deletions_lb, n_node_down_additions_ub, n_node_right_additions_ub, n_node_down_copies_ub, n_node_right_copies_ub, n_link_additons_ub, n_link_copies_ub, n_node_deletions_ub, n_link_deletions_ub, n_node_activation_changes_ub, n_node_integration_changes_ub, n_module_additions_ub, n_module_copies_ub, n_module_deletions_ub, set_modification_rate_fixed, set_modification_rate_by_prev_error*/); // Read in the parameters LoadParametersFromCsv loadParametersFromCsv(id_int, parameters_filename); parameters = EvoNet::apply([&loadParametersFromCsv](auto&& ...args) { return loadParametersFromCsv(args...); }, parameters); // Run the application EvoNet::apply([](auto&& ...args) { main_(args ...); }, parameters); return 0; }<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_HARMONICOSCILLATORSIMULATOR_H #define EVONET_HARMONICOSCILLATORSIMULATOR_H #include <EvoNet/simulator/DataSimulator.h> #include <EvoNet/core/Preprocessing.h> namespace EvoNet { /** @brief Simulator of various Harmonic Oscillators References: https://www.dcode.fr/differential-equation-solver http://www.physics.hmc.edu/~saeta/courses/p111/uploads/Y2013/chap13.pdf https://projects.ncsu.edu/crsc/events/ugw05/slides/root_harmonic.pdf http://www.sharetechnote.com/html/DE_Modeling_Example_SpringMass.html#SingleSpring_SimpleHarmonic */ template<typename TensorT> class HarmonicOscillatorSimulator : public DataSimulator<TensorT> { public: // TODO: Move simulateValidation/Training/Evaluation methods from example and provide unit testing /* @brief 3 weight and 2 spring system (1D) without damping From Newtons law, the forces on each mass are the following: F1: k1(x2 - x1) = m1x1`` F2: - k1(x2 - x1) + k2(x3 - x2) = m2x2`` F3: - k2(x3 - x2) = m3x3`` Let k1, k2 = k; and m1, m2, m3 = m Solving the ODE analytical for each displacement gives F1: x1 = x2 + A1*sin(wt) + A1*cos(wt) F2: x2 = (kx1 + kx3)/(2k) + A2*sin(sqrt(2)*wt) + A2*cos(sqrt(2)*wt) F3: x3 = x2 + A3*sin(wt) + A3*cos(wt) where w = sqrt(k/m) [TODO: add tests] @param[in, out] time_steps (dim0: n_time_steps) @param[in, out] displacements (dim0: n_time_steps, dim1: x1...x3 displacements) @param[in] n_time_steps The number of time-steps @param[in] time_intervals The spacing between time-steps @param[in] A1...A3 The amplitudes for each of the mass oscillations @param[in] m1...m3 The mass values @param[in] x1o...x3o The starting mass displacements from their starting positions @param[in] k The spring constant (for simplicity, we assume all spring constants are the same) @returns time_steps and displacements for the system **/ static void WeightSpring3W2S1D( Eigen::Tensor<TensorT, 1>& time_steps, Eigen::Tensor<TensorT, 2>& displacements, const int& n_time_steps, const TensorT& time_intervals, const TensorT& A1, const TensorT& A2, const TensorT& A3, const TensorT& m1, const TensorT& m2, const TensorT& m3, const TensorT& x1o, const TensorT& x2o, const TensorT& x3o, const TensorT& k); /* @brief 1 weight and 1 spring system (1D) without damping system Where the spring is tethered to a rigid body Analytical solution F1: x1 = x1o*cost(wt) + v1o/w*sin(wt) where w = sqrt(k/m) and where x1o is the initial displacement with initial velocity vo [TODO: add tests] @param[in, out] time_steps (dim0: n_time_steps) @param[in, out] displacements (dim0: n_time_steps, dim1: x1...x3 displacements) @param[in] n_time_steps The number of time-steps @param[in] time_intervals The spacing between time-steps @param[in] m1 The mass values @param[in] k The spring constant (for simplicity, we assume all spring constants are the same) @param[in] x1o The starting mass displacements from their starting positions @param[in] v1o The starting mass velocity (e.g., 0) @returns time_steps and displacements for the system **/ static void WeightSpring1W1S1D( Eigen::Tensor<TensorT, 1>& time_steps, Eigen::Tensor<TensorT, 2>& displacements, const int& n_time_steps, const TensorT& time_intervals, const TensorT& m1, const TensorT& k1, const TensorT& x1o, const TensorT& v1o); /* @brief 1 weight and 1 spring system (1D) with damping system Where the spring is tethered to a rigid body Analytical solution for 0 < beta < 1 F1: x1 = exp(-beta1 * w * t * ((v1o + beta1 * w * x1o) / wd) * sin(wd * t) + x1o * cos(wd * t)); where w = sqrt(k1 / m) and where wd = w * sqrt(1-pow(beta1, 2)) and where x1o is the initial displacement with initial velocity v1o [TODO: add tests] @param[in, out] time_steps (dim0: n_time_steps) @param[in, out] displacements (dim0: n_time_steps, dim1: x1...x3 displacements) @param[in] n_time_steps The number of time-steps @param[in] time_intervals The spacing between time-steps @param[in] m1 The mass values @param[in] k The spring constant (for simplicity, we assume all spring constants are the same) @param[in] beta The damping constant @param[in] x1o The starting mass displacements from their starting positions @param[in] v1o The starting mass velocity (e.g., 0) @returns time_steps and displacements for the system **/ static void WeightSpring1W1S1DwDamping( Eigen::Tensor<TensorT, 1>& time_steps, Eigen::Tensor<TensorT, 2>& displacements, const int& n_time_steps, const TensorT& time_intervals, const TensorT& m1, const TensorT& k1, const TensorT& beta1, const TensorT& x1o, const TensorT& v1o); /* @brief 2 weight and 3 spring system (1D) without damping Where the two end springs are tethered to rigid bodies References: See equations 13.30 and 13.31 in Chapter 13 Coupled Oscillators [TODO: add tests] @param[in, out] time_steps (dim0: n_time_steps) @param[in, out] displacements (dim0: n_time_steps, dim1: x1...x3 displacements) @param[in] n_time_steps The number of time-steps @param[in] time_intervals The spacing between time-steps @param[in] A1, A2 The amplitudes for each of the mass oscillations @param[in] m1, m2 The mass values @param[in] x1o, x2o The starting mass displacements from their starting positions @param[in] k1 are the spring constants (assuming the same spring constant for simplicity) @returns time_steps and displacements for the system **/ static void WeightSpring2W3S1D( Eigen::Tensor<TensorT, 1>& time_steps, Eigen::Tensor<TensorT, 2>& displacements, const int& n_time_steps, const TensorT& time_intervals, const TensorT& A1, const TensorT& A2, const TensorT& A3, const TensorT& m1, const TensorT& m2, const TensorT& m3, const TensorT& x1o, const TensorT& x2o, const TensorT& k1); // [TODO: add option for gaussian_noise] TensorT gaussian_noise_ = 0; ///< the amount of gaussian noise to add to the oscillator trajectories }; template<typename TensorT> void HarmonicOscillatorSimulator<TensorT>::WeightSpring3W2S1D( Eigen::Tensor<TensorT, 1>& time_steps, Eigen::Tensor<TensorT, 2>& displacements, const int& n_time_steps, const TensorT& time_intervals, const TensorT& A1, const TensorT& A2, const TensorT& A3, const TensorT& m1, const TensorT& m2, const TensorT& m3, const TensorT& x1o, const TensorT& x2o, const TensorT& x3o, const TensorT& k) { // Quick checks assert(n_time_steps == time_steps.dimension(0)); assert(n_time_steps == displacements.dimension(0)); assert(displacements.dimension(1) == 3); // Analytical solutions to for each mass auto x1_lambda = [](const TensorT& t, const TensorT& k, const TensorT& m1, const TensorT& A1, const TensorT& x2) { const TensorT w = sqrt(k / m1); return x2 + A1 * sin(w * t) + A1 * cos(w * t); }; auto x2_lambda = [](const TensorT& t, const TensorT& k, const TensorT& m2, const TensorT& A2, const TensorT& x1, const TensorT& x3) { const TensorT w = sqrt(k / m2); return (k * x1 + k * x3) / (2*k) + A2 * sin(sqrt(2) * w * t) + A2 * cos(sqrt(2) * w * t); }; //auto x2_lambda = [](const TensorT& t, const TensorT& k, // const TensorT& m1, const TensorT& m2, const TensorT& m3, // const TensorT& A1, const TensorT& A2, const TensorT& A3) { // // And after substitution of x1 and x3 // const TensorT w1 = sqrt(k / m1); // const TensorT w2 = sqrt(k / m2); // const TensorT w3 = sqrt(k / m3); // return -(A1 * sin(w1*t) + A1 * cos(w1*t) + A3 * sin(w3*t) + A3 * cos(w3*t)) / 2 - A2 * sin(sqrt(2)*w2*t) - A2 * cos(sqrt(2)*w2*t); //}; auto x3_lambda = [](const TensorT& t, const TensorT& k, const TensorT& m3, const TensorT& A3, const TensorT& x2) { const TensorT w = sqrt(k / m3); return x2 + A3 * sin(w * t) + A3 * cos(w * t); }; // Make the time-steps and displacements time_steps(0) = 0; displacements(0, 0) = x1o; displacements(0, 1) = x2o; displacements(0, 2) = x3o; for (int iter = 1; iter < n_time_steps; ++iter) { time_steps(iter) = time_steps(iter - 1) + time_intervals; displacements(iter, 0) = x1_lambda(time_steps(iter), k, m1, A1, displacements(0, 1)); displacements(iter, 2) = x3_lambda(time_steps(iter), k, m3, A3, displacements(0, 1)); //displacements(iter, 1) = x2_lambda(time_steps(iter), k, m1, m2, m3, A1, A2, A3); displacements(iter, 1) = x2_lambda(time_steps(iter), k, m2, A2, displacements(0, 0), displacements(0, 2)); } } template<typename TensorT> inline void HarmonicOscillatorSimulator<TensorT>::WeightSpring1W1S1D(Eigen::Tensor<TensorT, 1>& time_steps, Eigen::Tensor<TensorT, 2>& displacements, const int& n_time_steps, const TensorT& time_intervals, const TensorT& m1, const TensorT& k1, const TensorT& x1o, const TensorT& v1o) { // Quick checks assert(n_time_steps == time_steps.dimension(0)); assert(n_time_steps == displacements.dimension(0)); assert(displacements.dimension(1) == 1); // Analytical solutions to for each mass auto x1_lambda = [](const TensorT& t, const TensorT& k1, const TensorT& m1, const TensorT& x1o, const TensorT& v1o) { const TensorT w = sqrt(k1 / m1); return x1o * cos(w * t) + v1o / w * sin(w * t); }; // Make the time-steps and displacements time_steps(0) = 0; displacements(0, 0) = x1o; for (int iter = 1; iter < n_time_steps; ++iter) { time_steps(iter) = time_steps(iter - 1) + time_intervals; displacements(iter, 0) = x1_lambda(time_steps(iter), k1, m1, x1o, v1o); } } template<typename TensorT> void HarmonicOscillatorSimulator<TensorT>::WeightSpring1W1S1DwDamping( Eigen::Tensor<TensorT, 1>& time_steps, Eigen::Tensor<TensorT, 2>& displacements, const int& n_time_steps, const TensorT& time_intervals, const TensorT& m1, const TensorT& k1, const TensorT& beta1, const TensorT& x1o, const TensorT& v1o) { // Quick checks assert(n_time_steps == time_steps.dimension(0)); assert(n_time_steps == displacements.dimension(0)); assert(displacements.dimension(1) == 1); // Analytical solutions to for each mass auto x1_lambda = [](const TensorT& t, const TensorT& k1, const TensorT& beta1, const TensorT& m1, const TensorT& x1o, const TensorT& v1o) { const TensorT w = sqrt(k1 / m1); const TensorT check = pow(beta1, 2) - 4 * pow(w, 2); if (check < 0) { const TensorT gamma = 0.5 * sqrt(4 * pow(w, 2) - pow(beta1, 2)); const TensorT A = x1o; const TensorT B = beta1 * x1o / (2 * gamma) + v1o / gamma; return exp(-beta1 * t / 2) * (A * cos(gamma * t) + B * sin(gamma * t)); } else if (check == 0) { const TensorT A = x1o; const TensorT B = w * x1o + v1o; return exp(-w * t) * (A + B * t); } else if (check > 0) { const TensorT rneg = 0.5 * (-beta1 - sqrt(pow(beta1, 2) - 4 * pow(w, 2))); const TensorT rpos = 0.5 * (-beta1 + sqrt(pow(beta1, 2) - 4 * pow(w, 2))); const TensorT A = x1o - (rneg*x1o - v1o)/(rneg - rpos); const TensorT B = (rneg * x1o - v1o) / (rneg - rpos); return A*exp(rneg * t) + B * exp(rpos * t); } }; // Make the time-steps and displacements time_steps(0) = 0; displacements(0, 0) = x1o; for (int iter = 1; iter < n_time_steps; ++iter) { time_steps(iter) = time_steps(iter - 1) + time_intervals; displacements(iter, 0) = x1_lambda(time_steps(iter), k1, beta1, m1, x1o, v1o); } } } #endif //EVONET_HARMONICOSCILLATORSIMULATOR_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_ACTIVATIONFUNCTION_H #define EVONET_ACTIVATIONFUNCTION_H #include <EvoNet/core/Preprocessing.h> #include <unsupported/Eigen/CXX11/Tensor> #include <random> #include <iostream> #include <limits> #include <cereal/access.hpp> // serialiation of private members #undef min // clashes with std::limit on windows in polymorphic.hpp #undef max // clashes with std::limit on windows in polymorphic.hpp #include <cereal/types/polymorphic.hpp> namespace EvoNet { /** @brief Base class for all activation functions. */ template<typename TensorT> class ActivationOp { public: ActivationOp() = default; ActivationOp(const TensorT& eps, const TensorT& min, const TensorT& max) : eps_(eps), min_(min), max_(max) {}; virtual ~ActivationOp() = default; void setEps(const TensorT& eps) { eps_ = eps; } void setMin(const TensorT& min) { min_ = min; } void setMax(const TensorT& max) { max_ = max; } TensorT getEps() const { return eps_; } TensorT getMin() const { return min_; } TensorT getMax() const { return max_; } virtual std::string getName() const = 0; virtual std::vector<TensorT> getParameters() const = 0; virtual ActivationOp<TensorT>* copy() const = 0; //#endif // !EVONET_CUDA protected: TensorT eps_ = (TensorT)1e-6; ///< threshold to clip between min and max TensorT min_ =TensorT(-1e9); TensorT max_ = TensorT(1e9); private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(eps_, min_, max_); } }; /** @brief Rectified Linear Unit (ReLU) activation function References: <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2000). Digital selection and analogue amplification coexist in a cortex-inspired silicon circuit. Nature. 405. pp. 947–951. */ template<typename TensorT> class ReLUOp: public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const{return "ReLUOp";}; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->getEps(), this->getMin(), this->getMax()}); } ActivationOp<TensorT>* copy() const { return new ReLUOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Rectified Linear Unit (ReLU) gradient References: <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2000). Digital selection and analogue amplification coexist in a cortex-inspired silicon circuit. Nature. 405. pp. 947–951. */ template<typename TensorT> class ReLUGradOp: public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const{return "ReLUGradOp";}; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new ReLUGradOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Exponential Linear Unit (ELU) activation function References: <NAME>; <NAME>; <NAME> (2015). "Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)". arXiv:1511.07289 */ template<typename TensorT> class ELUOp: public ActivationOp<TensorT> { public: ELUOp() = default; ~ELUOp() = default; ELUOp(const TensorT& eps, const TensorT& min, const TensorT& max, const TensorT& alpha) : ActivationOp<TensorT>(eps, min, max), alpha_(alpha) {}; void setAlpha(const TensorT& alpha) { alpha_ = alpha; }; TensorT getAlpha() const { return alpha_; }; std::string getName() const{return "ELUOp";}; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax(), alpha_ }); } ActivationOp<TensorT>* copy() const { return new ELUOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this), alpha_); } TensorT alpha_ = 1; }; /** @brief Exponential Linear Unit (ELU) gradient References: <NAME>; <NAME>; <NAME> (2015). "Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)". arXiv:1511.07289 */ template<typename TensorT> class ELUGradOp: public ActivationOp<TensorT> { public: ELUGradOp() = default; ~ELUGradOp() = default; ELUGradOp(const TensorT& eps, const TensorT& min, const TensorT& max, const TensorT& alpha) : ActivationOp<TensorT>(eps, min, max), alpha_(alpha) {}; ELUGradOp(const TensorT& alpha): alpha_(alpha){}; void setAlpha(const TensorT& alpha) { alpha_ = alpha; }; TensorT getAlpha() const { return alpha_; }; std::string getName() const{return "ELUGradOp";}; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax(), alpha_ }); } ActivationOp<TensorT>* copy() const { return new ELUGradOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this), alpha_); } TensorT alpha_ = 1; }; /** @brief Sigmoid activation function */ template<typename TensorT> class SigmoidOp: public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const{return "SigmoidOp";}; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new SigmoidOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Sigmoid gradient */ template<typename TensorT> class SigmoidGradOp: public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const{return "SigmoidGradOp";}; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new SigmoidGradOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Hyperbolic Tangent activation function */ template<typename TensorT> class TanHOp: public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const{return "TanHOp";}; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new TanHOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Hyperbolic Tangent gradient */ template<typename TensorT> class TanHGradOp: public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const{return "TanHGradOp";}; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new TanHGradOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Rectified Hyperbolic Tangent activation function */ template<typename TensorT> class ReTanHOp: public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const{return "ReTanHOp";}; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new ReTanHOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Rectified Hyperbolic Tangent gradient */ template<typename TensorT> class ReTanHGradOp: public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const{return "ReTanHGradOp";}; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new ReTanHGradOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Linear activation function */ template<typename TensorT> class LinearOp : public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const { return "LinearOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new LinearOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Linear gradient */ template<typename TensorT> class LinearGradOp : public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const { return "LinearGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new LinearGradOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Inverse activation function */ template<typename TensorT> class InverseOp : public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const { return "InverseOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new InverseOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Inverse gradient */ template<typename TensorT> class InverseGradOp : public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const { return "InverseGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new InverseGradOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Exponential activation function */ template<typename TensorT> class ExponentialOp : public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const { return "ExponentialOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new ExponentialOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Exponential gradient */ template<typename TensorT> class ExponentialGradOp : public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const { return "ExponentialGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new ExponentialGradOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Log activation function */ template<typename TensorT> class LogOp : public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const { return "LogOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new LogOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Log gradient */ template<typename TensorT> class LogGradOp : public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const { return "LogGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new LogGradOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Pow activation function */ template<typename TensorT> class PowOp : public ActivationOp<TensorT> { public: PowOp() = default; ~PowOp() = default; PowOp(const TensorT& eps, const TensorT& min, const TensorT& max, const TensorT& base) : ActivationOp<TensorT>(eps, min, max), base_(base) {}; PowOp(const TensorT& base) : base_(base) {}; std::string getName() const { return "PowOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax(), base_ }); } ActivationOp<TensorT>* copy() const { return new PowOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this), base_); } TensorT base_; }; /** @brief Pow gradient */ template<typename TensorT> class PowGradOp : public ActivationOp<TensorT> { public: PowGradOp() = default; ~PowGradOp() = default; PowGradOp(const TensorT& eps, const TensorT& min, const TensorT& max, const TensorT& base) : ActivationOp<TensorT>(eps, min, max), base_(base) {}; PowGradOp(const TensorT& base) : base_(base) {}; std::string getName() const { return "PowGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax(), base_ }); } ActivationOp<TensorT>* copy() const { return new PowGradOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this), base_); } TensorT base_; }; /** @brief LeakyReLU activation function default alpha = 1e-2 */ template<typename TensorT> class LeakyReLUOp : public ActivationOp<TensorT> { public: LeakyReLUOp() = default; ~LeakyReLUOp() = default; LeakyReLUOp(const TensorT& eps, const TensorT& min, const TensorT& max, const TensorT& alpha) : ActivationOp<TensorT>(eps, min, max), alpha_(alpha) {}; LeakyReLUOp(const TensorT& alpha) : alpha_(alpha) {}; void setAlpha(const TensorT& alpha) { alpha_ = alpha; }; TensorT getAlpha() const { return alpha_; }; std::string getName() const { return "LeakyReLUOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax(), alpha_ }); } ActivationOp<TensorT>* copy() const { return new LeakyReLUOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this), alpha_); } TensorT alpha_ = 1e-2; }; /** @brief LeakyReLU gradient */ template<typename TensorT> class LeakyReLUGradOp : public ActivationOp<TensorT> { public: LeakyReLUGradOp() = default; ~LeakyReLUGradOp() = default; LeakyReLUGradOp(const TensorT& eps, const TensorT& min, const TensorT& max, const TensorT& alpha) : ActivationOp<TensorT>(eps, min, max), alpha_(alpha) {}; void setAlpha(const TensorT& alpha) { alpha_ = alpha; }; TensorT getAlpha() const { return alpha_; }; std::string getName() const { return "LeakyReLUGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax(), alpha_ }); } ActivationOp<TensorT>* copy() const { return new LeakyReLUGradOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this), alpha_); } TensorT alpha_ = 1e-2; }; /** @brief Sin activation function */ template<typename TensorT> class SinOp : public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const { return "SinOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new SinOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Sin gradient */ template<typename TensorT> class SinGradOp : public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const { return "SinGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new SinGradOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Cos activation function */ template<typename TensorT> class CosOp : public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const { return "CosOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new CosOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief Cos gradient */ template<typename TensorT> class CosGradOp : public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const { return "CosGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new CosGradOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief BatchNorm activation function */ template<typename TensorT> class BatchNormOp : public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const { return "BatchNormOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new BatchNormOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; /** @brief BatchNorm gradient */ template<typename TensorT> class BatchNormGradOp : public ActivationOp<TensorT> { public: using ActivationOp<TensorT>::ActivationOp; std::string getName() const { return "BatchNormGradOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->getEps(), this->getMin(), this->getMax() }); } ActivationOp<TensorT>* copy() const { return new BatchNormGradOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ActivationOp<TensorT>>(this)); } }; } CEREAL_REGISTER_TYPE(EvoNet::ReLUOp<float>); CEREAL_REGISTER_TYPE(EvoNet::ReLUGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::ELUOp<float>); CEREAL_REGISTER_TYPE(EvoNet::ELUGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::SigmoidOp<float>); CEREAL_REGISTER_TYPE(EvoNet::SigmoidGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::TanHOp<float>); CEREAL_REGISTER_TYPE(EvoNet::TanHGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::ReTanHOp<float>); CEREAL_REGISTER_TYPE(EvoNet::ReTanHGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::LinearOp<float>); CEREAL_REGISTER_TYPE(EvoNet::LinearGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::InverseOp<float>); CEREAL_REGISTER_TYPE(EvoNet::InverseGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::ExponentialOp<float>); CEREAL_REGISTER_TYPE(EvoNet::ExponentialGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::LogOp<float>); CEREAL_REGISTER_TYPE(EvoNet::LogGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::PowOp<float>); CEREAL_REGISTER_TYPE(EvoNet::PowGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::LeakyReLUOp<float>); CEREAL_REGISTER_TYPE(EvoNet::LeakyReLUGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::SinOp<float>); CEREAL_REGISTER_TYPE(EvoNet::SinGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::CosOp<float>); CEREAL_REGISTER_TYPE(EvoNet::CosGradOp<float>); CEREAL_REGISTER_TYPE(EvoNet::BatchNormOp<float>); CEREAL_REGISTER_TYPE(EvoNet::BatchNormGradOp<float>); //CEREAL_REGISTER_TYPE(EvoNet::ReLUOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::ReLUGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::ELUOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::ELUGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::SigmoidOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::SigmoidGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::TanHOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::TanHGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::ReTanHOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::ReTanHGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::LinearOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::LinearGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::InverseOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::InverseGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::ExponentialOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::ExponentialGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::LogOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::LogGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::PowOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::PowGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::LeakyReLUOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::LeakyReLUGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::SinOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::SinGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::CosOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::CosGradOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::BatchNormOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::BatchNormGradOp<double>); // //CEREAL_REGISTER_TYPE(EvoNet::ReLUOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::ReLUGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::ELUOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::ELUGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::SigmoidOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::SigmoidGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::TanHOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::TanHGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::ReTanHOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::ReTanHGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::LinearOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::LinearGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::InverseOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::InverseGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::ExponentialOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::ExponentialGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::LogOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::LogGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::PowOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::PowGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::LeakyReLUOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::LeakyReLUGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::SinOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::SinGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::CosOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::CosGradOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::BatchNormOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::BatchNormGradOp<int>); #endif //EVONET_ACTIVATIONFUNCTION_H<file_sep>### the directory name set(directory include/EvoNet/simulator) ### list all header files of the directory here set(sources_list_h AddProbSimulator.h BiochemicalDataSimulator.h BiochemicalReaction.h ChromatogramSimulator.h DataSimulator.h EMGModel.h HarmonicOscillatorSimulator.h MetabolomicsLatentArithmeticDataSimulator.h MetabolomicsClassificationDataSimulator.h MetabolomicsLatentTraversalDataSimulator.h MetabolomicsLatentUnsClassDataSimulator.h MetabolomicsReconstructionDataSimulator.h MNISTSimulator.h PeakSimulator.h ) ### add path to the filenames set(sources_h) foreach(i ${sources_list_h}) list(APPEND sources_h ${directory}/${i}) endforeach(i) ### source group definition source_group("Header Files\\EvoNet\\simulator" FILES ${sources_h}) set(EvoNet_sources_h ${EvoNet_sources_h} ${sources_h}) <file_sep> cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) project("EvoNet_class_tests") #------------------------------------------------------------------------------ # set new CMAKE_RUNTIME_OUTPUT_DIRECTORY for tests and remember old setting set(_TMP_CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/bin) #------------------------------------------------------------------------------ # add EvoNet specific tests add_subdirectory(evonet) #------------------------------------------------------------------------------ # restore old CMAKE_RUNTIME_OUTPUT_DIRECTORY set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${_TMP_CMAKE_RUNTIME_OUTPUT_DIRECTORY}) <file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE LossFunctionTensor test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/LossFunctionTensor.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(lossFunctionTensor) /** ManhattanDistanceLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorManhattanDistanceLossOp) { ManhattanDistanceLossTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ManhattanDistanceLossTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorManhattanDistanceLossOp) { ManhattanDistanceLossTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new ManhattanDistanceLossTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionManhattanDistanceLossOp) { ManhattanDistanceLossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 1, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 1, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** ManhattanDistanceLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorManhattanDistanceLossGradOp) { ManhattanDistanceLossGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ManhattanDistanceLossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorManhattanDistanceLossGradOp) { ManhattanDistanceLossGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new ManhattanDistanceLossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionManhattanDistanceLossGradOp) { ManhattanDistanceLossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), 0.0, 1e-4); //-nan BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), -1.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), 1.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); } /** L2NormLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorL2NormOp) { L2NormLossTensorOp<float, Eigen::DefaultDevice>* ptrL2Norm = nullptr; L2NormLossTensorOp<float, Eigen::DefaultDevice>* nullPointerL2Norm = nullptr; BOOST_CHECK_EQUAL(ptrL2Norm, nullPointerL2Norm); } BOOST_AUTO_TEST_CASE(destructorL2NormOp) { L2NormLossTensorOp<float, Eigen::DefaultDevice>* ptrL2Norm = nullptr; ptrL2Norm = new L2NormLossTensorOp<float, Eigen::DefaultDevice>(); delete ptrL2Norm; } BOOST_AUTO_TEST_CASE(operationfunctionL2NormOp) { L2NormLossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0.5, 1e-4); //TODO BOOST_CHECK_CLOSE(error(1, 0), -2.5, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** L2NormLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorL2NormGradOp) { L2NormLossGradTensorOp<float, Eigen::DefaultDevice>* ptrL2Norm = nullptr; L2NormLossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerL2Norm = nullptr; BOOST_CHECK_EQUAL(ptrL2Norm, nullPointerL2Norm); } BOOST_AUTO_TEST_CASE(destructorL2NormGradOp) { L2NormLossGradTensorOp<float, Eigen::DefaultDevice>* ptrL2Norm = nullptr; ptrL2Norm = new L2NormLossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrL2Norm; } BOOST_AUTO_TEST_CASE(operationfunctionL2NormGradOp) { L2NormLossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), -1.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), 1.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); } /** CrossEntropyOp Tests */ BOOST_AUTO_TEST_CASE(constructorCrossEntropyOp) { BCELossTensorOp<float, Eigen::DefaultDevice>* ptrCrossEntropy = nullptr; BCELossTensorOp<float, Eigen::DefaultDevice>* nullPointerCrossEntropy = nullptr; BOOST_CHECK_EQUAL(ptrCrossEntropy, nullPointerCrossEntropy); } BOOST_AUTO_TEST_CASE(destructorCrossEntropyOp) { BCELossTensorOp<float, Eigen::DefaultDevice>* ptrCrossEntropy = nullptr; ptrCrossEntropy = new BCELossTensorOp<float, Eigen::DefaultDevice>(); delete ptrCrossEntropy; } BOOST_AUTO_TEST_CASE(operationfunctionCrossEntropyOp) { BCELossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0}, {1, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{.1, .9}, {0, 0}}, {{.9, .1}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 4.60517025, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0.21072109, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** CrossEntropyGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorCrossEntropyGradOp) { BCELossGradTensorOp<float, Eigen::DefaultDevice>* ptrCrossEntropy = nullptr; BCELossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerCrossEntropy = nullptr; BOOST_CHECK_EQUAL(ptrCrossEntropy, nullPointerCrossEntropy); } BOOST_AUTO_TEST_CASE(destructorCrossEntropyGradOp) { BCELossGradTensorOp<float, Eigen::DefaultDevice>* ptrCrossEntropy = nullptr; ptrCrossEntropy = new BCELossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrCrossEntropy; } BOOST_AUTO_TEST_CASE(operationfunctionCrossEntropyGradOp) { BCELossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0}, {1, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{.1, .9}, {0, 0}}, {{.9, .1}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), 10.0000, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), 1.11111116, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), -10.0000, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), -1.11111116, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); } /** NegativeLogLikelihoodLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorNegativeLogLikelihoodOp) { NegativeLogLikelihoodLossTensorOp<float, Eigen::DefaultDevice>* ptrNegativeLogLikelihood = nullptr; NegativeLogLikelihoodLossTensorOp<float, Eigen::DefaultDevice>* nullPointerNegativeLogLikelihood = nullptr; BOOST_CHECK_EQUAL(ptrNegativeLogLikelihood, nullPointerNegativeLogLikelihood); } BOOST_AUTO_TEST_CASE(destructorNegativeLogLikelihoodOp) { NegativeLogLikelihoodLossTensorOp<float, Eigen::DefaultDevice>* ptrNegativeLogLikelihood = nullptr; ptrNegativeLogLikelihood = new NegativeLogLikelihoodLossTensorOp<float, Eigen::DefaultDevice>(); delete ptrNegativeLogLikelihood; } BOOST_AUTO_TEST_CASE(operationfunctionNegativeLogLikelihoodOp) { NegativeLogLikelihoodLossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0}, {1, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{.1, .9}, {0, 0}}, {{.9, .1}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 1.15129256, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0.0526802726, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** NegativeLogLikelihoodLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorNegativeLogLikelihoodGradOp) { NegativeLogLikelihoodLossGradTensorOp<float, Eigen::DefaultDevice>* ptrNegativeLogLikelihood = nullptr; NegativeLogLikelihoodLossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerNegativeLogLikelihood = nullptr; BOOST_CHECK_EQUAL(ptrNegativeLogLikelihood, nullPointerNegativeLogLikelihood); } BOOST_AUTO_TEST_CASE(destructorNegativeLogLikelihoodGradOp) { NegativeLogLikelihoodLossGradTensorOp<float, Eigen::DefaultDevice>* ptrNegativeLogLikelihood = nullptr; ptrNegativeLogLikelihood = new NegativeLogLikelihoodLossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrNegativeLogLikelihood; } BOOST_AUTO_TEST_CASE(operationfunctionNegativeLogLikelihoodGradOp) { NegativeLogLikelihoodLossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0}, {1, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{.1, .9}, {0, 0}}, {{.9, .1}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), -5.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), -0.555555582, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); } /** MSELossOp Tests */ BOOST_AUTO_TEST_CASE(constructorMSEOp) { MSELossTensorOp<float, Eigen::DefaultDevice>* ptrMSE = nullptr; MSELossTensorOp<float, Eigen::DefaultDevice>* nullPointerMSE = nullptr; BOOST_CHECK_EQUAL(ptrMSE, nullPointerMSE); } BOOST_AUTO_TEST_CASE(destructorMSEOp) { MSELossTensorOp<float, Eigen::DefaultDevice>* ptrMSE = nullptr; ptrMSE = new MSELossTensorOp<float, Eigen::DefaultDevice>(); delete ptrMSE; } BOOST_AUTO_TEST_CASE(operationfunctionMSEOp) { MSELossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0.25, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0.25, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** MSELossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMSEGradOp) { MSELossGradTensorOp<float, Eigen::DefaultDevice>* ptrMSE = nullptr; MSELossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerMSE = nullptr; BOOST_CHECK_EQUAL(ptrMSE, nullPointerMSE); } BOOST_AUTO_TEST_CASE(destructorMSEGradOp) { MSELossGradTensorOp<float, Eigen::DefaultDevice>* ptrMSE = nullptr; ptrMSE = new MSELossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrMSE; } BOOST_AUTO_TEST_CASE(operationfunctionMSEGradOp) { MSELossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), -0.5, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), 0.5, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); } /** MAELossOp Tests */ BOOST_AUTO_TEST_CASE(constructorMAEOp) { MAELossTensorOp<float, Eigen::DefaultDevice>* ptrMAE = nullptr; MAELossTensorOp<float, Eigen::DefaultDevice>* nullPointerMAE = nullptr; BOOST_CHECK_EQUAL(ptrMAE, nullPointerMAE); } BOOST_AUTO_TEST_CASE(destructorMAEOp) { MAELossTensorOp<float, Eigen::DefaultDevice>* ptrMAE = nullptr; ptrMAE = new MAELossTensorOp<float, Eigen::DefaultDevice>(); delete ptrMAE; } BOOST_AUTO_TEST_CASE(operationfunctionMAEOp) { MAELossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0.5, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0.5, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** MAELossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMAEGradOp) { MAELossGradTensorOp<float, Eigen::DefaultDevice>* ptrMAE = nullptr; MAELossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerMAE = nullptr; BOOST_CHECK_EQUAL(ptrMAE, nullPointerMAE); } BOOST_AUTO_TEST_CASE(destructorMAEGradOp) { MAELossGradTensorOp<float, Eigen::DefaultDevice>* ptrMAE = nullptr; ptrMAE = new MAELossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrMAE; } BOOST_AUTO_TEST_CASE(operationfunctionMAEGradOp) { MAELossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), -0.499999523, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), 0.5, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); } /** MRSELossOp Tests */ BOOST_AUTO_TEST_CASE(constructorMRSEOp) { MRSELossTensorOp<float, Eigen::DefaultDevice>* ptrMRSE = nullptr; MRSELossTensorOp<float, Eigen::DefaultDevice>* nullPointerMRSE = nullptr; BOOST_CHECK_EQUAL(ptrMRSE, nullPointerMRSE); } BOOST_AUTO_TEST_CASE(destructorMRSEOp) { MRSELossTensorOp<float, Eigen::DefaultDevice>* ptrMRSE = nullptr; ptrMRSE = new MRSELossTensorOp<float, Eigen::DefaultDevice>(); delete ptrMRSE; } BOOST_AUTO_TEST_CASE(operationfunctionMRSEOp) { MRSELossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 1.5, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 1.5, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** MRSELossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMRSEGradOp) { MRSELossGradTensorOp<float, Eigen::DefaultDevice>* ptrMRSE = nullptr; MRSELossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerMRSE = nullptr; BOOST_CHECK_EQUAL(ptrMRSE, nullPointerMRSE); } BOOST_AUTO_TEST_CASE(destructorMRSEGradOp) { MRSELossGradTensorOp<float, Eigen::DefaultDevice>* ptrMRSE = nullptr; ptrMRSE = new MRSELossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrMRSE; } BOOST_AUTO_TEST_CASE(operationfunctionMRSEGradOp) { MRSELossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), -499999.969, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), -499999.969, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), -707106.688, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), -707106.688, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); } /** MLELossOp Tests */ BOOST_AUTO_TEST_CASE(constructorMLEOp) { MLELossTensorOp<float, Eigen::DefaultDevice>* ptrMLE = nullptr; MLELossTensorOp<float, Eigen::DefaultDevice>* nullPointerMLE = nullptr; BOOST_CHECK_EQUAL(ptrMLE, nullPointerMLE); } BOOST_AUTO_TEST_CASE(destructorMLEOp) { MLELossTensorOp<float, Eigen::DefaultDevice>* ptrMLE = nullptr; ptrMLE = new MLELossTensorOp<float, Eigen::DefaultDevice>(); delete ptrMLE; } BOOST_AUTO_TEST_CASE(operationfunctionMLEOp) { MLELossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0.346573591, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0.346573591, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** MLELossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMLEGradOp) { MLELossGradTensorOp<float, Eigen::DefaultDevice>* ptrMLE = nullptr; MLELossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerMLE = nullptr; BOOST_CHECK_EQUAL(ptrMLE, nullPointerMLE); } BOOST_AUTO_TEST_CASE(destructorMLEGradOp) { MLELossGradTensorOp<float, Eigen::DefaultDevice>* ptrMLE = nullptr; ptrMLE = new MLELossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrMLE; } BOOST_AUTO_TEST_CASE(operationfunctionMLEGradOp) { MLELossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), -0.5, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), -0.5, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), -0.250000119, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), -0.250000119, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); } /** KLDivergenceMuLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorKLDivergenceMuOp) { KLDivergenceMuLossTensorOp<float, Eigen::DefaultDevice>* ptrKLDivergenceMu = nullptr; KLDivergenceMuLossTensorOp<float, Eigen::DefaultDevice>* nullPointerKLDivergenceMu = nullptr; BOOST_CHECK_EQUAL(ptrKLDivergenceMu, nullPointerKLDivergenceMu); } BOOST_AUTO_TEST_CASE(destructorKLDivergenceMuOp) { KLDivergenceMuLossTensorOp<float, Eigen::DefaultDevice>* ptrKLDivergenceMu = nullptr; ptrKLDivergenceMu = new KLDivergenceMuLossTensorOp<float, Eigen::DefaultDevice>(); delete ptrKLDivergenceMu; } BOOST_AUTO_TEST_CASE(operationfunctionKLDivergenceMuOp) { // Without capacity KLDivergenceMuLossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 3, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); // With capacity KLDivergenceMuLossTensorOp<float, Eigen::DefaultDevice> operationC(1e-3, 1, 5); float errorC_ptr[] = { 0, 0, 0, 0 }; operationC(y_pred.data(), y_true.data(), errorC_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> errorC(errorC_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(errorC(0, 0), -5, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 0), -2, 1e-4); BOOST_CHECK_CLOSE(errorC(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 1), 0, 1e-4); } /** KLDivergenceMuLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorKLDivergenceMuGradOp) { KLDivergenceMuLossGradTensorOp<float, Eigen::DefaultDevice>* ptrKLDivergenceMu = nullptr; KLDivergenceMuLossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerKLDivergenceMu = nullptr; BOOST_CHECK_EQUAL(ptrKLDivergenceMu, nullPointerKLDivergenceMu); } BOOST_AUTO_TEST_CASE(destructorKLDivergenceMuGradOp) { KLDivergenceMuLossGradTensorOp<float, Eigen::DefaultDevice>* ptrKLDivergenceMu = nullptr; ptrKLDivergenceMu = new KLDivergenceMuLossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrKLDivergenceMu; } BOOST_AUTO_TEST_CASE(operationfunctionKLDivergenceMuGradOp) { // Without capacity KLDivergenceMuLossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), -2.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), -4.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), -2.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), -4.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); // With capacity KLDivergenceMuLossGradTensorOp<float, Eigen::DefaultDevice> operationC(1e-4, 1, 5); float errorC_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; operationC(y_pred.data(), y_true.data(), errorC_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> errorC(errorC_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(errorC(0, 0, 0), 3.0, 1e-4); BOOST_CHECK_CLOSE(errorC(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 0, 0), 1.0, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(errorC(0, 0, 1), 3.0, 1e-4); BOOST_CHECK_CLOSE(errorC(0, 1, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 0, 1), 1.0, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 1, 1), 0.0, 1e-4); } /** KLDivergenceLogVarLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorKLDivergenceLogVarOp) { KLDivergenceLogVarLossTensorOp<float, Eigen::DefaultDevice>* ptrKLDivergenceLogVar = nullptr; KLDivergenceLogVarLossTensorOp<float, Eigen::DefaultDevice>* nullPointerKLDivergenceLogVar = nullptr; BOOST_CHECK_EQUAL(ptrKLDivergenceLogVar, nullPointerKLDivergenceLogVar); } BOOST_AUTO_TEST_CASE(destructorKLDivergenceLogVarOp) { KLDivergenceLogVarLossTensorOp<float, Eigen::DefaultDevice>* ptrKLDivergenceLogVar = nullptr; ptrKLDivergenceLogVar = new KLDivergenceLogVarLossTensorOp<float, Eigen::DefaultDevice>(); delete ptrKLDivergenceLogVar; } BOOST_AUTO_TEST_CASE(operationfunctionKLDivergenceLogVarOp2) { // Without capacity KLDivergenceLogVarLossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 1.29744244, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 2.43656349, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); // With capacity KLDivergenceLogVarLossTensorOp<float, Eigen::DefaultDevice> operationC(1e-3, 1, 5); float errorC_ptr[] = { 0, 0, 0, 0 }; operationC(y_pred.data(), y_true.data(), errorC_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> errorC(errorC_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(errorC(0, 0), -3.70255756, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 0), -2.56343651, 1e-4); BOOST_CHECK_CLOSE(errorC(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 1), 0, 1e-4); } /** KLDivergenceLogVarLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorKLDivergenceLogVarGradOp) { KLDivergenceLogVarLossGradTensorOp<float, Eigen::DefaultDevice>* ptrKLDivergenceLogVar = nullptr; KLDivergenceLogVarLossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerKLDivergenceLogVar = nullptr; BOOST_CHECK_EQUAL(ptrKLDivergenceLogVar, nullPointerKLDivergenceLogVar); } BOOST_AUTO_TEST_CASE(destructorKLDivergenceLogVarGradOp) { KLDivergenceLogVarLossGradTensorOp<float, Eigen::DefaultDevice>* ptrKLDivergenceLogVar = nullptr; ptrKLDivergenceLogVar = new KLDivergenceLogVarLossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrKLDivergenceLogVar; } BOOST_AUTO_TEST_CASE(operationfunctionKLDivergenceLogVarGradOp) { // Without capacity KLDivergenceLogVarLossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), -1.14872122, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), -2.21828175, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), -1.14872122, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), -2.21828175, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); // With capacity KLDivergenceLogVarLossGradTensorOp<float, Eigen::DefaultDevice> operationC(1e-4, 1, 5); float errorC_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; operationC(y_pred.data(), y_true.data(), errorC_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> errorC(errorC_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(errorC(0, 0, 0), 3.85127878, 1e-4); BOOST_CHECK_CLOSE(errorC(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 0, 0), 2.78171825, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(errorC(0, 0, 1), 3.85127878, 1e-4); BOOST_CHECK_CLOSE(errorC(0, 1, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 0, 1), 2.78171825, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 1, 1), 0.0, 1e-4); } /** BCEWithLogitsLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorBCEWithLogitsOp) { BCEWithLogitsLossTensorOp<float, Eigen::DefaultDevice>* ptrBCEWithLogits = nullptr; BCEWithLogitsLossTensorOp<float, Eigen::DefaultDevice>* nullPointerBCEWithLogits = nullptr; BOOST_CHECK_EQUAL(ptrBCEWithLogits, nullPointerBCEWithLogits); } BOOST_AUTO_TEST_CASE(destructorBCEWithLogitsOp) { BCEWithLogitsLossTensorOp<float, Eigen::DefaultDevice>* ptrBCEWithLogits = nullptr; ptrBCEWithLogits = new BCEWithLogitsLossTensorOp<float, Eigen::DefaultDevice>(); delete ptrBCEWithLogits; } BOOST_AUTO_TEST_CASE(operationfunctionBCEWithLogitsOp) { BCEWithLogitsLossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0},{0, 1} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 2}, {0, 0}}, {{1, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 2.44018984, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 1.44018972, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** BCEWithLogitsLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorBCEWithLogitsGradOp) { BCEWithLogitsLossGradTensorOp<float, Eigen::DefaultDevice>* ptrBCEWithLogits = nullptr; BCEWithLogitsLossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerBCEWithLogits = nullptr; BOOST_CHECK_EQUAL(ptrBCEWithLogits, nullPointerBCEWithLogits); } BOOST_AUTO_TEST_CASE(destructorBCEWithLogitsGradOp) { BCEWithLogitsLossGradTensorOp<float, Eigen::DefaultDevice>* ptrBCEWithLogits = nullptr; ptrBCEWithLogits = new BCEWithLogitsLossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrBCEWithLogits; } BOOST_AUTO_TEST_CASE(operationfunctionBCEWithLogitsGradOp) { BCEWithLogitsLossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0},{0, 1} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 2}, {0, 0}}, {{1, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), 0.268941402, 1e-4); //0.268941432 BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), -0.731058598, 1e-4); //-0.731058598 BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), -0.880797088, 1e-4); //-0.880797088 BOOST_CHECK_CLOSE(error(0, 1, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), 0.119202971, 1e-4); //0.119202919 BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); } /** CrossEntropyWithLogitsLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorCrossEntropyWithLogitsOp) { CrossEntropyWithLogitsLossTensorOp<float, Eigen::DefaultDevice>* ptrCrossEntropyWithLogits = nullptr; CrossEntropyWithLogitsLossTensorOp<float, Eigen::DefaultDevice>* nullPointerCrossEntropyWithLogits = nullptr; BOOST_CHECK_EQUAL(ptrCrossEntropyWithLogits, nullPointerCrossEntropyWithLogits); } BOOST_AUTO_TEST_CASE(destructorCrossEntropyWithLogitsOp) { CrossEntropyWithLogitsLossTensorOp<float, Eigen::DefaultDevice>* ptrCrossEntropyWithLogits = nullptr; ptrCrossEntropyWithLogits = new CrossEntropyWithLogitsLossTensorOp<float, Eigen::DefaultDevice>(); delete ptrCrossEntropyWithLogits; } BOOST_AUTO_TEST_CASE(operationfunctionCrossEntropyWithLogitsOp1) { CrossEntropyWithLogitsLossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ //{1, 0},{0, 1} {1, 0}, {1, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ //{{1, 2}, {0, 0}}, //{{1, 2}, {0, 0}} { {0, 2.19722}, {0, 0}}, {{2.19722, 0}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); //BOOST_CHECK_CLOSE(error(0, 0), 0.656630814, 1e-4); //BOOST_CHECK_CLOSE(error(1, 0), 0.156630829, 1e-4); //BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0), 1.15129054, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0.0526805036, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** CrossEntropyWithLogitsLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorCrossEntropyWithLogitsGradOp) { CrossEntropyWithLogitsLossGradTensorOp<float, Eigen::DefaultDevice>* ptrCrossEntropyWithLogits = nullptr; CrossEntropyWithLogitsLossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerCrossEntropyWithLogits = nullptr; BOOST_CHECK_EQUAL(ptrCrossEntropyWithLogits, nullPointerCrossEntropyWithLogits); } BOOST_AUTO_TEST_CASE(destructorCrossEntropyWithLogitsGradOp) { CrossEntropyWithLogitsLossGradTensorOp<float, Eigen::DefaultDevice>* ptrCrossEntropyWithLogits = nullptr; ptrCrossEntropyWithLogits = new CrossEntropyWithLogitsLossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrCrossEntropyWithLogits; } BOOST_AUTO_TEST_CASE(operationfunctionCrossEntropyWithLogitsGradOp1) { CrossEntropyWithLogitsLossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ //{1, 0},{0, 1} {1, 0}, {1, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ //{{1, 2}, {0, 0}}, //{{1, 2}, {0, 0}} { {0, 2.19722}, {0, 0}}, {{2.19722, 0}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); //BOOST_CHECK_CLOSE(error(0, 0, 0), 0.0, 1e-4); //BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 0, 0), -0.5, 1e-4); //BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); //BOOST_CHECK_CLOSE(error(0, 0, 1), -1.0, 1e-4); //BOOST_CHECK_CLOSE(error(0, 1, 1), 0.0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 0, 1), -0.5, 1e-4); //BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); // Option 1 BOOST_CHECK_CLOSE(error(0, 0, 0), 0.5, 1e-4); // NegLogLiklihoodGrad = -4.99994993 BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), -0.598610044, 1e-4); // NegLogLiklihoodGrad = -0.555554926 BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), -1.09861004, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); //// Option 2 //BOOST_CHECK_CLOSE(error(0, 0, 0), -4.9999299, 1e-4); //BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 0, 0), -0.555555224, 1e-4); //BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); //BOOST_CHECK_CLOSE(error(0, 0, 1), 0.0, 1e-4); //BOOST_CHECK_CLOSE(error(0, 1, 1), 0.0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 0, 1), 0.0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); } /** MSERangeUBLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorMSERangeUBOp) { MSERangeUBLossTensorOp<float, Eigen::DefaultDevice>* ptrMSERangeUB = nullptr; MSERangeUBLossTensorOp<float, Eigen::DefaultDevice>* nullPointerMSERangeUB = nullptr; BOOST_CHECK_EQUAL(ptrMSERangeUB, nullPointerMSERangeUB); } BOOST_AUTO_TEST_CASE(destructorMSERangeUBOp) { MSERangeUBLossTensorOp<float, Eigen::DefaultDevice>* ptrMSERangeUB = nullptr; ptrMSERangeUB = new MSERangeUBLossTensorOp<float, Eigen::DefaultDevice>(); delete ptrMSERangeUB; } BOOST_AUTO_TEST_CASE(operationfunctionMSERangeUBOp) { MSERangeUBLossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 3}, {0, 0}}, {{0, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0.25, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** MSERangeUBLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMSERangeUBGradOp) { MSERangeUBLossGradTensorOp<float, Eigen::DefaultDevice>* ptrMSERangeUB = nullptr; MSERangeUBLossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerMSERangeUB = nullptr; BOOST_CHECK_EQUAL(ptrMSERangeUB, nullPointerMSERangeUB); } BOOST_AUTO_TEST_CASE(destructorMSERangeUBGradOp) { MSERangeUBLossGradTensorOp<float, Eigen::DefaultDevice>* ptrMSERangeUB = nullptr; ptrMSERangeUB = new MSERangeUBLossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrMSERangeUB; } BOOST_AUTO_TEST_CASE(operationfunctionMSERangeUBGradOp) { MSERangeUBLossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 3}, {0, 0}}, {{0, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), -0.5, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); } /** MSERangeLBLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorMSERangeLBOp) { MSERangeLBLossTensorOp<float, Eigen::DefaultDevice>* ptrMSERangeLB = nullptr; MSERangeLBLossTensorOp<float, Eigen::DefaultDevice>* nullPointerMSERangeLB = nullptr; BOOST_CHECK_EQUAL(ptrMSERangeLB, nullPointerMSERangeLB); } BOOST_AUTO_TEST_CASE(destructorMSERangeLBOp) { MSERangeLBLossTensorOp<float, Eigen::DefaultDevice>* ptrMSERangeLB = nullptr; ptrMSERangeLB = new MSERangeLBLossTensorOp<float, Eigen::DefaultDevice>(); delete ptrMSERangeLB; } BOOST_AUTO_TEST_CASE(operationfunctionMSERangeLBOp) { MSERangeLBLossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 3}, {0, 0}}, {{0, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0.25, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** MSERangeLBLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMSERangeLBGradOp) { MSERangeLBLossGradTensorOp<float, Eigen::DefaultDevice>* ptrMSERangeLB = nullptr; MSERangeLBLossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerMSERangeLB = nullptr; BOOST_CHECK_EQUAL(ptrMSERangeLB, nullPointerMSERangeLB); } BOOST_AUTO_TEST_CASE(destructorMSERangeLBGradOp) { MSERangeLBLossGradTensorOp<float, Eigen::DefaultDevice>* ptrMSERangeLB = nullptr; ptrMSERangeLB = new MSERangeLBLossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrMSERangeLB; } BOOST_AUTO_TEST_CASE(operationfunctionMSERangeLBGradOp) { MSERangeLBLossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 3}, {0, 0}}, {{0, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), 0.5, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); } /** KLDivergenceCatLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorKLDivergenceCatOp) { KLDivergenceCatLossTensorOp<float, Eigen::DefaultDevice>* ptrKLDivergenceCat = nullptr; KLDivergenceCatLossTensorOp<float, Eigen::DefaultDevice>* nullPointerKLDivergenceCat = nullptr; BOOST_CHECK_EQUAL(ptrKLDivergenceCat, nullPointerKLDivergenceCat); } BOOST_AUTO_TEST_CASE(destructorKLDivergenceCatOp) { KLDivergenceCatLossTensorOp<float, Eigen::DefaultDevice>* ptrKLDivergenceCat = nullptr; ptrKLDivergenceCat = new KLDivergenceCatLossTensorOp<float, Eigen::DefaultDevice>(); delete ptrKLDivergenceCat; } BOOST_AUTO_TEST_CASE(operationfunctionKLDivergenceCatOp) { // Without capacity KLDivergenceCatLossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0.693147182, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 3.46573591, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); // With capacity KLDivergenceCatLossTensorOp<float, Eigen::DefaultDevice> operationC(1e-3, 1, 5); float errorC_ptr[] = { 0, 0, 0, 0 }; operationC(y_pred.data(), y_true.data(), errorC_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> errorC(errorC_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(errorC(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 0), 2.77258873, 1e-4); BOOST_CHECK_CLOSE(errorC(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 1), 0, 1e-4); } /** KLDivergenceCatLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorKLDivergenceCatGradOp) { KLDivergenceCatLossGradTensorOp<float, Eigen::DefaultDevice>* ptrKLDivergenceCat = nullptr; KLDivergenceCatLossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerKLDivergenceCat = nullptr; BOOST_CHECK_EQUAL(ptrKLDivergenceCat, nullPointerKLDivergenceCat); } BOOST_AUTO_TEST_CASE(destructorKLDivergenceCatGradOp) { KLDivergenceCatLossGradTensorOp<float, Eigen::DefaultDevice>* ptrKLDivergenceCat = nullptr; ptrKLDivergenceCat = new KLDivergenceCatLossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrKLDivergenceCat; } BOOST_AUTO_TEST_CASE(operationfunctionKLDivergenceCatGradOp) { // No capacity KLDivergenceCatLossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), -1, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), -1.69314718, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), -1, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), -1.69314718, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); // With capacity KLDivergenceCatLossGradTensorOp<float, Eigen::DefaultDevice> operationC(1e-4, 1, 5); float errorC_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; operationC(y_pred.data(), y_true.data(), errorC_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> errorC(errorC_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(errorC(0, 0, 0), -0.306852818, 1e-4); BOOST_CHECK_CLOSE(errorC(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 0, 0), -1, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(errorC(0, 0, 1), -0.306852818, 1e-4); BOOST_CHECK_CLOSE(errorC(0, 1, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 0, 1), -1, 1e-4); BOOST_CHECK_CLOSE(errorC(1, 1, 1), 0.0, 1e-4); } /** MAPELossOp Tests */ BOOST_AUTO_TEST_CASE(constructorMAPELossOp) { MAPELossTensorOp<float, Eigen::DefaultDevice>* ptrMAPELoss = nullptr; MAPELossTensorOp<float, Eigen::DefaultDevice>* nullPointerMAPELoss = nullptr; BOOST_CHECK_EQUAL(ptrMAPELoss, nullPointerMAPELoss); } BOOST_AUTO_TEST_CASE(destructorMAPELossOp) { MAPELossTensorOp<float, Eigen::DefaultDevice>* ptrMAPELoss = nullptr; ptrMAPELoss = new MAPELossTensorOp<float, Eigen::DefaultDevice>(); delete ptrMAPELoss; } BOOST_AUTO_TEST_CASE(operationfunctionMAPELossOp) { MAPELossTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, batch_size, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0.249999881, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0.499999523, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** MAPELossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMAPELossGradOp) { MAPELossGradTensorOp<float, Eigen::DefaultDevice>* ptrMAPELoss = nullptr; MAPELossGradTensorOp<float, Eigen::DefaultDevice>* nullPointerMAPELoss = nullptr; BOOST_CHECK_EQUAL(ptrMAPELoss, nullPointerMAPELoss); } BOOST_AUTO_TEST_CASE(destructorMAPELossGradOp) { MAPELossGradTensorOp<float, Eigen::DefaultDevice>* ptrMAPELoss = nullptr; ptrMAPELoss = new MAPELossGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrMAPELoss; } BOOST_AUTO_TEST_CASE(operationfunctionMAPELossGradOp) { MAPELossGradTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 2; const int time_step = 0; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 2}, {1, 2} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); float error_ptr[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, time_step, device); Eigen::TensorMap<Eigen::Tensor<float, 3>> error(error_ptr, batch_size, memory_size, layer_size); BOOST_CHECK_CLOSE(error(0, 0, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 0), -0.5, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 0), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(0, 0, 1), 0.250000149, 1e-4); BOOST_CHECK_CLOSE(error(0, 1, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0, 1), 0.0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1, 1), 0.0, 1e-4); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MNISTSIMULATOR_H #define EVONET_MNISTSIMULATOR_H #include <EvoNet/simulator/DataSimulator.h> #include <EvoNet/core/Preprocessing.h> namespace EvoNet { /** @brief A class to generate data using the MNIST data set */ template<typename TensorT> class MNISTSimulator: public DataSimulator<TensorT> { public: int ReverseInt(int i); /* @brief Read in the MNIST data set from an IDX file format. Output data for sample dimensions are the following: dim 0: sample dim 1: col-wise pixel intensity Output data for label dimensions are the following: dim 0: sample dim 1: class label See http://yann.lecun.com/exdb/mnist/ for a description of the data set and the file format @param[in] filename @param[in, out] data The tensor to hold the data @param[in] is_labels True if the file corresponds to class labels, False otherwise */ void ReadMNIST(const std::string& filename, Eigen::Tensor<TensorT, 2>& data, const bool& is_labels); void readData(const std::string& filename_data, const std::string& filename_labels, const bool& is_training, const int& data_size, const int& input_size); void smoothLabels(const TensorT& zero_offset, const TensorT& one_offset); ///< Read in the MNIST data set from an IDX file format void unitScaleData(); ///< Unit scale training and test pixels void centerUnitScaleData(); ///< Center and scale training and test pixels /* @brief Corrupt training data by zero-ing a random amount of pixels @param[in] fraction_corruption The fraction of training pixels to corrupt */ void corruptTrainingData(const TensorT& fraction_corruption); /** @brief Make a vector of sample indices for training based on the batch_size and the number of epochs @param[in] batch_size @param[in] n_epochs @returns a 1D Tensor of sample indices */ Eigen::Tensor<int, 1> getTrainingIndices(const int& batch_size, const int& n_epochs); /** @brief Make a vector of sample indices for validation based on the batch_size and the number of epochs @param[in] batch_size @param[in] n_epochs @returns a 1D Tensor of sample indices */ Eigen::Tensor<int, 1> getValidationIndices(const int& batch_size, const int& n_epochs); std::vector<TensorT> mnist_labels = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; ///< Training/test/validation label numbers Eigen::Tensor<TensorT, 2> training_data; ///< Training pixels with dimensions dim 0: sample; dim 1: col - wise pixel intensity Eigen::Tensor<TensorT, 2> validation_data; ///< Validation pixels with dimensions dim 0: sample; dim 1: col - wise pixel intensity Eigen::Tensor<TensorT, 2> training_labels; ///< Training labels with dimensions dim 0: sample; dim 1: class label Eigen::Tensor<TensorT, 2> validation_labels; ///< Validation labels with dimensions dim 0: sample; dim 1: class label private: // Internal iterators int mnist_sample_start_training = 0; int mnist_sample_end_training = 0; int mnist_sample_start_validation = 0; int mnist_sample_end_validation = 0; }; template<typename TensorT> inline int MNISTSimulator<TensorT>::ReverseInt(int i) { unsigned char ch1, ch2, ch3, ch4; ch1 = i & 255; ch2 = (i >> 8) & 255; ch3 = (i >> 16) & 255; ch4 = (i >> 24) & 255; return((int)ch1 << 24) + ((int)ch2 << 16) + ((int)ch3 << 8) + ch4; } template<typename TensorT> inline void MNISTSimulator<TensorT>::ReadMNIST(const std::string& filename, Eigen::Tensor<TensorT, 2>& data, const bool& is_labels) { // dims: sample, pixel intensity or sample, label // e.g., pixel data dims: 1000 x (28x28) (stored row-wise; returned col-wise) // e.g., label data dims: 1000 x 1 // open up the file std::ifstream file(filename, std::ios::binary); if (file.is_open()) { int magic_number = 0; int number_of_images = 0; int n_rows = 0; int n_cols = 0; // get the magic number file.read((char*)&magic_number, sizeof(magic_number)); magic_number = ReverseInt(magic_number); // get the number of images file.read((char*)&number_of_images, sizeof(number_of_images)); number_of_images = ReverseInt(number_of_images); if (number_of_images > data.dimension(0)) number_of_images = data.dimension(0); // get the number of rows and cols if (!is_labels) { file.read((char*)&n_rows, sizeof(n_rows)); n_rows = ReverseInt(n_rows); file.read((char*)&n_cols, sizeof(n_cols)); n_cols = ReverseInt(n_cols); } else { n_rows = 1; n_cols = 1; } // get the actual data (read row-wise) for (int i = 0; i < number_of_images; ++i) { for (int r = 0; r < n_rows; ++r) { for (int c = 0; c < n_cols; ++c) { unsigned char temp = 0; file.read((char*)&temp, sizeof(temp)); //data(i, (n_rows*r) + c) = (TensorT)temp; // row-wise return data(i, (n_cols*c) + r) = (TensorT)temp; // col-wise return } } } } } template<typename TensorT> inline void MNISTSimulator<TensorT>::readData(const std::string& filename_data, const std::string& filename_labels, const bool& is_training, const int& data_size, const int& input_size) { // Read input images [BUG FREE] Eigen::Tensor<TensorT, 2> input_data(data_size, input_size); ReadMNIST(filename_data, input_data, false); // Read input label [BUG FREE] Eigen::Tensor<TensorT, 2> labels(data_size, 1); ReadMNIST(filename_labels, labels, true); // Convert labels to 1 hot encoding [BUG FREE] Eigen::Tensor<TensorT, 2> labels_encoded = OneHotEncoder<TensorT, TensorT>(labels, mnist_labels); if (is_training) { training_data = input_data; training_labels = labels_encoded; } else { validation_data = input_data; validation_labels = labels_encoded; } } template<typename TensorT> inline void MNISTSimulator<TensorT>::smoothLabels(const TensorT& zero_offset, const TensorT& one_offset) { training_labels = training_labels.unaryExpr(LabelSmoother<TensorT>(zero_offset, one_offset)); validation_labels = validation_labels.unaryExpr(LabelSmoother<TensorT>(zero_offset, one_offset)); }; template<typename TensorT> inline void MNISTSimulator<TensorT>::unitScaleData() { this->training_data = this->training_data.unaryExpr(UnitScaleFunctor<TensorT>(this->training_data)); this->validation_data = this->validation_data.unaryExpr(UnitScaleFunctor<TensorT>(this->validation_data)); }; template<typename TensorT> inline void MNISTSimulator<TensorT>::centerUnitScaleData() { this->training_data = this->training_data.unaryExpr(LinearScaleFunctor<TensorT>(0, 255, -1, 1)); this->validation_data = this->validation_data.unaryExpr(LinearScaleFunctor<TensorT>(0, 255, -1, 1)); } template<typename TensorT> inline void MNISTSimulator<TensorT>::corruptTrainingData(const TensorT& fraction_corruption) { // iterate through each sample and apply the corruption to the pixel dimensions for (int i = 0; i < this->training_data.dimension(0); ++i) { this->training_data.chip(i, 0) = (this->training_data.chip(i, 0).random() + this->training_data.chip(i, 0).constant(TensorT(1)) < this->training_data.chip(i, 0).constant(fraction_corruption * 2)).select( this->training_data.chip(i, 0).constant(TensorT(1)), this->training_data.chip(i, 0)); } } template<typename TensorT> inline Eigen::Tensor<int, 1>MNISTSimulator<TensorT>::getTrainingIndices(const int& batch_size, const int& n_epochs) { // make a vector of sample_indices [BUG FREE] this->mnist_sample_start_training = this->mnist_sample_end_training; Eigen::Tensor<int, 1> sample_indices(batch_size*n_epochs); int sample_index = this->mnist_sample_start_training; for (int i = 0; i < batch_size*n_epochs; ++i) { if (sample_index > this->training_data.dimension(0) - 1) { sample_index = 0; } sample_indices(i) = sample_index; ++sample_index; } this->mnist_sample_end_training = sample_index; return sample_indices; } template<typename TensorT> inline Eigen::Tensor<int, 1> MNISTSimulator<TensorT>::getValidationIndices(const int& batch_size, const int& n_epochs) { // make a vector of sample_indices [BUG FREE] this->mnist_sample_start_validation = this->mnist_sample_end_validation; Eigen::Tensor<int, 1> sample_indices(batch_size*n_epochs); int sample_index = this->mnist_sample_start_validation; for (int i = 0; i < batch_size*n_epochs; ++i) { if (sample_index > this->validation_data.dimension(0) - 1) { sample_index = 0; } sample_indices(i) = sample_index; ++sample_index; } this->mnist_sample_end_validation = sample_index; return sample_indices; } }; #endif //EVONET_MNISTSIMULATOR_H<file_sep>#------------------------------------------------------------------------------ # This cmake file handles finding external libs for SmartPeak #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ # set which library extensions are preferred (we prefer shared libraries) if(NOT MSVC) set(CMAKE_FIND_LIBRARY_SUFFIXES ".so;.a") endif() if (APPLE) set(CMAKE_FIND_LIBRARY_SUFFIXES ".dylib;.a") endif() #------------------------------------------------------------------------------ # find libs (for linking) # On Windows: # * on windows we need the *.lib versions (dlls alone won't do for linking) # * never mix Release/Debug versions of libraries. Leads to strange segfaults, # stack corruption etc, due to different runtime libs ... #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ # Find eigen3 #------------------------------------------------------------------------------ find_package(Eigen3 3.1.0 REQUIRED) #------------------------------------------------------------------------------ # Find cereal #------------------------------------------------------------------------------ find_package(Cereal REQUIRED) ##------------------------------------------------------------------------------ ## Find cub ##------------------------------------------------------------------------------ #find_package(Cub 1.8.0 REQUIRED) #------------------------------------------------------------------------------ # Done finding contrib libraries #------------------------------------------------------------------------------ #except for the contrib libs, prefer shared libraries if(NOT MSVC AND NOT APPLE) set(CMAKE_FIND_LIBRARY_SUFFIXES ".so;.a") endif() <file_sep>### the directory name set(directory include/EvoNet/ml) ### list all header files of the directory here set(sources_list_h ActivationFunction.h ActivationFunctionTensor.h IntegrationFunction.h IntegrationFunctionTensor.h Interpreter.h LossFunction.h LossFunctionTensor.h Link.h MetricFunction.h MetricFunctionTensor.h Model.h ModelBuilder.h ModelBuilderExperimental.h ModelInterpreter.h ModelInterpreterDefaultDevice.h ModelInterpreterGpu.h ModelKernal.h ModelKernalGpu.h ModelLogger.h ModelReplicator.h ModelReplicatorExperimental.h ModelTrainer.h ModelTrainerDefaultDevice.h ModelTrainerExperimental.h ModelTrainerExperimentalDefaultDevice.h ModelTrainerExperimentalGpu.h ModelTrainerGpu.h Node.h NodeTensorData.h OpToTensorOp.h PopulationLogger.h PopulationTrainer.h PopulationTrainerDefaultDevice.h PopulationTrainerExperimental.h PopulationTrainerExperimentalDefaultDevice.h PopulationTrainerExperimentalGpu.h PopulationTrainerGpu.h Solver.h SolverTensor.h Weight.h WeightInit.h WeightTensorData.h ) ### add path to the filenames set(sources_h) foreach(i ${sources_list_h}) list(APPEND sources_h ${directory}/${i}) endforeach(i) ### source group definition source_group("Header Files\\EvoNet\\ml" FILES ${sources_h}) set(EvoNet_sources_h ${EvoNet_sources_h} ${sources_h}) <file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE OpToTensorOp test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/OpToTensorOp.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(OpToTensorOp1) BOOST_AUTO_TEST_CASE(constructorActivationOpToActivationTensorOp) { ActivationOpToActivationTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; ActivationOpToActivationTensorOp<float, Eigen::DefaultDevice>* nullPointer = nullptr; ptr = new ActivationOpToActivationTensorOp<float, Eigen::DefaultDevice>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructorActivationOpToActivationTensorOp) { ActivationOpToActivationTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; ptr = new ActivationOpToActivationTensorOp<float, Eigen::DefaultDevice>(); delete ptr; } BOOST_AUTO_TEST_CASE(convertOpToTensorOpActivationOpToActivationTensorOp) { ActivationOpToActivationTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<ActivationOp<float>> op_class; std::shared_ptr<ActivationTensorOp<float, Eigen::DefaultDevice>> op_tensor_class; op_class = std::make_shared<ReLUOp<float>>(ReLUOp<float>(1, 2, 3)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ReLUTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ReLUGradTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<ELUOp<float>>(ELUOp<float>(1.0f, 2.0f, 3.0f, 4.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ELUTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<ELUGradOp<float>>(ELUGradOp<float>(1.0f, 2.0f, 3.0f, 4.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ELUGradTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<SigmoidOp<float>>(SigmoidOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "SigmoidTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<SigmoidGradOp<float>>(SigmoidGradOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "SigmoidGradTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<TanHOp<float>>(TanHOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "TanHTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<TanHGradOp<float>>(TanHGradOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "TanHGradTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<ReTanHOp<float>>(ReTanHOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ReTanHTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<ReTanHGradOp<float>>(ReTanHGradOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ReTanHGradTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<LinearOp<float>>(LinearOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "LinearTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<LinearGradOp<float>>(LinearGradOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "LinearGradTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<InverseOp<float>>(InverseOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "InverseTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<InverseGradOp<float>>(InverseGradOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "InverseGradTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<ExponentialOp<float>>(ExponentialOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ExponentialTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<ExponentialGradOp<float>>(ExponentialGradOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ExponentialGradTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<LogOp<float>>(LogOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "LogTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<LogGradOp<float>>(LogGradOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "LogGradTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<PowOp<float>>(PowOp<float>(1.0f,2.0f,3.0f,2.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "PowTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<PowGradOp<float>>(PowGradOp<float>(1.0f, 2.0f, 3.0f, 2.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "PowGradTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<LeakyReLUOp<float>>(LeakyReLUOp<float>(1.0f, 2.0f, 3.0f, 4.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "LeakyReLUTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<LeakyReLUGradOp<float>>(LeakyReLUGradOp<float>(1.0f, 2.0f, 3.0f, 4.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "LeakyReLUGradTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<SinOp<float>>(SinOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "SinTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<SinGradOp<float>>(SinGradOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "SinGradTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<CosOp<float>>(CosOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "CosTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<CosGradOp<float>>(CosGradOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "CosGradTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<BatchNormOp<float>>(BatchNormOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "BatchNormTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); op_class = std::make_shared<BatchNormGradOp<float>>(BatchNormGradOp<float>(1.0f, 2.0f, 3.0f)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "BatchNormGradTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getEps(), 1); BOOST_CHECK_EQUAL(op_tensor_class->getMin(), 2); BOOST_CHECK_EQUAL(op_tensor_class->getMax(), 3); } BOOST_AUTO_TEST_CASE(getTensorParamsActivationOpToActivationTensorOp) { ActivationOpToActivationTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<ActivationOp<float>> op_class = std::make_shared<ReLUOp<float>>(ReLUOp<float>()); std::vector<float> params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<ELUOp<float>>(ELUOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<ELUGradOp<float>>(ELUGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<SigmoidOp<float>>(SigmoidOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<SigmoidGradOp<float>>(SigmoidGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<TanHOp<float>>(TanHOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<TanHGradOp<float>>(TanHGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<ReTanHOp<float>>(ReTanHOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<ReTanHGradOp<float>>(ReTanHGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<LinearOp<float>>(LinearOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<InverseOp<float>>(InverseOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<InverseGradOp<float>>(InverseGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<ExponentialOp<float>>(ExponentialOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<ExponentialGradOp<float>>(ExponentialGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<LogOp<float>>(LogOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<LogGradOp<float>>(LogGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<PowOp<float>>(PowOp<float>(2)); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<PowGradOp<float>>(PowGradOp<float>(2)); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<LeakyReLUOp<float>>(LeakyReLUOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<LeakyReLUGradOp<float>>(LeakyReLUGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<SinOp<float>>(SinOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<SinGradOp<float>>(SinGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<CosOp<float>>(CosOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<CosGradOp<float>>(CosGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<BatchNormOp<float>>(BatchNormOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<BatchNormGradOp<float>>(BatchNormGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); } BOOST_AUTO_TEST_CASE(constructorSolverOpToSolverTensorOp) { SolverOpToSolverTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; SolverOpToSolverTensorOp<float, Eigen::DefaultDevice>* nullPointer = nullptr; ptr = new SolverOpToSolverTensorOp<float, Eigen::DefaultDevice>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructorSolverOpToSolverTensorOp) { SolverOpToSolverTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; ptr = new SolverOpToSolverTensorOp<float, Eigen::DefaultDevice>(); delete ptr; } BOOST_AUTO_TEST_CASE(convertOpToTensorOpSolverOpToSolverTensorOp) { SolverOpToSolverTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<SolverOp<float>> op_class; std::shared_ptr<SolverTensorOp<float, Eigen::DefaultDevice>> op_tensor_class; op_class = std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9, 10.0, 1.0, 0.55)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "SGDTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getGradientThreshold(), 10); BOOST_CHECK_EQUAL(op_tensor_class->getGradientNoiseSigma(), 1); BOOST_CHECK_CLOSE(op_tensor_class->getGradientNoiseGamma(), 0.55, 1e-4); op_class = std::make_shared<SSDOp<float>>(SSDOp<float>(0.1, 0.9, 10.0, 1.0, 0.55)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "SSDTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getGradientThreshold(), 10); BOOST_CHECK_EQUAL(op_tensor_class->getGradientNoiseSigma(), 1); BOOST_CHECK_CLOSE(op_tensor_class->getGradientNoiseGamma(), 0.55, 1e-4); op_class = std::make_shared<AdamOp<float>>(AdamOp<float>(0.001, 0.9, 0.999, 1e-8, 10.0, 1.0, 0.55)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "AdamTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getGradientThreshold(), 10); BOOST_CHECK_EQUAL(op_tensor_class->getGradientNoiseSigma(), 1); BOOST_CHECK_CLOSE(op_tensor_class->getGradientNoiseGamma(), 0.55, 1e-4); op_class = std::make_shared<SVAGOp<float>>(SVAGOp<float>(0.001, 0.9,10.0, 1.0, 0.55)); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "SVAGTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getGradientThreshold(), 10); BOOST_CHECK_EQUAL(op_tensor_class->getGradientNoiseSigma(), 1); BOOST_CHECK_CLOSE(op_tensor_class->getGradientNoiseGamma(), 0.55, 1e-4); op_class = std::make_shared<DummySolverOp<float>>(DummySolverOp<float>()); op_class->setGradientThreshold(10); op_class->setGradientNoiseSigma(1); op_class->setGradientNoiseGamma(0.55); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "DummySolverTensorOp"); BOOST_CHECK_EQUAL(op_tensor_class->getGradientThreshold(), 10); BOOST_CHECK_EQUAL(op_tensor_class->getGradientNoiseSigma(), 1); BOOST_CHECK_CLOSE(op_tensor_class->getGradientNoiseGamma(), 0.55, 1e-4); } BOOST_AUTO_TEST_CASE(getTensorParamsSolverOpToSolverTensorOp) { SolverOpToSolverTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<SolverOp<float>> op_class = std::make_shared<SGDOp<float>>(SGDOp<float>(1, 2)); std::vector<float> params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 3); BOOST_CHECK_EQUAL(params[0], 1); BOOST_CHECK_EQUAL(params[1], 2); BOOST_CHECK_EQUAL(params[2], 0); op_class = std::make_shared<SSDOp<float>>(SSDOp<float>(1, 2)); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 3); BOOST_CHECK_EQUAL(params[0], 1); BOOST_CHECK_EQUAL(params[1], 2); BOOST_CHECK_EQUAL(params[2], 0); op_class = std::make_shared<AdamOp<float>>(AdamOp<float>(1, 2, 3, 4)); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 6); BOOST_CHECK_EQUAL(params[0], 1); BOOST_CHECK_EQUAL(params[1], 2); BOOST_CHECK_EQUAL(params[2], 3); BOOST_CHECK_EQUAL(params[3], 4); BOOST_CHECK_EQUAL(params[4], 0); BOOST_CHECK_EQUAL(params[5], 0); op_class = std::make_shared<SVAGOp<float>>(SVAGOp<float>(1, 2)); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 4); BOOST_CHECK_EQUAL(params[0], 1); BOOST_CHECK_EQUAL(params[1], 2); BOOST_CHECK_EQUAL(params[2], 0); BOOST_CHECK_EQUAL(params[3], 0); op_class = std::make_shared<DummySolverOp<float>>(DummySolverOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); } BOOST_AUTO_TEST_CASE(constructorIntegrationOpToIntegrationTensorOp) { IntegrationOpToIntegrationTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; IntegrationOpToIntegrationTensorOp<float, Eigen::DefaultDevice>* nullPointer = nullptr; ptr = new IntegrationOpToIntegrationTensorOp<float, Eigen::DefaultDevice>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructorIntegrationOpToIntegrationTensorOp) { IntegrationOpToIntegrationTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; ptr = new IntegrationOpToIntegrationTensorOp<float, Eigen::DefaultDevice>(); delete ptr; } BOOST_AUTO_TEST_CASE(convertOpToTensorOpIntegrationOpToIntegrationTensorOp) { IntegrationOpToIntegrationTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<IntegrationOp<float>> op_class; std::shared_ptr<IntegrationTensorOp<float, Eigen::DefaultDevice>> op_tensor_class; op_class = std::make_shared<SumOp<float>>(SumOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "SumTensorOp"); op_class = std::make_shared<ProdOp<float>>(ProdOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ProdTensorOp"); op_class = std::make_shared<ProdSCOp<float>>(ProdSCOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ProdSCTensorOp"); op_class = std::make_shared<MaxOp<float>>(MaxOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MaxTensorOp"); op_class = std::make_shared<MinOp<float>>(MinOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MinTensorOp"); //op_class = std::make_shared<VarOp<float>>(VarOp<float>()); //TODO... //op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); //BOOST_CHECK_EQUAL(op_tensor_class->getName(), "VarTensorOp"); op_class = std::make_shared<CountOp<float>>(CountOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "CountTensorOp"); op_class = std::make_shared<VarModOp<float>>(VarModOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "VarModTensorOp"); } BOOST_AUTO_TEST_CASE(getTensorParamsIntegrationOpToIntegrationTensorOp) { IntegrationOpToIntegrationTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<IntegrationOp<float>> op_class = std::make_shared<SumOp<float>>(SumOp<float>()); std::vector<float> params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<ProdOp<float>>(ProdOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<ProdSCOp<float>>(ProdSCOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MaxOp<float>>(MaxOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MinOp<float>>(MinOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MeanOp<float>>(MeanOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); //op_class = std::make_shared<VarOp<float>>(VarOp<float>()); //params = op_to_tensor_op.getTensorParams(op_class); //BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<CountOp<float>>(CountOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<VarModOp<float>>(VarModOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); } BOOST_AUTO_TEST_CASE(constructorIntegrationErrorOpToIntegrationErrorTensorOp) { IntegrationErrorOpToIntegrationErrorTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; IntegrationErrorOpToIntegrationErrorTensorOp<float, Eigen::DefaultDevice>* nullPointer = nullptr; ptr = new IntegrationErrorOpToIntegrationErrorTensorOp<float, Eigen::DefaultDevice>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructorIntegrationErrorOpToIntegrationErrorTensorOp) { IntegrationErrorOpToIntegrationErrorTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; ptr = new IntegrationErrorOpToIntegrationErrorTensorOp<float, Eigen::DefaultDevice>(); delete ptr; } BOOST_AUTO_TEST_CASE(convertOpToTensorOpIntegrationErrorOpToIntegrationErrorTensorOp) { IntegrationErrorOpToIntegrationErrorTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<IntegrationErrorOp<float>> op_class; std::shared_ptr<IntegrationErrorTensorOp<float, Eigen::DefaultDevice>> op_tensor_class; op_class = std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "SumErrorTensorOp"); op_class = std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ProdErrorTensorOp"); op_class = std::make_shared<MaxErrorOp<float>>(MaxErrorOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MaxErrorTensorOp"); op_class = std::make_shared<MinErrorOp<float>>(MinErrorOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MinErrorTensorOp"); op_class = std::make_shared<MeanErrorOp<float>>(MeanErrorOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MeanErrorTensorOp"); //op_class = std::make_shared<VarErrorOp<float>>(VarErrorOp<float>()); //TODO... //op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); //BOOST_CHECK_EQUAL(op_tensor_class->getName(), "VarErrorTensorOp"); op_class = std::make_shared<CountErrorOp<float>>(CountErrorOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "CountErrorTensorOp"); op_class = std::make_shared<VarModErrorOp<float>>(VarModErrorOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "VarModErrorTensorOp"); } BOOST_AUTO_TEST_CASE(getTensorParamsIntegrationErrorOpToIntegrationErrorTensorOp) { IntegrationErrorOpToIntegrationErrorTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<IntegrationErrorOp<float>> op_class = std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()); std::vector<float> params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MaxErrorOp<float>>(MaxErrorOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MinErrorOp<float>>(MinErrorOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MeanErrorOp<float>>(MeanErrorOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); //op_class = std::make_shared<VarErrorOp<float>>(VarErrorOp<float>()); //params = op_to_tensor_op.getTensorParams(op_class); //BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<CountErrorOp<float>>(CountErrorOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<VarModErrorOp<float>>(VarModErrorOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); } BOOST_AUTO_TEST_CASE(constructorIntegrationWeightGradOpToIntegrationWeightGradTensorOp) { IntegrationWeightGradOpToIntegrationWeightGradTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; IntegrationWeightGradOpToIntegrationWeightGradTensorOp<float, Eigen::DefaultDevice>* nullPointer = nullptr; ptr = new IntegrationWeightGradOpToIntegrationWeightGradTensorOp<float, Eigen::DefaultDevice>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructorIntegrationWeightGradOpToIntegrationWeightGradTensorOp) { IntegrationWeightGradOpToIntegrationWeightGradTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; ptr = new IntegrationWeightGradOpToIntegrationWeightGradTensorOp<float, Eigen::DefaultDevice>(); delete ptr; } BOOST_AUTO_TEST_CASE(convertOpToTensorOpIntegrationWeightGradOpToIntegrationWeightGradTensorOp) { IntegrationWeightGradOpToIntegrationWeightGradTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<IntegrationWeightGradOp<float>> op_class; std::shared_ptr<IntegrationWeightGradTensorOp<float, Eigen::DefaultDevice>> op_tensor_class; op_class = std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "SumWeightGradTensorOp"); op_class = std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ProdWeightGradTensorOp"); op_class = std::make_shared<MaxWeightGradOp<float>>(MaxWeightGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MaxWeightGradTensorOp"); op_class = std::make_shared<MinWeightGradOp<float>>(MinWeightGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MinWeightGradTensorOp"); op_class = std::make_shared<MeanWeightGradOp<float>>(MeanWeightGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MeanWeightGradTensorOp"); //op_class = std::make_shared<VarWeightGradOp<float>>(VarWeightGradOp<float>()); //TODO... //op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); //BOOST_CHECK_EQUAL(op_tensor_class->getName(), "VarWeightGradTensorOp"); op_class = std::make_shared<CountWeightGradOp<float>>(CountWeightGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "CountWeightGradTensorOp"); op_class = std::make_shared<VarModWeightGradOp<float>>(VarModWeightGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "VarModWeightGradTensorOp"); } BOOST_AUTO_TEST_CASE(getTensorParamsIntegrationWeightGradOpToIntegrationWeightGradTensorOp) { IntegrationWeightGradOpToIntegrationWeightGradTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<IntegrationWeightGradOp<float>> op_class = std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()); std::vector<float> params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MaxWeightGradOp<float>>(MaxWeightGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MinWeightGradOp<float>>(MinWeightGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MeanWeightGradOp<float>>(MeanWeightGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); //op_class = std::make_shared<VarWeightGradOp<float>>(VarWeightGradOp<float>()); //params = op_to_tensor_op.getTensorParams(op_class); //BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<CountWeightGradOp<float>>(CountWeightGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<VarModWeightGradOp<float>>(VarModWeightGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); } BOOST_AUTO_TEST_CASE(constructorLossFunctionGradOpToLossFunctionGradTensorOp) { LossFunctionGradOpToLossFunctionGradTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; LossFunctionGradOpToLossFunctionGradTensorOp<float, Eigen::DefaultDevice>* nullPointer = nullptr; ptr = new LossFunctionGradOpToLossFunctionGradTensorOp<float, Eigen::DefaultDevice>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructorLossFunctionGradOpToLossFunctionGradTensorOp) { LossFunctionGradOpToLossFunctionGradTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; ptr = new LossFunctionGradOpToLossFunctionGradTensorOp<float, Eigen::DefaultDevice>(); delete ptr; } BOOST_AUTO_TEST_CASE(convertOpToTensorOpLossFunctionGradOpToLossFunctionGradTensorOp) { LossFunctionGradOpToLossFunctionGradTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<LossFunctionGradOp<float>> op_class; std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> op_tensor_class; op_class = std::make_shared<ManhattanDistanceLossGradOp<float>>(ManhattanDistanceLossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ManhattanDistanceLossGradTensorOp"); op_class = std::make_shared<L2NormLossGradOp<float>>(L2NormLossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "L2NormLossGradTensorOp"); op_class = std::make_shared<NegativeLogLikelihoodLossGradOp<float>>(NegativeLogLikelihoodLossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "NegativeLogLikelihoodLossGradTensorOp"); op_class = std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MSELossGradTensorOp"); op_class = std::make_shared<MAELossGradOp<float>>(MAELossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MAELossGradTensorOp"); op_class = std::make_shared<MRSELossGradOp<float>>(MRSELossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MRSELossGradTensorOp"); op_class = std::make_shared<MLELossGradOp<float>>(MLELossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MLELossGradTensorOp"); op_class = std::make_shared<KLDivergenceMuLossGradOp<float>>(KLDivergenceMuLossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "KLDivergenceMuLossGradTensorOp"); op_class = std::make_shared<KLDivergenceLogVarLossGradOp<float>>(KLDivergenceLogVarLossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "KLDivergenceLogVarLossGradTensorOp"); op_class = std::make_shared<BCEWithLogitsLossGradOp<float>>(BCEWithLogitsLossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "BCEWithLogitsLossGradTensorOp"); op_class = std::make_shared<CrossEntropyWithLogitsLossGradOp<float>>(CrossEntropyWithLogitsLossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "CrossEntropyWithLogitsLossGradTensorOp"); op_class = std::make_shared<MSERangeLBLossGradOp<float>>(MSERangeLBLossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MSERangeLBLossGradTensorOp"); op_class = std::make_shared<MSERangeUBLossGradOp<float>>(MSERangeUBLossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MSERangeUBLossGradTensorOp"); op_class = std::make_shared<KLDivergenceCatLossGradOp<float>>(KLDivergenceCatLossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "KLDivergenceCatLossGradTensorOp"); op_class = std::make_shared<MAPELossGradOp<float>>(MAPELossGradOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MAPELossGradTensorOp"); } BOOST_AUTO_TEST_CASE(getTensorParamsLossFunctionGradOpToLossFunctionGradTensorOp) { LossFunctionGradOpToLossFunctionGradTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<LossFunctionGradOp<float>> op_class; std::vector<float> params; op_class = std::make_shared<ManhattanDistanceLossGradOp<float>>(ManhattanDistanceLossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<L2NormLossGradOp<float>>(L2NormLossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<NegativeLogLikelihoodLossGradOp<float>>(NegativeLogLikelihoodLossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MAELossGradOp<float>>(MAELossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MRSELossGradOp<float>>(MRSELossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MLELossGradOp<float>>(MLELossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<KLDivergenceMuLossGradOp<float>>(KLDivergenceMuLossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<KLDivergenceLogVarLossGradOp<float>>(KLDivergenceLogVarLossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<BCEWithLogitsLossGradOp<float>>(BCEWithLogitsLossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<CrossEntropyWithLogitsLossGradOp<float>>(CrossEntropyWithLogitsLossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MSERangeLBLossGradOp<float>>(MSERangeLBLossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MSERangeUBLossGradOp<float>>(MSERangeUBLossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<KLDivergenceCatLossGradOp<float>>(KLDivergenceCatLossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MAPELossGradOp<float>>(MAPELossGradOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); } BOOST_AUTO_TEST_CASE(constructorLossFunctionOpToLossFunctionTensorOp) { LossFunctionOpToLossFunctionTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; LossFunctionOpToLossFunctionTensorOp<float, Eigen::DefaultDevice>* nullPointer = nullptr; ptr = new LossFunctionOpToLossFunctionTensorOp<float, Eigen::DefaultDevice>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructorLossFunctionOpToLossFunctionTensorOp) { LossFunctionOpToLossFunctionTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; ptr = new LossFunctionOpToLossFunctionTensorOp<float, Eigen::DefaultDevice>(); delete ptr; } BOOST_AUTO_TEST_CASE(convertOpToTensorOpLossFunctionOpToLossFunctionTensorOp) { LossFunctionOpToLossFunctionTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<LossFunctionOp<float>> op_class; std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> op_tensor_class; op_class = std::make_shared<ManhattanDistanceLossOp<float>>(ManhattanDistanceLossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ManhattanDistanceLossTensorOp"); op_class = std::make_shared<L2NormLossOp<float>>(L2NormLossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "L2NormLossTensorOp"); op_class = std::make_shared<NegativeLogLikelihoodLossOp<float>>(NegativeLogLikelihoodLossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "NegativeLogLikelihoodLossTensorOp"); op_class = std::make_shared<MSELossOp<float>>(MSELossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MSELossTensorOp"); op_class = std::make_shared<MAELossOp<float>>(MAELossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MAELossTensorOp"); op_class = std::make_shared<MRSELossOp<float>>(MRSELossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MRSELossTensorOp"); op_class = std::make_shared<MLELossOp<float>>(MLELossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MLELossTensorOp"); op_class = std::make_shared<KLDivergenceMuLossOp<float>>(KLDivergenceMuLossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "KLDivergenceMuLossTensorOp"); op_class = std::make_shared<KLDivergenceLogVarLossOp<float>>(KLDivergenceLogVarLossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "KLDivergenceLogVarLossTensorOp"); op_class = std::make_shared<BCEWithLogitsLossOp<float>>(BCEWithLogitsLossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "BCEWithLogitsLossTensorOp"); op_class = std::make_shared<CrossEntropyWithLogitsLossOp<float>>(CrossEntropyWithLogitsLossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "CrossEntropyWithLogitsLossTensorOp"); op_class = std::make_shared<MSERangeLBLossOp<float>>(MSERangeLBLossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MSERangeLBLossTensorOp"); op_class = std::make_shared<MSERangeUBLossOp<float>>(MSERangeUBLossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MSERangeUBLossTensorOp"); op_class = std::make_shared<KLDivergenceCatLossOp<float>>(KLDivergenceCatLossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "KLDivergenceCatLossTensorOp"); op_class = std::make_shared<MAPELossOp<float>>(MAPELossOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MAPELossTensorOp"); } BOOST_AUTO_TEST_CASE(getTensorParamsLossFunctionOpToLossFunctionTensorOp) { LossFunctionOpToLossFunctionTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<LossFunctionOp<float>> op_class; std::vector<float> params; op_class = std::make_shared<ManhattanDistanceLossOp<float>>(ManhattanDistanceLossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<L2NormLossOp<float>>(L2NormLossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<NegativeLogLikelihoodLossOp<float>>(NegativeLogLikelihoodLossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MSELossOp<float>>(MSELossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MAELossOp<float>>(MAELossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MRSELossOp<float>>(MRSELossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MLELossOp<float>>(MLELossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<KLDivergenceMuLossOp<float>>(KLDivergenceMuLossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<KLDivergenceLogVarLossOp<float>>(KLDivergenceLogVarLossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<BCEWithLogitsLossOp<float>>(BCEWithLogitsLossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<CrossEntropyWithLogitsLossOp<float>>(CrossEntropyWithLogitsLossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MSERangeLBLossOp<float>>(MSERangeLBLossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MSERangeUBLossOp<float>>(MSERangeUBLossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<KLDivergenceCatLossOp<float>>(KLDivergenceCatLossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MAPELossOp<float>>(MAPELossOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); } BOOST_AUTO_TEST_CASE(constructorMetricFunctionOpToMetricFunctionTensorOp) { MetricFunctionOpToMetricFunctionTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; MetricFunctionOpToMetricFunctionTensorOp<float, Eigen::DefaultDevice>* nullPointer = nullptr; ptr = new MetricFunctionOpToMetricFunctionTensorOp<float, Eigen::DefaultDevice>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructorMetricFunctionOpToMetricFunctionTensorOp) { MetricFunctionOpToMetricFunctionTensorOp<float, Eigen::DefaultDevice>* ptr = nullptr; ptr = new MetricFunctionOpToMetricFunctionTensorOp<float, Eigen::DefaultDevice>(); delete ptr; } BOOST_AUTO_TEST_CASE(convertOpToTensorOpMetricFunctionOpToMetricFunctionTensorOp) { MetricFunctionOpToMetricFunctionTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<MetricFunctionOp<float>> op_class; std::shared_ptr<MetricFunctionTensorOp<float, Eigen::DefaultDevice>> op_tensor_class; op_class = std::make_shared<AccuracyBCOp<float>>(AccuracyBCOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "AccuracyBCTensorOp"); op_class = std::make_shared<AccuracyMCMicroOp<float>>(AccuracyMCMicroOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "AccuracyMCMicroTensorOp"); op_class = std::make_shared<AccuracyMCMacroOp<float>>(AccuracyMCMacroOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "AccuracyMCMacroTensorOp"); op_class = std::make_shared<PrecisionBCOp<float>>(PrecisionBCOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "PrecisionBCTensorOp"); op_class = std::make_shared<PrecisionMCMicroOp<float>>(PrecisionMCMicroOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "PrecisionMCMicroTensorOp"); op_class = std::make_shared<PrecisionMCMacroOp<float>>(PrecisionMCMacroOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "PrecisionMCMacroTensorOp"); op_class = std::make_shared<RecallBCOp<float>>(RecallBCOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "RecallBCTensorOp"); op_class = std::make_shared<RecallMCMicroOp<float>>(RecallMCMicroOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "RecallMCMicroTensorOp"); op_class = std::make_shared<RecallMCMacroOp<float>>(RecallMCMacroOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "RecallMCMacroTensorOp"); op_class = std::make_shared<F1ScoreBCOp<float>>(F1ScoreBCOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "F1ScoreBCTensorOp"); op_class = std::make_shared<F1ScoreMCMicroOp<float>>(F1ScoreMCMicroOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "F1ScoreMCMicroTensorOp"); op_class = std::make_shared<F1ScoreMCMacroOp<float>>(F1ScoreMCMacroOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "F1ScoreMCMacroTensorOp"); op_class = std::make_shared<MAEOp<float>>(MAEOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "MAETensorOp"); op_class = std::make_shared<CosineSimilarityOp<float>>(CosineSimilarityOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "CosineSimilarityTensorOp"); op_class = std::make_shared<PearsonROp<float>>(PearsonROp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "PearsonRTensorOp"); op_class = std::make_shared<EuclideanDistOp<float>>(EuclideanDistOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "EuclideanDistTensorOp"); op_class = std::make_shared<ManhattanDistOp<float>>(ManhattanDistOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "ManhattanDistTensorOp"); op_class = std::make_shared<LogarithmicDistOp<float>>(LogarithmicDistOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "LogarithmicDistTensorOp"); op_class = std::make_shared<JeffreysAndMatusitaDistOp<float>>(JeffreysAndMatusitaDistOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "JeffreysAndMatusitaDistTensorOp"); op_class = std::make_shared<PercentDifferenceOp<float>>(PercentDifferenceOp<float>()); op_tensor_class = op_to_tensor_op.convertOpToTensorOp(op_class); BOOST_CHECK_EQUAL(op_tensor_class->getName(), "PercentDifferenceTensorOp"); } BOOST_AUTO_TEST_CASE(getTensorParamsMetricFunctionOpToMetricFunctionTensorOp) { MetricFunctionOpToMetricFunctionTensorOp<float, Eigen::DefaultDevice> op_to_tensor_op; std::shared_ptr<MetricFunctionOp<float>> op_class; std::vector<float> params; op_class = std::make_shared<AccuracyBCOp<float>>(AccuracyBCOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<AccuracyMCMicroOp<float>>(AccuracyMCMicroOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<AccuracyMCMacroOp<float>>(AccuracyMCMacroOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<PrecisionBCOp<float>>(PrecisionBCOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<PrecisionMCMicroOp<float>>(PrecisionMCMicroOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<PrecisionMCMacroOp<float>>(PrecisionMCMacroOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<RecallBCOp<float>>(RecallBCOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<RecallMCMicroOp<float>>(RecallMCMicroOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<RecallMCMacroOp<float>>(RecallMCMacroOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<F1ScoreBCOp<float>>(F1ScoreBCOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<F1ScoreMCMicroOp<float>>(F1ScoreMCMicroOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<F1ScoreMCMacroOp<float>>(F1ScoreMCMacroOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<MAEOp<float>>(MAEOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<CosineSimilarityOp<float>>(CosineSimilarityOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<PearsonROp<float>>(PearsonROp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<EuclideanDistOp<float>>(EuclideanDistOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<ManhattanDistOp<float>>(ManhattanDistOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<LogarithmicDistOp<float>>(LogarithmicDistOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<JeffreysAndMatusitaDistOp<float>>(JeffreysAndMatusitaDistOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); op_class = std::make_shared<PercentDifferenceOp<float>>(PercentDifferenceOp<float>()); params = op_to_tensor_op.getTensorParams(op_class); BOOST_CHECK_EQUAL(params.size(), 0); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELBUILDER_H #define EVONET_MODELBUILDER_H // .h #include <EvoNet/ml/Model.h> #include <unsupported/Eigen/CXX11/Tensor> // .cpp #include <EvoNet/core/Preprocessing.h> namespace EvoNet { /** @brief Class to help create complex network models NOTE: the ModelInterpreter class arranges the Tensor layers according to node name ascending order. Therefore, the node name indices are buffered with 0's of length 12 to ensure proper sorting of nodes within a tensor layer. */ template<typename TensorT> class ModelBuilder { public: ModelBuilder() = default; ///< Default constructor ~ModelBuilder() = default; ///< Default destructor /** @brief Add inputs nodes to an empty model @param[in, out] model An empty model @param[in] names Prefix name to use for the nodes @param[in] n_nodes The number of output nodes @returns vector of output node names */ std::vector<std::string> addInputNodes(Model<TensorT>& model, const std::string& name, const std::string & module_name, const int& n_nodes, const bool& specify_layer = false); /** @brief Add a fully connected layer to a model @param[in, out] Model @param[in] source_node_names Node_names to add the fully connected layer to @param[in] n_nodes The number of output nodes @param[in] node_activation The activation function of the hidden node to create @param[in] node_activation_grad The activation function gradient of the hidden node to create @param[in] node_integration The integration function of the hidden node to create @param[in] drop_out_prob Node drop out probability @param[in] drop_connection_prob Weight drop out probability @param[in] biases Whether to include bias nodes or not @returns vector of output node names */ std::vector<std::string> addFullyConnected(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int& n_nodes, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, const bool& specify_layer = false); void addFullyConnected(Model<TensorT>& model, const std::string& module_name, const std::vector<std::string>& source_node_names, const std::vector<std::string>& sink_node_names, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_connection_prob = 0.0f, const bool& specify_layer = false); /** @brief Add a singly connected layer to a model @param[in, out] Model @param[in] source_node_names Node_names to add the singly connected layer to @param[in] n_nodes The number of output nodes @param[in] node_activation The activation function of the hidden node to create @param[in] node_activation_grad The activation function gradient of the hidden node to create @param[in] node_integration The integration function of the hidden node to create @param[in] node_integration_error The integration error function of the hidden node to create @param[in] node_integration_weight_grad The integration weight gradient function of the hidden node to create @param[in] weight_init The weight initialization function @param[in] solver The weight solver @param[in] drop_out_prob Node drop out probability @param[in] drop_connection_prob Weight drop out probability @param[in] biases Whether to include bias nodes or not @returns vector of output node names */ std::vector<std::string> addSinglyConnected(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int& n_nodes, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, const bool& specify_layer = false); void addSinglyConnected(Model<TensorT>& model, const std::string& module_name, const std::vector<std::string>& source_node_names, const std::vector<std::string>& sink_node_names, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_connection_prob = 0.0f, const bool& specify_layer = false); /* @brief Add biases A special case of `addSinglyConnected` where the source nodes are biases @param[in, out] Model @param[in] module_name The module name used when specifying layers @param[in] sink_node_names Node_names to add the singly connected layer to @param[in] weight_init The weight initialization function @param[in] solver The weight solver @param[in] drop_connection_prob Weight drop out probability @param[in] specify_layer Manually specify the layer that the node should be placed on @returns vector of output node names */ std::vector<std::string> addBiases(Model<TensorT>& model, const std::string& module_name, const std::vector<std::string>& sink_node_names, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_connection_prob = 0.0f, const bool& specify_layer = false); /** @brief Add hidden nodes @param[in, out] Model @param[in] n_nodes The number of output nodes @param[in] node_activation The activation function of the hidden node to create @param[in] node_activation_grad The activation function gradient of the hidden node to create @param[in] node_integration The integration function of the hidden node to create @param[in] node_integration_error The integration error function of the hidden node to create @param[in] node_integration_weight_grad The integration weight gradient function of the hidden node to create @param[in] drop_out_prob Node drop out probability @param[in] biases Whether to include bias nodes or not @returns vector of output node names */ std::vector<std::string> addHiddenNodes(Model<TensorT>& model, const std::string& name, const std::string& module_name, const int& n_nodes, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, const bool& specify_layer = false); /** @brief Add a Soft Max def stable_softmax(X): exps = np.exp(X) return exps / np.sum(exps) @param[in, out] Model<TensorT> @param[in] source_node_names Node_names to add the layer to @returns vector of output node names */ std::vector<std::string> addSoftMax(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const bool& specify_layer = false); /** @brief Add a Stable Soft Max def stable_softmax(X): exps = np.exp(X - np.max(X)) return exps / np.sum(exps) @param[in, out] Model @param[in] source_node_names Node_names to add the layer to @returns vector of output node names */ std::vector<std::string> addStableSoftMax(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const bool& specify_layer = false); /** @brief Add a Convolution layer or Pooling layer The input is considered a linearized matrix in column order The output is considered a linearized matrix in column order BUG: addition of bias causes an odd bug in model interpreter Overload is provided to add additional filters that operate over the same input and output nodes @param[in, out] Model @param[in] source_node_names Node_names to add the layer to @param[in] input_width The width of the input @param[in] input_height The height of the input @param[in] input_width_zero_padding Add 0s to the left and right of the input @param[in] input_height_zero_padding Add 0s to the top and bottom of the input @param[in] depth The number of convolution filters @param[in] extent_width The width of the filter @param[in] extent_height The height of the filter @param[in] stride The spacing between filters @param[in] output_width_zero_padding Add 0s to the left and right of the output @param[in] output_height_zero_padding Add 0s to the top and bottom of the output @returns vector of output node names */ std::vector<std::string> addConvolution(Model<TensorT> & model, const std::string & name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int & input_width, const int & input_height, const int& input_width_zero_padding, const int& input_height_zero_padding, const int & extent_width, const int & extent_height, const int & stride, const int & output_width_zero_padding, const int& output_height_zero_padding, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, bool split_filter_layers = true, bool share_weights = true); void addConvolution(Model<TensorT> & model, const std::string & name, const std::string& module_name, const std::vector<std::string>& source_node_names, const std::vector<std::string>& output_node_names, const int & input_width, const int & input_height, const int& input_width_zero_padding, const int& input_height_zero_padding, const int & extent_width, const int & extent_height, const int & stride, const int & output_width_zero_padding, const int& output_height_zero_padding, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, bool split_filter_layers = true); /** @brief Add a Projection layer (i.e., inverse convolution) The input is considered a linearized matrix in column order The output is considered a linearized matrix in column order BUG: addition of bias causes an odd bug in model interpreter Overload is provided to add additional filters that operate over the same input and output nodes @param[in, out] Model @param[in] source_node_names Node_names to add the layer to @param[in] input_width The width of the input @param[in] input_height The height of the input @param[in] input_width_zero_padding Add 0s to the left and right of the input @param[in] input_height_zero_padding Add 0s to the top and bottom of the input @param[in] depth The number of convolution filters @param[in] extent_width The width of the filter @param[in] extent_height The height of the filter @param[in] stride The spacing between filters @param[in] output_width_zero_padding Add 0s to the left and right of the output @param[in] output_height_zero_padding Add 0s to the top and bottom of the output @returns vector of output node names */ std::vector<std::string> addProjection(Model<TensorT> & model, const std::string & name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int & input_width, const int & input_height, const int& input_width_zero_padding, const int& input_height_zero_padding, const int & extent_width, const int & extent_height, const int & stride, const int & output_width_zero_padding, const int& output_height_zero_padding, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, bool split_filter_layers = true, bool share_weights = true); void addProjection(Model<TensorT> & model, const std::string & name, const std::string& module_name, const std::vector<std::string>& source_node_names, const std::vector<std::string>& output_node_names, const int & input_width, const int & input_height, const int& input_width_zero_padding, const int& input_height_zero_padding, const int & extent_width, const int & extent_height, const int & stride, const int & output_width_zero_padding, const int& output_height_zero_padding, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, bool split_filter_layers = true); /** @brief Add a normalization layer with activation. If a learnable gain/offset or application of an activation is desired, the user can add a singly connected layer after the unit scale layer. @param[in, out] Model @param[in] source_node_names Node_names to add the fully connected layer to @param[in] node_activation The activation function of the hidden node to create @param[in] node_activation_grad The activation function gradient of the hidden node to create @param[in] node_integration The integration function of the hidden node to create @param[in] drop_out_prob Node drop out probability @param[in] drop_connection_prob Weight drop out probability @param[in] biases Whether to include bias nodes or not @returns vector of output node names */ std::vector<std::string> addNormalization(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const bool& specify_layers = false); /** @brief Add a unit scale layer. If a learnable gain/offset or application of an activation is desired, the user can add a singly connected layer after the unit scale layer. @param[in, out] Model @param[in] source_node_names Node_names to add the fully connected layer to @returns vector of output node names */ std::vector<std::string> addUnitScale(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const bool& specify_layers = false); /** @brief Add a unit scale layer. If a learnable gain/offset or application of an activation is desired, the user can add a singly connected layer after the unit scale layer. @param[in, out] Model @param[in] source_node_names Node_names to add the fully connected layer to @param[in] range_min The minimum value to project to @param[in] range_max The maximum value to project to @returns vector of output node names */ std::vector<std::string> addLinearScale(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const TensorT& range_min, const TensorT& range_max, const bool& specify_layers = false); /** @brief Add a VAE Encoding layer for a gaussian distribution with input node Note: Input node names generated by the method are the following "%s_%012d-Sampler" where "%s" is filled by the `name` argument @param[in, out] Model @param[in] mu_node_names Node_names from the average layer @param[in] logvar_node_names Nodes names from the logvar layer @returns vector of output node names */ std::vector<std::string> addGaussianEncoding(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& mu_node_names, const std::vector<std::string>& logvar_node_names, const bool& specify_layer = false); /** @brief Add a VAE Encoding layer for a Gumble/concrete categorical distribution with input node The categorical distribution is calculated as the following: yi = [exp((log(alphai) + gi)/tau)] / [SUM j=1 to N exp((log(alphaj) + gj)/tau)]; for i = 1; ...; k; with parameters alpha, sampled Gumbel values g, and temperature tau where the Gumbel(0; 1) distribution can be sampled using inverse transform sampling by drawing u  Uniform(0; 1) and computing g = -log(-log(u)). References: Maddison 2017 The concrete distribution Jang 2017 Categorical reparameterization with Gumbel-softmax Generated input node generated by the method are the following: "%s_%012d-InverseTau" (input values specified from 0 to inf for 1/tau) where "%s" is filled by the `name` argument "%s_%012d-GumbelSampler" (Gumbel sampled values) where "%s" is filled by the `name` argument @param[in, out] Model @param[in] alpha_node_names Nodes names from the catergorical logit layer @returns vector of output node names */ std::vector<std::string> addCategoricalEncoding(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& alpha_node_names, const bool& specify_layer = false); /** @brief Add a VAE Encoding layer with input node @param[in, out] Model @param[in] encoding_node_names Node_names for the latent distribution @returns vector of output node names */ std::vector<std::string> addDiscriminator(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& encoding_node_names); /** @brief Add a LSTM layer Reference: 1. <NAME>, and <NAME>. "Long short-term memory." Neural computation 9.8 (1997): 1735-1780. 2. <NAME>.; <NAME>. (2001). "LSTM Recurrent Networks Learn Simple Context Free and Context Sensitive Languages" (PDF). IEEE Transactions on Neural Networks. 12 (6): 1333–1340. doi:10.1109/72.963769. @param[in, out] Model @param[in] source_node_names Node_names to add the layer to @param[in] n_blocks The number of independent LSTM cell blocks @param[in] n_cells The number of shared memory cells per LSTM block @param[in] node_activation The activation function of the input node to create @param[in] node_activation_grad The activation function gradient of the input node to create @param[in] node_integration The integration function of the input node to create @param[in] node_integration_error The integration function of the input node to create @param[in] node_integration_weight_grad The integration function of the input node to create @param[in] drop_out_prob input or output Node drop out probability @param[in] drop_connection_prob input or output Weight drop out probability @param[in] biases Whether to include bias nodes or not @param[in] forget_gat Whether to include forget gates or not @param[in] block_version 1 Traditional: output multiplier is connected to block input and block gates 2 Peep holes: memory cell is connected to block gates @returns vector of output node names */ std::vector<std::string> addLSTM(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int& n_blocks, const int& n_cells, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, bool forget_gate = true, int block_version = 1, const bool& specify_layer = false, bool specify_cyclic_pairs = false); std::vector<std::string> addLSTMBlock1(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int& n_cells, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, bool forget_gate = true, const bool& specify_layer = false, bool specify_cyclic_pairs = false); std::vector<std::string> addLSTMBlock2(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int& n_cells, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, bool forget_gate = true, const bool& specify_layer = false, bool specify_cyclic_pairs = false); /** @brief Add a GRU layer Reference: 1. <NAME>; <NAME>; <NAME>; <NAME>; <NAME>; <NAME>; <NAME> (2014). "Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation". arXiv:1406.1078 2. <NAME>., <NAME>., <NAME>., <NAME>. Minimal Gated Unit for Recurrent Neural Networks.arXiv preprint arXiv : 1603.09420v1, 2016. @param[in, out] Model @param[in] source_node_names Node_names to add the layer to @param[in] n_blocks The number of independent GRU cell blocks @param[in] node_activation The activation function of the input node to create @param[in] node_activation_grad The activation function gradient of the input node to create @param[in] node_integration The integration function of the input node to create @param[in] node_integration_error The integration function of the input node to create @param[in] node_integration_weight_grad The integration function of the input node to create @param[in] drop_out_prob input or output Node drop out probability @param[in] drop_connection_prob input or output Weight drop out probability @param[in] biases Whether to include bias nodes or not @param[in] input_gate_connection Whether to include an input connection to the gates @param[in] block_version 1 GRU: input and reset gates 2 MGRU: forget gate @returns vector of output node names */ std::vector<std::string> addGRU(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int& n_blocks, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, bool forget_gate = true, int block_version = 1, const bool& specify_layer = false); std::vector<std::string> addGRU1(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, bool input_gate_connection = true, const bool& specify_layer = false); std::vector<std::string> addGRU2(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, bool input_gate_connection = true, const bool& specify_layer = false); /** @brief Add a dot product self attention layer with activation References: Vaswani, et al. 2017 Attention is all you need @param[in, out] Model @param[in] source_node_names Node_names to add the fully connected layer to ... @param[in] n_nodes The number of output nodes @param[in] node_activation The activation function of the hidden node to create @param[in] node_activation_grad The activation function gradient of the hidden node to create @param[in] node_integration The integration function of the hidden node to create @param[in] drop_out_prob Node drop out probability @param[in] drop_connection_prob Weight drop out probability @param[in] biases Whether to include bias nodes or not @returns vector of output node names */ std::vector<std::string> addMultiHeadAttention(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& query_node_names, const std::vector<std::string>& key_node_names, const std::vector<std::string>& values_node_names, const int& n_heads, const std::string& attention_type, const int & model_length, const int& key_length, const int& values_length, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, bool split_attention_layers = true); /** @brief Add a scaled dot product self attention layer with activation References: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Attention is all you need. arXiv preprint arXiv:1706.03762, 2017. */ std::vector<std::string> addDotProdAttention(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& query_node_names, const std::vector<std::string>& key_node_names, const std::vector<std::string>& values_node_names, const int& key_length, const int& values_length, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, bool split_attention_layers = true); /** @brief Add an additive attention layer with activation References: <NAME>, <NAME>, and <NAME>. Neural machine translation by jointly learning to align and translate. CoRR, abs/1409.0473, 2014. */ std::vector<std::string> addAdditiveAttention(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& query_node_names, const std::vector<std::string>& key_node_names, const std::vector<std::string>& values_node_names, const int& key_length, const int& values_length, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, bool split_attention_layers = true); /** @brief Add a concatenation attention layer with activation References: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Google's neural machine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144, 2016. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. arXiv preprint arXiv:1701.06538, 2017. */ std::vector<std::string> addConcatAttention(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& query_node_names, const std::vector<std::string>& key_node_names, const std::vector<std::string>& values_node_names, const int& key_length, const int& values_length, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob = 0.0f, const TensorT& drop_connection_prob = 0.0f, const bool& biases = true, bool split_attention_layers = true); /** @brief Add a Scalar layer to the model @param[in, out] Model @param[in] source_node_names Node_names to add the fully connected layer to @param[in] n_nodes The number of output nodes @param[in] scalar_value The value of the scalar @param[in] node_activation The activation function of the hidden node to create @param[in] node_activation_grad The activation function gradient of the hidden node to create @param[in] specify_layer Whether to specify the layer or not @returns vector of output node names */ std::vector<std::string> addScalar(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const TensorT& scalar_value, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const bool& specify_layer = false); /** @brief Add a Gaussian posterior to the model. The loss is then calculated on the output nodes using `NegativeLogLikelihoodLoss` with an expectation of 1 and scaled by the inverse of the batch size as specified in the original Bayes by Backprop formulation Reference: Blundell 2015 Weight uncertainty in neural networks arXiv:1505.05424 and the tutorial @ https://gluon.mxnet.io/chapter18_variational-methods-and-uncertainty/bayes-by-backprop.html @param[in, out] Model @param[in] mu_node_names Node_names of the mean output layer @param[in] logvar_node_names Node_names of the logvar output layer @param[in] gaussian_node_names Node_names of the guassian output layer @param[in] specify_layer Whether to specify the layer or not @returns vector of output node names */ std::vector<std::string> addGaussianPosterior(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& mu_node_names, const std::vector<std::string>& logvar_node_names, const std::vector<std::string>& gaussian_node_names, const bool& specify_layer = false); /* @brief Add a Gaussian difference layer according to the calculations scaling = 1.0 / nd.sqrt(2.0 * np.pi * (sigma ** 2)) bell = nd.exp(- (x - mu) ** 2 / (2.0 * sigma ** 2)) return scaling * bell */ std::vector<std::string> addGaussian_(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& mu_node_names, const std::vector<std::string>& logvar_node_names, const std::vector<std::string>& gaussian_node_names, const bool& specify_layer = false); /** @brief Add a mixed Gaussian prior to the model. The loss is then calculated on the output nodes using `NegativeLogLikelihoodLoss` with an expectation of 1 and scaled by the inverse of the batch size as specified in the original Bayes by Backprop formulation Calculations: first_gaussian = pi * gaussian(x, 0., sigma_p1) second_gaussian = (1 - pi) * gaussian(x, 0., sigma_p2) return first_gaussian + second_gaussian Reference: Blundell 2015 Weight uncertainty in neural networks arXiv:1505.05424 and the tutorial @ https://gluon.mxnet.io/chapter18_variational-methods-and-uncertainty/bayes-by-backprop.html @param[in, out] Model @param[in] gaussian_node_names Node_names of the guassian output layer @param[in] logvar_1 Variance 1: -log sigma_1 {0, 1, 2} @param[in] logvar_2 Variance 2: -log sigma_2 {3, 4, 5} @param[in] pi Mixture percent: pi {0.25, 0.5, 0.75} @param[in] specify_layer Whether to specify the layer or not @returns vector of output node names */ std::vector<std::string> addMixedGaussianPior(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& gaussian_node_names, const TensorT& sigma_1, const TensorT& sigma_2, const TensorT& pi, const bool& specify_layer = false); /** @brief Add a fully connected Bayesian layer to a model @param[in, out] Model @param[in] source_node_names Node_names to add the fully connected layer to @param[in] n_nodes The number of output nodes @param[in] node_activation The activation function of the hidden node to create @param[in] node_activation_grad The activation function gradient of the hidden node to create @param[in] node_integration The integration function of the hidden node to create ... @param[in] logvar_1 Variance 1: -log sigma_1 {0, 1, 2} @param[in] logvar_2 Variance 2: -log sigma_2 {3, 4, 5} @param[in] pi Mixture percent: pi {0.25, 0.5, 0.75} @param[in] node_names_logvar Node_names of the logvar layer @param[in] node_names_posterior_output Node_names of the posterior layer @param[in] node_names_prior_output Node_names of the prior layer @param[out] @returns vector of output node names */ std::vector<std::string> addFullyConnectedBayesian(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int& n_nodes, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init_mu, const std::shared_ptr<SolverOp<TensorT>>& solver_mu, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init_logvar, const std::shared_ptr<SolverOp<TensorT>>& solver_logvar, const TensorT& sigma_1, const TensorT& sigma_2, const TensorT& pi, std::vector<std::string>& node_names_logvar_output, std::vector<std::string>& node_names_posterior_output, std::vector<std::string>& node_names_prior_output, const bool& specify_layer = false); /** @brief Add one model to another @param[in, out] Model @param[in] source_node_names Node_names in the LH model to add to @param[in] sink_node_names Node names in the RH model to join @param[in] model_rh The RH model to add to the LH model @returns vector of output node names */ std::vector<std::string> addModel(Model<TensorT>& model, const std::vector<std::string>& source_node_names, const std::vector<std::string>& sink_node_names, const Model<TensorT>& model_rh); /* @brief Make a unity weight */ std::string makeUnityWeight(Model<TensorT>& model, const TensorT& scale, const std::string& module_name, const std::string& name_format, const std::string& lhs, const std::string& rhs, const bool& specify_layer = false); }; template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addInputNodes(Model<TensorT> & model, const std::string & name, const std::string & module_name, const int & n_nodes, const bool& specify_layer) { std::vector<std::string> node_names; // Create the input nodes for (int i = 0; i < n_nodes; ++i) { char* node_name_char = new char[512]; sprintf(node_name_char, "%s_%012d", name.data(), i); std::string node_name(node_name_char); node_names.push_back(node_name); Node<TensorT> node(node_name, NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); node.setModuleName(module_name); if (specify_layer) node.setLayerName(module_name); model.addNodes({ node }); delete[] node_name_char; } return node_names; } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addFullyConnected(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int& n_nodes, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, const TensorT& drop_out_prob, const TensorT& drop_connection_prob, const bool& biases, const bool& specify_layer) { std::vector<std::string> node_names; // Create the hidden nodes + biases and hidden to bias links for (int i = 0; i < n_nodes; ++i) { char* node_name_char = new char[512]; sprintf(node_name_char, "%s_%012d", name.data(), i); std::string node_name(node_name_char); node_names.push_back(node_name); Node<TensorT> node(node_name, NodeType::hidden, NodeStatus::initialized, node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad); node.setModuleName(module_name); node.setDropProbability(drop_out_prob); if (specify_layer) node.setLayerName(module_name); model.addNodes({ node }); delete[] node_name_char; if (biases) { char* bias_name_char = new char[512]; sprintf(bias_name_char, "%s-bias_%012d", name.data(), i); std::string bias_name(bias_name_char); Node<TensorT> bias(bias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); bias.setModuleName(module_name); model.addNodes({ bias }); delete[] bias_name_char; char* weight_bias_name_char = new char[512]; sprintf(weight_bias_name_char, "%s-bias_%012d_to_%s_%012d", name.data(), i, name.data(), i); std::string weight_bias_name(weight_bias_name_char); delete[] weight_bias_name_char; char* link_bias_name_char = new char[512]; sprintf(link_bias_name_char, "%s-bias_%012d_to_%s_%012d", name.data(), i, name.data(), i); std::string link_bias_name(link_bias_name_char); delete[] link_bias_name_char; std::shared_ptr<WeightInitOp<TensorT>> bias_weight_init; bias_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>((TensorT)0)); std::shared_ptr<SolverOp<TensorT>> bias_solver = solver; Weight<TensorT> weight_bias(weight_bias_name, bias_weight_init, bias_solver); weight_bias.setModuleName(module_name); weight_bias.setDropProbability(drop_connection_prob); Link link_bias(link_bias_name, bias_name, node_name, weight_bias_name); link_bias.setModuleName(module_name); model.addWeights({ weight_bias }); model.addLinks({ link_bias }); } } // Create the weights and links for input to hidden for (int i = 0; i < source_node_names.size(); ++i) { for (int j = 0; j < n_nodes; ++j) { char* hidden_name_char = new char[512]; sprintf(hidden_name_char, "%s_%012d", name.data(), j); std::string hidden_name(hidden_name_char); delete[] hidden_name_char; char* link_name_char = new char[512]; sprintf(link_name_char, "%s_to_%s_%012d", source_node_names[i].data(), name.data(), j); std::string link_name(link_name_char); delete[] link_name_char; char* weight_name_char = new char[512]; sprintf(weight_name_char, "%s_to_%s_%012d", source_node_names[i].data(), name.data(), j); std::string weight_name(weight_name_char); delete[] weight_name_char; std::shared_ptr<WeightInitOp<TensorT>> hidden_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> hidden_solver = solver; Weight<TensorT> weight(weight_name, hidden_weight_init, hidden_solver); weight.setModuleName(module_name); weight.setDropProbability(drop_connection_prob); if (specify_layer) weight.setLayerName(module_name); Link link(link_name, source_node_names[i], hidden_name, weight_name); link.setModuleName(module_name); model.addWeights({ weight }); model.addLinks({ link }); } } return node_names; } template<typename TensorT> void ModelBuilder<TensorT>::addFullyConnected(Model<TensorT> & model, const std::string & module_name, const std::vector<std::string>& source_node_names, const std::vector<std::string>& sink_node_names, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, const TensorT& drop_connection_prob, const bool& specify_layer) { // Create the weights and links for input to hidden for (const std::string& source_node_name : source_node_names) { for (const std::string& sink_node_name : sink_node_names) { char* link_name_char = new char[512]; sprintf(link_name_char, "%s_to_%s", source_node_name.data(), sink_node_name.data()); std::string link_name(link_name_char); delete[] link_name_char; char* weight_name_char = new char[512]; sprintf(weight_name_char, "%s_to_%s", source_node_name.data(), sink_node_name.data()); std::string weight_name(weight_name_char); delete[] weight_name_char; std::shared_ptr<WeightInitOp<TensorT>> hidden_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> hidden_solver = solver; Weight<TensorT> weight(weight_name, hidden_weight_init, hidden_solver); weight.setModuleName(module_name); weight.setDropProbability(drop_connection_prob); if (specify_layer) weight.setLayerName(module_name); Link link(link_name, source_node_name, sink_node_name, weight_name); link.setModuleName(module_name); model.addWeights({ weight }); model.addLinks({ link }); } } } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addSinglyConnected(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int& n_nodes, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, const TensorT& drop_out_prob, const TensorT& drop_connection_prob, const bool& biases, const bool& specify_layer) { std::vector<std::string> node_names; assert(source_node_names.size() == n_nodes); // Create the hidden nodes + biases and hidden to bias links for (int i = 0; i < n_nodes; ++i) { char* node_name_char = new char[512]; sprintf(node_name_char, "%s_%012d", name.data(), i); std::string node_name(node_name_char); node_names.push_back(node_name); Node<TensorT> node(node_name, NodeType::hidden, NodeStatus::initialized, node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad); node.setModuleName(module_name); node.setDropProbability(drop_out_prob); if (specify_layer) node.setLayerName(module_name); model.addNodes({ node }); delete[] node_name_char; if (biases) { char* bias_name_char = new char[512]; sprintf(bias_name_char, "%s-bias_%012d", name.data(), i); std::string bias_name(bias_name_char); Node<TensorT> bias(bias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); bias.setModuleName(module_name); model.addNodes({ bias }); delete[] bias_name_char; char* weight_bias_name_char = new char[512]; sprintf(weight_bias_name_char, "%s-bias_%012d_to_%s_%012d", name.data(), i, name.data(), i); std::string weight_bias_name(weight_bias_name_char); delete[] weight_bias_name_char; char* link_bias_name_char = new char[512]; sprintf(link_bias_name_char, "%s-bias_%012d_to_%s_%012d", name.data(), i, name.data(), i); std::string link_bias_name(link_bias_name_char); delete[] link_bias_name_char; std::shared_ptr<WeightInitOp<TensorT>> bias_weight_init; bias_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0)); std::shared_ptr<SolverOp<TensorT>> bias_solver = solver; Weight<TensorT> weight_bias(weight_bias_name, bias_weight_init, bias_solver); weight_bias.setModuleName(module_name); weight_bias.setDropProbability(drop_connection_prob); Link link_bias(link_bias_name, bias_name, node_name, weight_bias_name); link_bias.setModuleName(module_name); model.addWeights({ weight_bias }); model.addLinks({ link_bias }); } // Create the weights and links for input to hidden char* hidden_name_char = new char[512]; sprintf(hidden_name_char, "%s_%012d", name.data(), i); std::string hidden_name(hidden_name_char); delete[] hidden_name_char; char* link_name_char = new char[512]; sprintf(link_name_char, "%s_to_%s_%012d", source_node_names[i].data(), name.data(), i); std::string link_name(link_name_char); delete[] link_name_char; char* weight_name_char = new char[512]; sprintf(weight_name_char, "%s_to_%s_%012d", source_node_names[i].data(), name.data(), i); std::string weight_name(weight_name_char); delete[] weight_name_char; std::shared_ptr<WeightInitOp<TensorT>> hidden_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> hidden_solver = solver; Weight<TensorT> weight(weight_name, hidden_weight_init, hidden_solver); weight.setModuleName(module_name); weight.setDropProbability(drop_connection_prob); if (specify_layer) weight.setLayerName(module_name); Link link(link_name, source_node_names[i], hidden_name, weight_name); link.setModuleName(module_name); model.addWeights({ weight }); model.addLinks({ link }); } return node_names; } template<typename TensorT> void ModelBuilder<TensorT>::addSinglyConnected(Model<TensorT> & model, const std::string & module_name, const std::vector<std::string>& source_node_names, const std::vector<std::string>& sink_node_names, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, const TensorT& drop_connection_prob, const bool& specify_layer) { assert(source_node_names.size() == sink_node_names.size()); // Create the weights and links for input to hidden for (int i=0; i<source_node_names.size(); ++i) { char* link_name_char = new char[512]; sprintf(link_name_char, "%s_to_%s", source_node_names[i].data(), sink_node_names[i].data()); std::string link_name(link_name_char); delete[] link_name_char; char* weight_name_char = new char[512]; sprintf(weight_name_char, "%s_to_%s", source_node_names[i].data(), sink_node_names[i].data()); std::string weight_name(weight_name_char); delete[] weight_name_char; std::shared_ptr<WeightInitOp<TensorT>> hidden_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> hidden_solver = solver; Weight<TensorT> weight(weight_name, hidden_weight_init, hidden_solver); weight.setModuleName(module_name); weight.setDropProbability(drop_connection_prob); if (specify_layer) weight.setLayerName(module_name); Link link(link_name, source_node_names[i], sink_node_names[i], weight_name); link.setModuleName(module_name); model.addWeights({ weight }); model.addLinks({ link }); } } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addBiases(Model<TensorT>& model, const std::string& module_name, const std::vector<std::string>& sink_node_names, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_connection_prob, const bool& specify_layer) { std::vector<std::string> biases_names; for (const std::string& node : sink_node_names) { // make the bias char* bias_name_char = new char[512]; sprintf(bias_name_char, "%s-bias", node.data()); std::string bias_name = std::string(bias_name_char); Node<TensorT> bias(bias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); bias.setModuleName(module_name); if (specify_layer) bias.setLayerName(module_name + "-bias"); model.addNodes({ bias }); biases_names.push_back(bias_name); delete[] bias_name_char; // make the bias weight char* weight_bias_name_char = new char[512]; sprintf(weight_bias_name_char, "%s_to_%s", bias_name.data(), node.data()); std::string weight_bias_name = std::string(weight_bias_name_char); Weight<TensorT> weight_bias(weight_bias_name, weight_init, solver); weight_bias.setModuleName(module_name); weight_bias.setDropProbability(drop_connection_prob); model.addWeights({ weight_bias }); delete[] weight_bias_name_char; // make the bias link char* link_bias_name_char = new char[512]; sprintf(link_bias_name_char, "%s_to_%s", bias_name.data(), node.data()); std::string link_bias_name(link_bias_name_char); Link link_bias(link_bias_name, bias_name, node, weight_bias_name); link_bias.setModuleName(module_name); model.addLinks({ link_bias }); delete[] link_bias_name_char; } return biases_names; } template<typename TensorT> inline std::vector<std::string> ModelBuilder<TensorT>::addHiddenNodes(Model<TensorT>& model, const std::string& name, const std::string& module_name, const int& n_nodes, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob, const TensorT& drop_connection_prob, const bool& biases, const bool& specify_layer) { std::vector<std::string> node_names; // Create the hidden nodes + biases and hidden to bias links for (int i = 0; i < n_nodes; ++i) { char* node_name_char = new char[512]; sprintf(node_name_char, "%s_%012d", name.data(), i); std::string node_name(node_name_char); node_names.push_back(node_name); Node<TensorT> node(node_name, NodeType::hidden, NodeStatus::initialized, node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad); node.setModuleName(module_name); node.setDropProbability(drop_out_prob); if (specify_layer) node.setLayerName(module_name); model.addNodes({ node }); delete[] node_name_char; if (biases) { char* bias_name_char = new char[512]; sprintf(bias_name_char, "%s-bias_%012d", name.data(), i); std::string bias_name(bias_name_char); Node<TensorT> bias(bias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); bias.setModuleName(module_name); model.addNodes({ bias }); delete[] bias_name_char; char* weight_bias_name_char = new char[512]; sprintf(weight_bias_name_char, "%s-bias_%012d_to_%s_%012d", name.data(), i, name.data(), i); std::string weight_bias_name(weight_bias_name_char); delete[] weight_bias_name_char; char* link_bias_name_char = new char[512]; sprintf(link_bias_name_char, "%s-bias_%012d_to_%s_%012d", name.data(), i, name.data(), i); std::string link_bias_name(link_bias_name_char); delete[] link_bias_name_char; std::shared_ptr<WeightInitOp<TensorT>> bias_weight_init; bias_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0)); std::shared_ptr<SolverOp<TensorT>> bias_solver = solver; Weight<TensorT> weight_bias(weight_bias_name, bias_weight_init, bias_solver); weight_bias.setModuleName(module_name); weight_bias.setDropProbability(drop_connection_prob); Link link_bias(link_bias_name, bias_name, node_name, weight_bias_name); link_bias.setModuleName(module_name); model.addWeights({ weight_bias }); model.addLinks({ link_bias }); } } return node_names; } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addSoftMax(Model<TensorT> & model, const std::string & name, const std::string& module_name, const std::vector<std::string>& source_node_names, const bool& specify_layer) { std::vector<std::string> node_names; std::string unity_weight_name; // Create the Softmax Inverse/Sum node char* sms_node_name_char = new char[512]; sprintf(sms_node_name_char, "%s-Sum", name.data()); std::string sms_node_name(sms_node_name_char); Node<TensorT> sms_node(sms_node_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<InverseOp<TensorT>>(InverseOp<TensorT>()), std::make_shared<InverseGradOp<TensorT>>(InverseGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); sms_node.setModuleName(module_name); if (specify_layer) sms_node.setLayerName(module_name + "-SoftMaxSum"); model.addNodes({ sms_node }); delete[] sms_node_name_char; // Create the Softmax input/output layer for (int i = 0; i < source_node_names.size(); ++i) { // Create the input layer char* smi_node_name_char = new char[512]; sprintf(smi_node_name_char, "%s-In_%012d", name.data(), i); std::string smi_node_name(smi_node_name_char); Node<TensorT> smi_node(smi_node_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<ExponentialOp<TensorT>>(ExponentialOp<TensorT>()), std::make_shared<ExponentialGradOp<TensorT>>(ExponentialGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); smi_node.setModuleName(module_name); if (specify_layer) smi_node.setLayerName(module_name + "-SoftMaxIn"); smi_node.setModuleName(module_name); delete[] smi_node_name_char; // Create the output layer char* smo_node_name_char = new char[512]; sprintf(smo_node_name_char, "%s-Out_%012d", name.data(), i); std::string smo_node_name(smo_node_name_char); node_names.push_back(smo_node_name); Node<TensorT> smo_node(smo_node_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); smo_node.setModuleName(module_name); if (specify_layer) smo_node.setLayerName(module_name + "-SoftMaxOut"); smo_node.setModuleName(module_name); delete[] smo_node_name_char; model.addNodes({ smi_node, smo_node }); // Create the weights and links for the input to softmax input layer unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", source_node_names[i], smi_node_name); char* ismi_link_name_char = new char[512]; sprintf(ismi_link_name_char, "%s_to_%s", source_node_names[i].data(), smi_node_name.data()); std::string ismi_link_name(ismi_link_name_char); Link ismi_link(ismi_link_name, source_node_names[i], smi_node_name, unity_weight_name); ismi_link.setModuleName(module_name); model.addLinks({ ismi_link }); delete[] ismi_link_name_char; // Create the weights and links for the softmax input layer to softmax sum layer unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", smi_node_name, sms_node_name); char* smisms_link_name_char = new char[512]; sprintf(smisms_link_name_char, "%s_to_%s", smi_node_name.data(), sms_node_name.data()); std::string smisms_link_name(smisms_link_name_char); Link smisms_link(smisms_link_name, smi_node_name, sms_node_name, unity_weight_name); smisms_link.setModuleName(module_name); model.addLinks({ smisms_link }); delete[] smisms_link_name_char; // Create the weights and links for the softmax input layer to softmax output layer unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", smi_node_name, smo_node_name); char* smismo_link_name_char = new char[512]; sprintf(smismo_link_name_char, "%s_to_%s", smi_node_name.data(), smo_node_name.data()); std::string smismo_link_name(smismo_link_name_char); Link smismo_link(smismo_link_name, smi_node_name, smo_node_name, unity_weight_name); smismo_link.setModuleName(module_name); model.addLinks({ smismo_link }); delete[] smismo_link_name_char; // Create the weights and links for the softmax sum layer to softmax output layer unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", sms_node_name, smo_node_name); char* smssmo_link_name_char = new char[512]; sprintf(smssmo_link_name_char, "%s_to_%s", sms_node_name.data(), smo_node_name.data()); std::string smssmo_link_name(smssmo_link_name_char); Link smssmo_link(smssmo_link_name, sms_node_name, smo_node_name, unity_weight_name); smssmo_link.setModuleName(module_name); model.addLinks({ smssmo_link }); delete[] smssmo_link_name_char; } return node_names; } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addStableSoftMax(Model<TensorT> & model, const std::string & name, const std::string& module_name, const std::vector<std::string>& source_node_names, const bool& specify_layer) { std::vector<std::string> node_names; std::string unity_weight_name, negunity_weight_name; // Create the Softmax Max offset node char* smm_node_name_char = new char[512]; sprintf(smm_node_name_char, "%s-Max", name.data()); std::string smm_node_name(smm_node_name_char); Node<TensorT> smm_node(smm_node_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<MaxOp<TensorT>>(MaxOp<TensorT>()), std::make_shared<MaxErrorOp<TensorT>>(MaxErrorOp<TensorT>()), std::make_shared<MaxWeightGradOp<TensorT>>(MaxWeightGradOp<TensorT>())); smm_node.setModuleName(module_name); if (specify_layer) smm_node.setLayerName(module_name +"-Max"); model.addNodes({ smm_node }); delete[] smm_node_name_char; // Create the Softmax Inverse/Sum node char* sms_node_name_char = new char[512]; sprintf(sms_node_name_char, "%s-Sum", name.data()); std::string sms_node_name(sms_node_name_char); Node<TensorT> sms_node(sms_node_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<InverseOp<TensorT>>(InverseOp<TensorT>()), std::make_shared<InverseGradOp<TensorT>>(InverseGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); sms_node.setModuleName(module_name); if (specify_layer) sms_node.setLayerName(module_name + "-Sum"); model.addNodes({ sms_node }); delete[] sms_node_name_char; // Create the Softmax input/output layer for (int i = 0; i < source_node_names.size(); ++i) { // Create the input layer char* smi_node_name_char = new char[512]; sprintf(smi_node_name_char, "%s-In_%012d", name.data(), i); std::string smi_node_name(smi_node_name_char); Node<TensorT> smi_node(smi_node_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<ExponentialOp<TensorT>>(ExponentialOp<TensorT>()), std::make_shared<ExponentialGradOp<TensorT>>(ExponentialGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); smi_node.setModuleName(module_name); if (specify_layer) smi_node.setLayerName(module_name + "-SoftMaxIn"); delete[] smi_node_name_char; // Create the output layer char* smo_node_name_char = new char[512]; sprintf(smo_node_name_char, "%s-Out_%012d", name.data(), i); std::string smo_node_name(smo_node_name_char); node_names.push_back(smo_node_name); Node<TensorT> smo_node(smo_node_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); smo_node.setModuleName(module_name); if (specify_layer) smo_node.setLayerName(module_name + "-SoftMaxOut"); delete[] smo_node_name_char; model.addNodes({ smi_node, smo_node }); // Create the weights and links for the input to softmax Max node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", source_node_names[i], smm_node_name); char* ismm_link_name_char = new char[512]; sprintf(ismm_link_name_char, "%s_to_%s", source_node_names[i].data(), smm_node_name.data()); std::string ismm_link_name(ismm_link_name_char); Link ismm_link(ismm_link_name, source_node_names[i], smm_node_name, unity_weight_name); ismm_link.setModuleName(module_name); model.addLinks({ ismm_link }); delete[] ismm_link_name_char; // Create the weights and links for the softmax Max node softmax input layer negunity_weight_name = makeUnityWeight(model, -1.0, module_name, "%s_to_%s", smm_node_name, smi_node_name); char* smmsmi_link_name_char = new char[512]; sprintf(smmsmi_link_name_char, "%s_to_%s", smm_node_name.data(), smi_node_name.data()); std::string smmsmi_link_name(smmsmi_link_name_char); Link smmsmi_link(smmsmi_link_name, smm_node_name, smi_node_name, negunity_weight_name); smmsmi_link.setModuleName(module_name); model.addLinks({ smmsmi_link }); delete[] smmsmi_link_name_char; // Create the weights and links for the input to softmax input layer unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", source_node_names[i], smi_node_name); char* ismi_link_name_char = new char[512]; sprintf(ismi_link_name_char, "%s_to_%s", source_node_names[i].data(), smi_node_name.data()); std::string ismi_link_name(ismi_link_name_char); Link ismi_link(ismi_link_name, source_node_names[i], smi_node_name, unity_weight_name); ismi_link.setModuleName(module_name); model.addLinks({ ismi_link }); delete[] ismi_link_name_char; // Create the weights and links for the softmax input layer to softmax sum layer unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", smi_node_name, sms_node_name); char* smisms_link_name_char = new char[512]; sprintf(smisms_link_name_char, "%s_to_%s", smi_node_name.data(), sms_node_name.data()); std::string smisms_link_name(smisms_link_name_char); Link smisms_link(smisms_link_name, smi_node_name, sms_node_name, unity_weight_name); smisms_link.setModuleName(module_name); model.addLinks({ smisms_link }); delete[] smisms_link_name_char; // Create the weights and links for the softmax input layer to softmax output layer unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", smi_node_name, smo_node_name); char* smismo_link_name_char = new char[512]; sprintf(smismo_link_name_char, "%s_to_%s", smi_node_name.data(), smo_node_name.data()); std::string smismo_link_name(smismo_link_name_char); Link smismo_link(smismo_link_name, smi_node_name, smo_node_name, unity_weight_name); smismo_link.setModuleName(module_name); model.addLinks({ smismo_link }); delete[] smismo_link_name_char; // Create the weights and links for the softmax sum layer to softmax output layer unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", sms_node_name, smo_node_name); char* smssmo_link_name_char = new char[512]; sprintf(smssmo_link_name_char, "%s_to_%s", sms_node_name.data(), smo_node_name.data()); std::string smssmo_link_name(smssmo_link_name_char); Link smssmo_link(smssmo_link_name, sms_node_name, smo_node_name, unity_weight_name); smssmo_link.setModuleName(module_name); model.addLinks({ smssmo_link }); delete[] smssmo_link_name_char; } return node_names; } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addConvolution(Model<TensorT> & model, const std::string & name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int & input_width, const int & input_height, const int& input_width_zero_padding, const int& input_height_zero_padding, const int & extent_width, const int & extent_height, const int & stride, const int & output_width_zero_padding, const int& output_height_zero_padding, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, const TensorT& drop_out_prob, const TensorT& drop_connection_prob, const bool& biases, bool split_filter_layers, bool share_weights) { std::vector<std::string> node_names; // Parameters for the Convolution layer assert(source_node_names.size() == input_width * input_height); int input_padded_width = input_width + 2 * input_width_zero_padding; //assert((input_padded_width - extent_width) % stride == 0); if ((input_padded_width - extent_width) % stride != 0) std::cout << "Warning: input width, filter width, and stride lengths will not allow for uniform coverage during convolution." << std::endl; int strides_width = std::floor((input_padded_width - extent_width) / stride) +1; // includes the starting stride int input_padded_height = input_height + 2 * input_height_zero_padding; //assert((input_padded_height - extent_height) % stride == 0); if ((input_padded_height - extent_height) % stride != 0) std::cout << "Warning: input height, filter height, and stride lengths will not allow for uniform coverage during convolution." << std::endl; int strides_height = std::floor((input_padded_height - extent_height) / stride) +1; // includes the starting stride int output_nodes = strides_width + strides_height; int output_padded_width = strides_width + 2 * output_width_zero_padding; int output_padded_height = strides_height + 2 * output_height_zero_padding; std::string bias_name; std::string weight_bias_name; if (biases && share_weights) { // Create the filter bias char* bias_name_char = new char[512]; sprintf(bias_name_char, "%s-bias", name.data()); bias_name = std::string(bias_name_char); Node<TensorT> bias(bias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); bias.setModuleName(module_name); model.addNodes({ bias }); delete[] bias_name_char; // Create the shared weights for each bias to output node char* weight_bias_name_char = new char[512]; sprintf(weight_bias_name_char, "%s_to_out", bias_name.data()); weight_bias_name = std::string(weight_bias_name_char); Weight<TensorT> weight_bias(weight_bias_name, weight_init, solver); weight_bias.setModuleName(module_name); model.addWeights({ weight_bias }); delete[] weight_bias_name_char; } // Create the output zero padding nodes for (size_t output_width_iter = 0; output_width_iter < output_padded_width; ++output_width_iter) { for (size_t output_height_iter = 0; output_height_iter < output_padded_height; ++output_height_iter) { if (output_height_iter < output_height_zero_padding || output_height_iter >= output_padded_height - output_height_zero_padding) { char* bias_name_char = new char[512]; sprintf(bias_name_char, "%s-out-padding_H%012d-W%012d", name.data(), output_height_iter, output_width_iter); std::string bias_name(bias_name_char); Node<TensorT> bias(bias_name, NodeType::zero, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); bias.setModuleName(module_name); if (split_filter_layers) bias.setLayerName(module_name); model.addNodes({ bias }); node_names.push_back(bias_name); delete[] bias_name_char; } else if (output_width_iter < output_width_zero_padding || output_width_iter >= output_padded_width - output_width_zero_padding) { char* bias_name_char = new char[512]; sprintf(bias_name_char, "%s-out-padding_H%012d-W%012d", name.data(), output_height_iter, output_width_iter); std::string bias_name(bias_name_char); Node<TensorT> bias(bias_name, NodeType::zero, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); bias.setModuleName(module_name); if (split_filter_layers) bias.setLayerName(module_name); model.addNodes({ bias }); node_names.push_back(bias_name); delete[] bias_name_char; } else { char* output_name_char = new char[512]; sprintf(output_name_char, "%s-out_H%012d-W%012d", name.data(), output_height_iter, output_width_iter); std::string output_name(output_name_char); Node<TensorT> output(output_name, NodeType::hidden, NodeStatus::initialized, node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad); output.setModuleName(module_name); output.setDropProbability(drop_out_prob); if (split_filter_layers) output.setLayerName(module_name); model.addNodes({ output }); node_names.push_back(output_name); delete[] output_name_char; if (biases && share_weights) { // Create the links between the bias and output nodes char* link_bias_name_char = new char[512]; sprintf(link_bias_name_char, "%s_to_%s_%s", bias_name.data(), output_name.data(), module_name.data()); std::string link_bias_name(link_bias_name_char); Link link_bias(link_bias_name, bias_name, output_name, weight_bias_name); link_bias.setModuleName(module_name); model.addLinks({ link_bias }); delete[] link_bias_name_char; } else if (biases) { // Create the filter bias char* bias_name_char = new char[512]; sprintf(bias_name_char, "%s-bias", output_name.data()); bias_name = std::string(bias_name_char); Node<TensorT> bias(bias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); bias.setModuleName(module_name); model.addNodes({ bias }); delete[] bias_name_char; // Create the shared weights for each bias to output node char* weight_bias_name_char = new char[512]; sprintf(weight_bias_name_char, "%s_to_%s_%s", bias_name.data(), output_name.data(), module_name.data()); weight_bias_name = std::string(weight_bias_name_char); Weight<TensorT> weight_bias(weight_bias_name, weight_init, solver); weight_bias.setModuleName(module_name); model.addWeights({ weight_bias }); delete[] weight_bias_name_char; // Create the links between the bias and output nodes char* link_bias_name_char = new char[512]; sprintf(link_bias_name_char, "%s_to_%s_%s", bias_name.data(), output_name.data(), module_name.data()); std::string link_bias_name(link_bias_name_char); Link link_bias(link_bias_name, bias_name, output_name, weight_bias_name); link_bias.setModuleName(module_name); model.addLinks({ link_bias }); delete[] link_bias_name_char; } } } } if (share_weights) { // Create the shared weights for each filter link for (size_t filter_height_iter = 0; filter_height_iter < extent_height; ++filter_height_iter) { for (size_t filter_width_iter = 0; filter_width_iter < extent_width; ++filter_width_iter) { char* weight_filter_name_char = new char[512]; sprintf(weight_filter_name_char, "%s-%s_H%012d-W%012d", name.data(), module_name.data(), filter_height_iter, filter_width_iter); std::string weight_filter_name(weight_filter_name_char); Weight<TensorT> weight_filter(weight_filter_name, weight_init, solver); weight_filter.setModuleName(module_name); weight_filter.setDropProbability(drop_connection_prob); if (split_filter_layers) weight_filter.setLayerName(module_name); model.addWeights({ weight_filter }); delete[] weight_filter_name_char; } } } // Create the convolution links between input and output int tmp = 0; int output_width_iter = 0; for (size_t width_stride_iter = 0; width_stride_iter < strides_width; ++width_stride_iter) { // check if the filter is in the left input width zero padding const int filter_width_end = stride * width_stride_iter + extent_width; if (filter_width_end <= input_width_zero_padding) continue; // check if the filter is in the right input width zero padding const int filter_width_start = stride * width_stride_iter; if (filter_width_start >= input_width_zero_padding + input_width) continue; // offset the starting width filter for the input zero padding int filter_width_offset_start_tmp = input_width_zero_padding - stride * width_stride_iter; int filter_width_offset_start = maxFunc(filter_width_offset_start_tmp, 0); int filter_width_offset_end_tmp = -input_width_zero_padding + stride * strides_width - stride * width_stride_iter + extent_width; int filter_width_offset_end = minFunc(filter_width_offset_end_tmp, extent_width); int output_height_iter = 0; for (size_t height_stride_iter = 0; height_stride_iter < strides_height; ++height_stride_iter) { // check if the filter is in the top input height zero padding const int filter_height_end = stride * height_stride_iter + extent_height; if (filter_height_end <= input_height_zero_padding) continue; // check if the filter is in the bottom input height zero padding const int filter_height_start = stride * height_stride_iter; if (filter_height_start >= input_height_zero_padding + input_height) continue; // offset starting height filter for the input zero padding int filter_height_offset_start_tmp = input_height_zero_padding - stride * height_stride_iter; int filter_height_offset_start = maxFunc(filter_height_offset_start_tmp, 0); int filter_height_offset_end_tmp = -input_height_zero_padding + stride * strides_height - stride * height_stride_iter + extent_height; int filter_height_offset_end = minFunc(filter_height_offset_end_tmp, extent_height); // create the links between input and output int width_iter_tmp = stride * width_stride_iter - input_width_zero_padding; int width_iter = maxFunc(width_iter_tmp, 0); for (size_t filter_width_iter = filter_width_offset_start; filter_width_iter < filter_width_offset_end; ++filter_width_iter) { int height_iter_tmp = stride * height_stride_iter - input_height_zero_padding; int height_iter = maxFunc(height_iter_tmp, 0); for (size_t filter_height_iter = filter_height_offset_start; filter_height_iter < filter_height_offset_end; ++filter_height_iter) { int source_node_iter = height_iter + width_iter * input_height; if (source_node_iter >= source_node_names.size()) { //std::cout << "WARNING: node size has been exceeded!" << std::endl; break; } // Output node name char* output_name_char = new char[512]; sprintf(output_name_char, "%s-out_H%012d-W%012d", name.data(), output_height_iter + output_height_zero_padding, output_width_iter + output_width_zero_padding); std::string output_name(output_name_char); delete[] output_name_char; // Weight<TensorT> name std::string weight_filter_name; if (share_weights) { char* weight_filter_name_char = new char[512]; sprintf(weight_filter_name_char, "%s-%s_H%012d-W%012d", name.data(), module_name.data(), filter_height_iter, filter_width_iter); weight_filter_name = std::string(weight_filter_name_char); delete[] weight_filter_name_char; } else { char* weight_filter_name_char = new char[512]; sprintf(weight_filter_name_char, "%s_to_%s_%s", source_node_names.at(source_node_iter).data(), output_name.data(), module_name.data()); weight_filter_name = std::string(weight_filter_name_char); Weight<TensorT> weight_filter(weight_filter_name, weight_init, solver); weight_filter.setModuleName(module_name); weight_filter.setDropProbability(drop_connection_prob); if (split_filter_layers) weight_filter.setLayerName(module_name); model.addWeights({ weight_filter }); delete[] weight_filter_name_char; } // Link name char* link_filter_name_char = new char[512]; sprintf(link_filter_name_char, "%s_to_%s_%s", source_node_names[source_node_iter].data(), output_name.data(), module_name.data()); std::string link_filter_name(link_filter_name_char); delete[] link_filter_name_char; Link link_filter(link_filter_name, source_node_names[source_node_iter], output_name, weight_filter_name); link_filter.setModuleName(module_name); model.addLinks({ link_filter }); ++height_iter; } ++width_iter; } ++output_height_iter; } ++output_width_iter; } return node_names; } template<typename TensorT> void ModelBuilder<TensorT>::addConvolution(Model<TensorT> & model, const std::string & name, const std::string& module_name, const std::vector<std::string>& source_node_names, const std::vector<std::string>& sink_node_names, const int & input_width, const int & input_height, const int& input_width_zero_padding, const int& input_height_zero_padding, const int & extent_width, const int & extent_height, const int & stride, const int & output_width_zero_padding, const int& output_height_zero_padding, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, const TensorT& drop_out_prob, const TensorT& drop_connection_prob, bool split_filter_layers) { // Parameters for the Convolution layer assert(source_node_names.size() == input_width * input_height); int input_padded_width = input_width + 2 * input_width_zero_padding; //assert((input_padded_width - extent_width) % stride == 0); if ((input_padded_width - extent_width) % stride != 0) std::cout << "Warning: input width, filter width, and stride lengths will not allow for uniform coverage during convolution." << std::endl; int strides_width = std::floor((input_padded_width - extent_width) / stride) + 1; // includes the starting stride int input_padded_height = input_height + 2 * input_height_zero_padding; //assert((input_padded_height - extent_height) % stride == 0); if ((input_padded_height - extent_height) % stride != 0) std::cout << "Warning: input height, filter height, and stride lengths will not allow for uniform coverage during convolution." << std::endl; int strides_height = std::floor((input_padded_height - extent_height) / stride) + 1; // includes the starting stride int output_nodes = strides_width + strides_height; int output_padded_width = strides_width + 2 * output_width_zero_padding; int output_padded_height = strides_height + 2 * output_height_zero_padding; assert(sink_node_names.size() == output_padded_width * output_padded_height); // Create the shared weights for each filter link for (size_t filter_height_iter = 0; filter_height_iter < extent_height; ++filter_height_iter) { for (size_t filter_width_iter = 0; filter_width_iter < extent_width; ++filter_width_iter) { char* weight_filter_name_char = new char[512]; sprintf(weight_filter_name_char, "%s-%s_H%012d-W%012d", name.data(), module_name.data(), filter_height_iter, filter_width_iter); std::string weight_filter_name(weight_filter_name_char); Weight<TensorT> weight_filter(weight_filter_name, weight_init, solver); weight_filter.setModuleName(module_name); weight_filter.setDropProbability(drop_connection_prob); if (split_filter_layers) weight_filter.setLayerName(module_name); model.addWeights({ weight_filter }); delete[] weight_filter_name_char; } } // Create the convolution links between input and output int tmp = 0; int output_width_iter = 0; for (size_t width_stride_iter = 0; width_stride_iter < strides_width; ++width_stride_iter) { // check if the filter is in the left input width zero padding const int filter_width_end = stride * width_stride_iter + extent_width; if (filter_width_end <= input_width_zero_padding) continue; // check if the filter is in the right input width zero padding const int filter_width_start = stride * width_stride_iter; if (filter_width_start >= input_width_zero_padding + input_width) continue; // offset the starting width filter for the input zero padding int filter_width_offset_start_tmp = input_width_zero_padding - stride * width_stride_iter; int filter_width_offset_start = maxFunc(filter_width_offset_start_tmp, 0); int filter_width_offset_end_tmp = -input_width_zero_padding + stride * strides_width - stride * width_stride_iter + extent_width; int filter_width_offset_end = minFunc(filter_width_offset_end_tmp, extent_width); int output_height_iter = 0; for (size_t height_stride_iter = 0; height_stride_iter < strides_height; ++height_stride_iter) { // check if the filter is in the top input height zero padding const int filter_height_end = stride * height_stride_iter + extent_height; if (filter_height_end <= input_height_zero_padding) continue; // check if the filter is in the bottom input height zero padding const int filter_height_start = stride * height_stride_iter; if (filter_height_start >= input_height_zero_padding + input_height) continue; // offset starting height filter for the input zero padding int filter_height_offset_start_tmp = input_height_zero_padding - stride * height_stride_iter; int filter_height_offset_start = maxFunc(filter_height_offset_start_tmp, 0); int filter_height_offset_end_tmp = -input_height_zero_padding + stride * strides_height - stride * height_stride_iter + extent_height; int filter_height_offset_end = minFunc(filter_height_offset_end_tmp, extent_height); // create the links between input and output int width_iter_tmp = stride * width_stride_iter - input_width_zero_padding; int width_iter = maxFunc(width_iter_tmp, 0); for (size_t filter_width_iter = filter_width_offset_start; filter_width_iter < filter_width_offset_end; ++filter_width_iter) { int height_iter_tmp = stride * height_stride_iter - input_height_zero_padding; int height_iter = maxFunc(height_iter_tmp, 0); for (size_t filter_height_iter = filter_height_offset_start; filter_height_iter < filter_height_offset_end; ++filter_height_iter) { int source_node_iter = height_iter + width_iter * input_height; if (source_node_iter >= source_node_names.size()) { //std::cout << "WARNING: node size has been exceeded!" << std::endl; break; } // Weight<TensorT> name char* weight_filter_name_char = new char[512]; sprintf(weight_filter_name_char, "%s-%s_H%012d-W%012d", name.data(), module_name.data(), filter_height_iter, filter_width_iter); std::string weight_filter_name(weight_filter_name_char); delete[] weight_filter_name_char; // Output node name char* output_name_char = new char[512]; sprintf(output_name_char, "%s-out_H%012d-W%012d", name.data(), output_height_iter + output_height_zero_padding, output_width_iter + output_width_zero_padding); std::string output_name(output_name_char); assert(std::count(sink_node_names.begin(), sink_node_names.end(), output_name) == 1); delete[] output_name_char; // Link name char* link_filter_name_char = new char[512]; sprintf(link_filter_name_char, "%s_to_%s_%s", source_node_names[source_node_iter].data(), output_name.data(), module_name.data()); std::string link_filter_name(link_filter_name_char); delete[] link_filter_name_char; Link link_filter(link_filter_name, source_node_names[source_node_iter], output_name, weight_filter_name); link_filter.setModuleName(module_name); model.addLinks({ link_filter }); ++height_iter; } ++width_iter; } ++output_height_iter; } ++output_width_iter; } } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addProjection(Model<TensorT> & model, const std::string & name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int & input_width, const int & input_height, const int& input_width_zero_padding, const int& input_height_zero_padding, const int & extent_width, const int & extent_height, const int & spacing, const int & output_width_zero_padding, const int& output_height_zero_padding, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, const TensorT& drop_out_prob, const TensorT& drop_connection_prob, const bool& biases, bool split_filter_layers, bool share_weights) { std::vector<std::string> node_names; // Parameters for the Convolution layer assert(source_node_names.size() == input_width * input_height); int input_padded_width = input_width + 2 * input_width_zero_padding; int input_padded_height = input_height + 2 * input_height_zero_padding; int strides_width = input_padded_width; int strides_height = input_padded_height; int output_width = input_padded_width + (extent_width - 1) + input_padded_width * (spacing - 1); int output_height = input_padded_height + (extent_height - 1) + input_padded_height * (spacing - 1); int output_padded_width = output_width + 2 * output_width_zero_padding; int output_padded_height = output_height + 2 * output_height_zero_padding; // [TODO: would need to be refactored to add a bias for each filter output (i.e., extent_width * extent_height)] std::string bias_name; //std::string weight_bias_name; //if (biases) { // // Create the filter bias // char* bias_name_char = new char[512]; // sprintf(bias_name_char, "%s-bias", name.data()); // bias_name = std::string(bias_name_char); // Node<TensorT> bias(bias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); // bias.setModuleName(module_name); // model.addNodes({ bias }); // // Create the shared weights for each bias to output node // char* weight_bias_name_char = new char[512]; // sprintf(weight_bias_name_char, "%s_to_out", bias_name.data()); // weight_bias_name = std::string(weight_bias_name_char); // Weight<TensorT> weight_bias(weight_bias_name, weight_init, solver); // weight_bias.setModuleName(module_name); // weight_bias.setDropProbability(drop_connection_prob); // model.addWeights({ weight_bias }); //} // Create the output zero padding nodes for (size_t output_width_iter = 0; output_width_iter < output_padded_width; ++output_width_iter) { for (size_t output_height_iter = 0; output_height_iter < output_padded_height; ++output_height_iter) { if (output_height_iter < output_height_zero_padding || output_height_iter >= output_padded_height - output_height_zero_padding) { char* bias_name_char = new char[512]; sprintf(bias_name_char, "%s-out-padding_H%012d-W%012d", name.data(), output_height_iter, output_width_iter); std::string bias_name(bias_name_char); Node<TensorT> bias(bias_name, NodeType::zero, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); bias.setModuleName(module_name); if (split_filter_layers) bias.setLayerName(module_name); model.addNodes({ bias }); node_names.push_back(bias_name); delete[] bias_name_char; } else if (output_width_iter < output_width_zero_padding || output_width_iter >= output_padded_width - output_width_zero_padding) { char* bias_name_char = new char[512]; sprintf(bias_name_char, "%s-out-padding_H%012d-W%012d", name.data(), output_height_iter, output_width_iter); std::string bias_name(bias_name_char); Node<TensorT> bias(bias_name, NodeType::zero, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); bias.setModuleName(module_name); if (split_filter_layers) bias.setLayerName(module_name); model.addNodes({ bias }); node_names.push_back(bias_name); delete[] bias_name_char; } else { char* output_name_char = new char[512]; sprintf(output_name_char, "%s-out_H%012d-W%012d", name.data(), output_height_iter, output_width_iter); std::string output_name(output_name_char); Node<TensorT> output(output_name, NodeType::hidden, NodeStatus::activated, node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad); output.setModuleName(module_name); output.setDropProbability(drop_out_prob); if (split_filter_layers) output.setLayerName(module_name); model.addNodes({ output }); node_names.push_back(output_name); delete[] output_name_char; //if (biases) { // // Create the links between the bias and output nodes // char* link_bias_name_char = new char[512]; // sprintf(link_bias_name_char, "%s_to_%s_%s", bias_name.data(), output_name.data(), module_name.data()); // std::string link_bias_name(link_bias_name_char); // Link link_bias(link_bias_name, bias_name, output_name, weight_bias_name); // link_bias.setModuleName(module_name); // model.addLinks({ link_bias }); //} } } } if (share_weights) { // Create the shared weights for each filter link for (size_t filter_height_iter = 0; filter_height_iter < extent_height; ++filter_height_iter) { for (size_t filter_width_iter = 0; filter_width_iter < extent_width; ++filter_width_iter) { char* weight_filter_name_char = new char[512]; sprintf(weight_filter_name_char, "%s-%s_H%012d-W%012d", name.data(), module_name.data(), filter_height_iter, filter_width_iter); std::string weight_filter_name(weight_filter_name_char); Weight<TensorT> weight_filter(weight_filter_name, weight_init, solver); weight_filter.setModuleName(module_name); weight_filter.setDropProbability(drop_connection_prob); if (split_filter_layers) weight_filter.setLayerName(module_name); model.addWeights({ weight_filter }); delete[] weight_filter_name_char; } } } // Create the projection links between input and output int tmp = 0; for (size_t width_stride_iter = 0; width_stride_iter < strides_width; ++width_stride_iter) { // check if the filter is in the left input width zero padding const int filter_width_end = (spacing - 1) * width_stride_iter + width_stride_iter + extent_width - 1; if (width_stride_iter < input_width_zero_padding) continue; // check if the filter is in the right input width zero padding const int filter_width_start = (spacing - 1) * width_stride_iter + width_stride_iter; if (width_stride_iter >= input_width_zero_padding + input_width) continue; for (size_t height_stride_iter = 0; height_stride_iter < strides_height; ++height_stride_iter) { // check if the filter is in the top input height zero padding const int filter_height_end = (spacing - 1) * height_stride_iter + height_stride_iter + extent_height - 1; if (height_stride_iter < input_height_zero_padding) continue; // check if the filter is in the bottom input height zero padding const int filter_height_start = (spacing - 1) * height_stride_iter + height_stride_iter; if (height_stride_iter >= input_height_zero_padding + input_height) continue; // create the links between input and output int width_iter = width_stride_iter - input_width_zero_padding; int height_iter = height_stride_iter - input_height_zero_padding; int source_node_iter = height_iter + width_iter * input_height; if (source_node_iter >= source_node_names.size()) { //std::cout << "WARNING: node size has been exceeded!" << std::endl; break; } int filter_width_iter = 0; for (size_t filter_width_pos = filter_width_start; filter_width_pos <= filter_width_end; ++filter_width_pos) { int filter_height_iter = 0; for (size_t filter_height_pos = filter_height_start; filter_height_pos <= filter_height_end; ++filter_height_pos) { // Output node name char* output_name_char = new char[512]; sprintf(output_name_char, "%s-out_H%012d-W%012d", name.data(), filter_height_pos + output_height_zero_padding, filter_width_pos + output_width_zero_padding); std::string output_name(output_name_char); delete[] output_name_char; // Weight name std::string weight_filter_name; if (share_weights) { char* weight_filter_name_char = new char[512]; sprintf(weight_filter_name_char, "%s-%s_H%012d-W%012d", name.data(), module_name.data(), filter_height_iter, filter_width_iter); weight_filter_name = std::string(weight_filter_name_char); delete[] weight_filter_name_char; } else { char* weight_filter_name_char = new char[512]; sprintf(weight_filter_name_char, "%s_to_%s_%s", source_node_names[source_node_iter].data(), output_name.data(), module_name.data()); weight_filter_name = std::string(weight_filter_name_char); Weight<TensorT> weight_filter(weight_filter_name, weight_init, solver); weight_filter.setModuleName(module_name); weight_filter.setDropProbability(drop_connection_prob); if (split_filter_layers) weight_filter.setLayerName(module_name); model.addWeights({ weight_filter }); delete[] weight_filter_name_char; } // Link name char* link_filter_name_char = new char[512]; sprintf(link_filter_name_char, "%s_to_%s_%s", source_node_names[source_node_iter].data(), output_name.data(), module_name.data()); std::string link_filter_name(link_filter_name_char); delete[] link_filter_name_char; Link link_filter(link_filter_name, source_node_names[source_node_iter], output_name, weight_filter_name); link_filter.setModuleName(module_name); model.addLinks({ link_filter }); ++filter_height_iter; } ++filter_width_iter; } } } return node_names; } template<typename TensorT> inline void ModelBuilder<TensorT>::addProjection(Model<TensorT>& model, const std::string & name, const std::string & module_name, const std::vector<std::string>& source_node_names, const std::vector<std::string>& output_node_names, const int & input_width, const int & input_height, const int & input_width_zero_padding, const int & input_height_zero_padding, const int & extent_width, const int & extent_height, const int & spacing, const int & output_width_zero_padding, const int & output_height_zero_padding, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob, const TensorT& drop_connection_prob, bool split_filter_layers) { // Parameters for the Convolution layer assert(source_node_names.size() == input_width * input_height); int input_padded_width = input_width + 2 * input_width_zero_padding; int input_padded_height = input_height + 2 * input_height_zero_padding; int strides_width = input_padded_width; int strides_height = input_padded_height; int output_width = input_padded_width + (extent_width - 1) + input_padded_width * (spacing - 1); int output_height = input_padded_height + (extent_height - 1) + input_padded_height * (spacing - 1); int output_padded_width = output_width + 2 * output_width_zero_padding; int output_padded_height = output_height + 2 * output_height_zero_padding; // Create the shared weights for each filter link for (size_t filter_height_iter = 0; filter_height_iter < extent_height; ++filter_height_iter) { for (size_t filter_width_iter = 0; filter_width_iter < extent_width; ++filter_width_iter) { char* weight_filter_name_char = new char[512]; sprintf(weight_filter_name_char, "%s-%s_H%012d-W%012d", name.data(), module_name.data(), filter_height_iter, filter_width_iter); std::string weight_filter_name(weight_filter_name_char); Weight<TensorT> weight_filter(weight_filter_name, weight_init, solver); weight_filter.setModuleName(module_name); weight_filter.setDropProbability(drop_connection_prob); if (split_filter_layers) weight_filter.setLayerName(module_name); model.addWeights({ weight_filter }); delete[] weight_filter_name_char; } } // Create the projection links between input and output int tmp = 0; for (size_t width_stride_iter = 0; width_stride_iter < strides_width; ++width_stride_iter) { // check if the filter is in the left input width zero padding const int filter_width_end = (spacing - 1) * width_stride_iter + width_stride_iter + extent_width - 1; if (width_stride_iter < input_width_zero_padding) continue; // check if the filter is in the right input width zero padding const int filter_width_start = (spacing - 1) * width_stride_iter + width_stride_iter; if (width_stride_iter >= input_width_zero_padding + input_width) continue; for (size_t height_stride_iter = 0; height_stride_iter < strides_height; ++height_stride_iter) { // check if the filter is in the top input height zero padding const int filter_height_end = (spacing - 1) * height_stride_iter + height_stride_iter + extent_height - 1; if (height_stride_iter < input_height_zero_padding) continue; // check if the filter is in the bottom input height zero padding const int filter_height_start = (spacing - 1) * height_stride_iter + height_stride_iter; if (height_stride_iter >= input_height_zero_padding + input_height) continue; // create the links between input and output int width_iter = width_stride_iter - input_width_zero_padding; int height_iter = height_stride_iter - input_height_zero_padding; int source_node_iter = height_iter + width_iter * input_height; if (source_node_iter >= source_node_names.size()) { //std::cout << "WARNING: node size has been exceeded!" << std::endl; break; } int filter_width_iter = 0; for (size_t filter_width_pos = filter_width_start; filter_width_pos <= filter_width_end; ++filter_width_pos) { int filter_height_iter = 0; for (size_t filter_height_pos = filter_height_start; filter_height_pos <= filter_height_end; ++filter_height_pos) { // Weight name char* weight_filter_name_char = new char[512]; sprintf(weight_filter_name_char, "%s-%s_H%012d-W%012d", name.data(), module_name.data(), filter_height_iter, filter_width_iter); std::string weight_filter_name(weight_filter_name_char); delete[] weight_filter_name_char; // Output node name char* output_name_char = new char[512]; sprintf(output_name_char, "%s-out_H%012d-W%012d", name.data(), filter_height_pos + output_height_zero_padding, filter_width_pos + output_width_zero_padding); std::string output_name(output_name_char); delete[] output_name_char; // Link name char* link_filter_name_char = new char[512]; sprintf(link_filter_name_char, "%s_to_%s_%s", source_node_names[source_node_iter].data(), output_name.data(), module_name.data()); std::string link_filter_name(link_filter_name_char); delete[] link_filter_name_char; Link link_filter(link_filter_name, source_node_names[source_node_iter], output_name, weight_filter_name); link_filter.setModuleName(module_name); model.addLinks({ link_filter }); ++filter_height_iter; } ++filter_width_iter; } } } } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addNormalization(Model<TensorT> & model, const std::string & name, const std::string & module_name, const std::vector<std::string>& source_node_names, const bool& specify_layers) { std::vector<std::string> node_names; std::string unity_weight_name, negunity_weight_name; // Make the mean/linear node char* mean_name_char = new char[512]; sprintf(mean_name_char, "%s-Mean", name.data()); std::string mean_name(mean_name_char); Node<TensorT> mean(mean_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<MeanOp<TensorT>>(MeanOp<TensorT>()), std::make_shared<MeanErrorOp<TensorT>>(MeanErrorOp<TensorT>()), std::make_shared<MeanWeightGradOp<TensorT>>(MeanWeightGradOp<TensorT>())); mean.setModuleName(module_name); if (specify_layers) mean.setLayerName(module_name + "-Mean"); model.addNodes({ mean }); delete[] mean_name_char; //node_names.push_back(mean_name); // Make the variance/inverse sqrt node char* variance_name_char = new char[512]; sprintf(variance_name_char, "%s-Variance", name.data()); std::string variance_name(variance_name_char); Node<TensorT> variance(variance_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<PowOp<TensorT>>(PowOp<TensorT>(-0.5)), std::make_shared<PowGradOp<TensorT>>(PowGradOp<TensorT>(-0.5)), std::make_shared<VarModOp<TensorT>>(VarModOp<TensorT>()), std::make_shared<VarModErrorOp<TensorT>>(VarModErrorOp<TensorT>()), std::make_shared<VarModWeightGradOp<TensorT>>(VarModWeightGradOp<TensorT>())); variance.setModuleName(module_name); if (specify_layers) mean.setLayerName(module_name + "-Variance"); model.addNodes({ variance }); delete[] variance_name_char; for (const std::string& node_name : source_node_names) { // Make the source-mean nodes char* sourceMinMean_name_char = new char[512]; sprintf(sourceMinMean_name_char, "%s-SourceMinMean", node_name.data()); std::string sourceMinMean_name(sourceMinMean_name_char); Node<TensorT> sourceMinMean(sourceMinMean_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); sourceMinMean.setModuleName(module_name); if (specify_layers) sourceMinMean.setLayerName(module_name + "-SourceMinMean"); model.addNodes({ sourceMinMean }); delete[] sourceMinMean_name_char; // Make the normalized nodes char* normalized_name_char = new char[512]; sprintf(normalized_name_char, "%s-Normalized", node_name.data()); std::string normalized_name(normalized_name_char); Node<TensorT> normalized(normalized_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); normalized.setModuleName(module_name); if (specify_layers) normalized.setLayerName(module_name + "-Normalized"); model.addNodes({ normalized }); node_names.push_back(normalized_name); delete[] normalized_name_char; // Make the weights/links from source to mean unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", node_name, mean_name, specify_layers); char* sToM_link_name_char = new char[512]; sprintf(sToM_link_name_char, "%s_to_%s", node_name.data(), mean_name.data()); std::string sToM_link_name(sToM_link_name_char); Link sToM_link(sToM_link_name, node_name, mean_name, unity_weight_name); sToM_link.setModuleName(module_name); model.addLinks({ sToM_link }); delete[] sToM_link_name_char; // Make the links from source to sourceMinMean unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", node_name, sourceMinMean_name, specify_layers); char* sToSMinM_link_name_char = new char[512]; sprintf(sToSMinM_link_name_char, "%s_to_%s", node_name.data(), sourceMinMean_name.data()); std::string sToSMinM_link_name(sToSMinM_link_name_char); Link sToSMinM_link(sToSMinM_link_name, node_name, sourceMinMean_name, unity_weight_name); sToSMinM_link.setModuleName(module_name); model.addLinks({ sToSMinM_link }); delete[] sToSMinM_link_name_char; // Make the links from the mean to sourceMinMean negunity_weight_name = makeUnityWeight(model, -1.0, module_name, "%s_to_%s", mean_name, sourceMinMean_name, specify_layers); char* mToSMinM_link_name_char = new char[512]; sprintf(mToSMinM_link_name_char, "%s_to_%s", mean_name.data(), sourceMinMean_name.data()); std::string mToSMinM_link_name(mToSMinM_link_name_char); Link mToSMinM_link(mToSMinM_link_name, mean_name, sourceMinMean_name, negunity_weight_name); mToSMinM_link.setModuleName(module_name); model.addLinks({ mToSMinM_link }); delete[] mToSMinM_link_name_char; // Make the links from sourceMinMean to variance unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", sourceMinMean_name, variance_name, specify_layers); char* sMinMToV_link_name_char = new char[512]; sprintf(sMinMToV_link_name_char, "%s_to_%s", sourceMinMean_name.data(), variance_name.data()); std::string sMinMToV_link_name(sMinMToV_link_name_char); Link sMinMToV_link(sMinMToV_link_name, sourceMinMean_name, variance_name, unity_weight_name); sMinMToV_link.setModuleName(module_name); model.addLinks({ sMinMToV_link }); delete[] sMinMToV_link_name_char; // Make the weights/links from sourceMinMean to normalized unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", sourceMinMean_name, normalized_name, specify_layers); char* sMinMToN_link_name_char = new char[512]; sprintf(sMinMToN_link_name_char, "%s_to_%s", sourceMinMean_name.data(), normalized_name.data()); std::string sMinMToN_link_name(sMinMToN_link_name_char); Link sMinMToN_link(sMinMToN_link_name, sourceMinMean_name, normalized_name, unity_weight_name); sMinMToN_link.setModuleName(module_name); model.addLinks({ sMinMToN_link }); delete[] sMinMToN_link_name_char; // Make the links from variance to normalized unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", variance_name, normalized_name); char* vToN_link_name_char = new char[512]; sprintf(vToN_link_name_char, "%s_to_%s", variance_name.data(), normalized_name.data()); std::string vToN_link_name(vToN_link_name_char); Link vToN_link(vToN_link_name, variance_name, normalized_name, unity_weight_name); vToN_link.setModuleName(module_name); model.addLinks({ vToN_link }); delete[] vToN_link_name_char; } return node_names; } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addUnitScale(Model<TensorT> & model, const std::string & name, const std::string & module_name, const std::vector<std::string>& source_node_names, const bool& specify_layers) { std::vector<std::string> node_names; std::string unity_weight_name, negunity_weight_name; // Make the max/linear node char* max_name_char = new char[512]; sprintf(max_name_char, "%s-Max", name.data()); std::string max_name(max_name_char); Node<TensorT> max(max_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<MaxOp<TensorT>>(MaxOp<TensorT>()), std::make_shared<MaxErrorOp<TensorT>>(MaxErrorOp<TensorT>()), std::make_shared<MaxWeightGradOp<TensorT>>(MaxWeightGradOp<TensorT>())); max.setModuleName(module_name); if (specify_layers) max.setLayerName(module_name + "-MinMax"); model.addNodes({ max }); delete[] max_name_char; // Make the min/linear node char* min_name_char = new char[512]; sprintf(min_name_char, "%s-Min", name.data()); std::string min_name(min_name_char); Node<TensorT> min(min_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<MinOp<TensorT>>(MinOp<TensorT>()), std::make_shared<MinErrorOp<TensorT>>(MinErrorOp<TensorT>()), std::make_shared<MinWeightGradOp<TensorT>>(MinWeightGradOp<TensorT>())); min.setModuleName(module_name); if (specify_layers) min.setLayerName(module_name + "-MinMax"); model.addNodes({ min }); delete[] min_name_char; // Make the sum/inverse sqrt node char* scalar_name_char = new char[512]; sprintf(scalar_name_char, "%s-Scalar", name.data()); std::string scalar_name(scalar_name_char); Node<TensorT> scalar(scalar_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<PowOp<TensorT>>(PowOp<TensorT>(-0.5)), std::make_shared<PowGradOp<TensorT>>(PowGradOp<TensorT>(-0.5)), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); scalar.setModuleName(module_name); if (specify_layers) max.setLayerName(module_name + "-Scalar"); model.addNodes({ scalar }); delete[] scalar_name_char; // Make the link from the max to the scalar node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", max_name, scalar_name, specify_layers); char* maxToScalar_link_name_char = new char[512]; sprintf(maxToScalar_link_name_char, "%s_to_%s", max_name.data(), scalar_name.data()); std::string maxToScalar_link_name(maxToScalar_link_name_char); Link maxToScalar_link(maxToScalar_link_name, max_name, scalar_name, unity_weight_name); maxToScalar_link.setModuleName(module_name); model.addLinks({ maxToScalar_link }); delete[] maxToScalar_link_name_char; // Make the link from the min to the scalar node unity_weight_name = makeUnityWeight(model, -1.0, module_name, "%s_to_%s", min_name, scalar_name, specify_layers); char* minToScalar_link_name_char = new char[512]; sprintf(minToScalar_link_name_char, "%s_to_%s", min_name.data(), scalar_name.data()); std::string minToScalar_link_name(minToScalar_link_name_char); Link minToScalar_link(minToScalar_link_name, min_name, scalar_name, unity_weight_name); minToScalar_link.setModuleName(module_name); model.addLinks({ minToScalar_link }); delete[] minToScalar_link_name_char; for (const std::string& node_name : source_node_names) { // Make the unitScale nodes char* unitScale_name_char = new char[512]; sprintf(unitScale_name_char, "%s-UnitScaled", node_name.data()); std::string unitScale_name(unitScale_name_char); Node<TensorT> unitScale(unitScale_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); unitScale.setModuleName(module_name); if (specify_layers) unitScale.setLayerName(module_name + "-UnitScaled"); model.addNodes({ unitScale }); node_names.push_back(unitScale_name); delete[] unitScale_name_char; // Make the weights/links from source to max unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", node_name, max_name, specify_layers); char* sToM_link_name_char = new char[512]; sprintf(sToM_link_name_char, "%s_to_%s", node_name.data(), max_name.data()); std::string sToM_link_name(sToM_link_name_char); Link sToM_link(sToM_link_name, node_name, max_name, unity_weight_name); sToM_link.setModuleName(module_name); model.addLinks({ sToM_link }); delete[] sToM_link_name_char; // Make the weights/links from source to min unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", node_name, min_name, specify_layers); char* sToMin_link_name_char = new char[512]; sprintf(sToMin_link_name_char, "%s_to_%s", node_name.data(), min_name.data()); std::string sToMin_link_name(sToMin_link_name_char); Link sToMin_link(sToMin_link_name, node_name, min_name, unity_weight_name); sToMin_link.setModuleName(module_name); model.addLinks({ sToMin_link }); delete[] sToMin_link_name_char; // Make the links from scalar to unitScale unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", scalar_name, unitScale_name); char* vToN_link_name_char = new char[512]; sprintf(vToN_link_name_char, "%s_to_%s", scalar_name.data(), unitScale_name.data()); std::string vToN_link_name(vToN_link_name_char); Link vToN_link(vToN_link_name, scalar_name, unitScale_name, unity_weight_name); vToN_link.setModuleName(module_name); model.addLinks({ vToN_link }); delete[] vToN_link_name_char; } return node_names; } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addLinearScale(Model<TensorT> & model, const std::string & name, const std::string & module_name, const std::vector<std::string>& source_node_names, const TensorT& range_min, const TensorT& range_max, const bool& specify_layers) { std::vector<std::string> node_names; std::string unity_weight_name, negunity_weight_name; // Make the max/linear node char* max_name_char = new char[512]; sprintf(max_name_char, "%s-Max", name.data()); std::string max_name(max_name_char); Node<TensorT> max(max_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<MaxOp<TensorT>>(MaxOp<TensorT>()), std::make_shared<MaxErrorOp<TensorT>>(MaxErrorOp<TensorT>()), std::make_shared<MaxWeightGradOp<TensorT>>(MaxWeightGradOp<TensorT>())); max.setModuleName(module_name); if (specify_layers) max.setLayerName(module_name + "-MinMax"); model.addNodes({ max }); delete[] max_name_char; // Make the min/linear node char* min_name_char = new char[512]; sprintf(min_name_char, "%s-Min", name.data()); std::string min_name(min_name_char); Node<TensorT> min(min_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<MinOp<TensorT>>(MinOp<TensorT>()), std::make_shared<MinErrorOp<TensorT>>(MinErrorOp<TensorT>()), std::make_shared<MinWeightGradOp<TensorT>>(MinWeightGradOp<TensorT>())); min.setModuleName(module_name); if (specify_layers) min.setLayerName(module_name + "-DomainMinMax"); model.addNodes({ min }); delete[] min_name_char; // Make the sum/inverse node char* scalar_name_char = new char[512]; sprintf(scalar_name_char, "%s-Scalar", name.data()); std::string scalar_name(scalar_name_char); Node<TensorT> scalar(scalar_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<InverseOp<TensorT>>(InverseOp<TensorT>()), std::make_shared<InverseGradOp<TensorT>>(InverseGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); scalar.setModuleName(module_name); if (specify_layers) max.setLayerName(module_name + "-DomainScalar"); model.addNodes({ scalar }); delete[] scalar_name_char; // Make the link from the max to the scalar node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", max_name, scalar_name, specify_layers); char* maxToScalar_link_name_char = new char[512]; sprintf(maxToScalar_link_name_char, "%s_to_%s", max_name.data(), scalar_name.data()); std::string maxToScalar_link_name(maxToScalar_link_name_char); Link maxToScalar_link(maxToScalar_link_name, max_name, scalar_name, unity_weight_name); maxToScalar_link.setModuleName(module_name); model.addLinks({ maxToScalar_link }); delete[] maxToScalar_link_name_char; // Make the link from the min to the scalar node unity_weight_name = makeUnityWeight(model, -1.0, module_name, "%s_to_%s", min_name, scalar_name, specify_layers); char* minToScalar_link_name_char = new char[512]; sprintf(minToScalar_link_name_char, "%s_to_%s", min_name.data(), scalar_name.data()); std::string minToScalar_link_name(minToScalar_link_name_char); Link minToScalar_link(minToScalar_link_name, min_name, scalar_name, unity_weight_name); minToScalar_link.setModuleName(module_name); model.addLinks({ minToScalar_link }); delete[] minToScalar_link_name_char; // Make the range max minus min bias char* rangeMaxMinBias_name_char = new char[512]; sprintf(rangeMaxMinBias_name_char, "%s-RangeMaxMinBias", module_name.data()); std::string rangeMaxMinBias_name(rangeMaxMinBias_name_char); Node<TensorT> rangeMaxMinBias(rangeMaxMinBias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); rangeMaxMinBias.setModuleName(module_name); if (specify_layers) rangeMaxMinBias.setLayerName(module_name + "-RangeMaxMinBias"); model.addNodes({ rangeMaxMinBias }); delete[] rangeMaxMinBias_name_char; // Make the range min bias char* rangeMinBias_name_char = new char[512]; sprintf(rangeMinBias_name_char, "%s-RangeMinBias", module_name.data()); std::string rangeMinBias_name(rangeMinBias_name_char); Node<TensorT> rangeMinBias(rangeMinBias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); rangeMinBias.setModuleName(module_name); if (specify_layers) rangeMinBias.setLayerName(module_name + "-RangeMinBias"); model.addNodes({ rangeMinBias }); delete[] rangeMinBias_name_char; for (const std::string& node_name : source_node_names) { // Make the dMinOffset nodes char* dMinOffset_name_char = new char[512]; sprintf(dMinOffset_name_char, "%s-DomainMinOffset", node_name.data()); std::string dMinOffset_name(dMinOffset_name_char); Node<TensorT> dMinOffset(dMinOffset_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); dMinOffset.setModuleName(module_name); if (specify_layers) dMinOffset.setLayerName(module_name + "-DomainMinOffset"); model.addNodes({ dMinOffset }); delete[] dMinOffset_name_char; // Make the weights/links from source to max unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", node_name, max_name, specify_layers); char* sToM_link_name_char = new char[512]; sprintf(sToM_link_name_char, "%s_to_%s", node_name.data(), max_name.data()); std::string sToM_link_name(sToM_link_name_char); Link sToM_link(sToM_link_name, node_name, max_name, unity_weight_name); sToM_link.setModuleName(module_name); model.addLinks({ sToM_link }); delete[] sToM_link_name_char; // Make the weights/links from source to min unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", node_name, min_name, specify_layers); char* sToMin_link_name_char = new char[512]; sprintf(sToMin_link_name_char, "%s_to_%s", node_name.data(), min_name.data()); std::string sToMin_link_name(sToMin_link_name_char); Link sToMin_link(sToMin_link_name, node_name, min_name, unity_weight_name); sToMin_link.setModuleName(module_name); model.addLinks({ sToMin_link }); delete[] sToMin_link_name_char; // Make the weights/links from the min to the dMinOffset unity_weight_name = makeUnityWeight(model, -1.0, module_name, "%s_to_%s", min_name, dMinOffset_name, specify_layers); char* minToDMinOffset_link_name_char = new char[512]; sprintf(minToDMinOffset_link_name_char, "%s_to_%s", min_name.data(), dMinOffset_name.data()); std::string minToDMinOffset_link_name(minToDMinOffset_link_name_char); Link minToDMinOffset_link(minToDMinOffset_link_name, min_name, dMinOffset_name, unity_weight_name); minToDMinOffset_link.setModuleName(module_name); model.addLinks({ minToDMinOffset_link }); delete[] minToDMinOffset_link_name_char; // Make the weights/links from the source to the dMinOffset unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", node_name, dMinOffset_name, specify_layers); char* sToDMinOffset_link_name_char = new char[512]; sprintf(sToDMinOffset_link_name_char, "%s_to_%s", node_name.data(), dMinOffset_name.data()); std::string sToDMinOffset_link_name(sToDMinOffset_link_name_char); Link sToDMinOffset_link(sToDMinOffset_link_name, node_name, dMinOffset_name, unity_weight_name); sToDMinOffset_link.setModuleName(module_name); model.addLinks({ sToDMinOffset_link }); delete[] sToDMinOffset_link_name_char; // Make the domainScale node char* dScale_name_char = new char[512]; sprintf(dScale_name_char, "%s-DomainScaled", node_name.data()); std::string dScale_name(dScale_name_char); Node<TensorT> dScale(dScale_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); dScale.setModuleName(module_name); if (specify_layers) dScale.setLayerName(module_name + "-DomainScaled"); model.addNodes({ dScale }); delete[] dScale_name_char; // Make the links/weights from the scalar to the domainScale node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", scalar_name, dScale_name, specify_layers); char* scalarToDScale_link_name_char = new char[512]; sprintf(scalarToDScale_link_name_char, "%s_to_%s", scalar_name.data(), dScale_name.data()); std::string scalarToDScale_link_name(scalarToDScale_link_name_char); Link scalarToDScale_link(scalarToDScale_link_name, scalar_name, dScale_name, unity_weight_name); scalarToDScale_link.setModuleName(module_name); model.addLinks({ scalarToDScale_link }); delete[] scalarToDScale_link_name_char; // Make the links/weights from the dMinOffset to the domainScale nodes unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", dMinOffset_name, dScale_name, specify_layers); char* dMinOffsetToDScale_link_name_char = new char[512]; sprintf(dMinOffsetToDScale_link_name_char, "%s_to_%s", dMinOffset_name.data(), dScale_name.data()); std::string dMinOffsetToDScale_link_name(dMinOffsetToDScale_link_name_char); Link dMinOffsetToDScale_link(dMinOffsetToDScale_link_name, dMinOffset_name, dScale_name, unity_weight_name); dMinOffsetToDScale_link.setModuleName(module_name); model.addLinks({ dMinOffsetToDScale_link }); delete[] dMinOffsetToDScale_link_name_char; // Make the rangeMaxMinScale node char* rangeMaxMinScale_name_char = new char[512]; sprintf(rangeMaxMinScale_name_char, "%s-RangeMaxMinScale", node_name.data()); std::string rangeMaxMinScale_name(rangeMaxMinScale_name_char); Node<TensorT> rangeMaxMinScale(rangeMaxMinScale_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); rangeMaxMinScale.setModuleName(module_name); if (specify_layers) rangeMaxMinScale.setLayerName(module_name + "-RangeMaxMinScale"); model.addNodes({ rangeMaxMinScale }); delete[] rangeMaxMinScale_name_char; // Make the links/weights from the rangeMaxMin to the rangeMaxMinScale node char* rangeMaxMinToRangeMaxMinScale_weight_name_char = new char[512]; sprintf(rangeMaxMinToRangeMaxMinScale_weight_name_char, "%s_to_%s", rangeMaxMinBias_name.data(), rangeMaxMinScale_name.data()); std::string rangeMaxMinToRangeMaxMinScale_weight_name(rangeMaxMinToRangeMaxMinScale_weight_name_char); std::shared_ptr<WeightInitOp<TensorT>> rangeMaxMinToRangeMaxMinScale_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(range_max - range_min)); std::shared_ptr<SolverOp<TensorT>> rangeMaxMinToRangeMaxMinScale_solver = std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()); Weight<TensorT> rangeMaxMinToRangeMaxMinScale_weight(rangeMaxMinToRangeMaxMinScale_weight_name, rangeMaxMinToRangeMaxMinScale_weight_init, rangeMaxMinToRangeMaxMinScale_solver); rangeMaxMinToRangeMaxMinScale_weight.setModuleName(module_name); model.addWeights({ rangeMaxMinToRangeMaxMinScale_weight }); delete[] rangeMaxMinToRangeMaxMinScale_weight_name_char; char* rangeMaxMinToRangeMaxMinScale_link_name_char = new char[512]; sprintf(rangeMaxMinToRangeMaxMinScale_link_name_char, "%s_to_%s", rangeMaxMinBias_name.data(), rangeMaxMinScale_name.data()); std::string rangeMaxMinToRangeMaxMinScale_link_name(rangeMaxMinToRangeMaxMinScale_link_name_char); Link rangeMaxMinToRangeMaxMinScale_link(rangeMaxMinToRangeMaxMinScale_link_name, rangeMaxMinBias_name, rangeMaxMinScale_name, rangeMaxMinToRangeMaxMinScale_weight_name); rangeMaxMinToRangeMaxMinScale_link.setModuleName(module_name); model.addLinks({ rangeMaxMinToRangeMaxMinScale_link }); delete[] rangeMaxMinToRangeMaxMinScale_link_name_char; // Make the links/weights from the domainScale nodes to the rangeMaxMinScale node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", dScale_name, rangeMaxMinScale_name, specify_layers); char* dScaleToRMaxMinScale_link_name_char = new char[512]; sprintf(dScaleToRMaxMinScale_link_name_char, "%s_to_%s", dScale_name.data(), rangeMaxMinScale_name.data()); std::string dScaleToRMaxMinScale_link_name(dScaleToRMaxMinScale_link_name_char); Link dScaleToRMaxMinScale_link(dScaleToRMaxMinScale_link_name, dScale_name, rangeMaxMinScale_name, unity_weight_name); dScaleToRMaxMinScale_link.setModuleName(module_name); model.addLinks({ dScaleToRMaxMinScale_link }); delete[] dScaleToRMaxMinScale_link_name_char; // Make the LinearScaleFunctor node char* linearScale_name_char = new char[512]; sprintf(linearScale_name_char, "%s-LinearScaleFunctor", node_name.data()); std::string linearScale_name(linearScale_name_char); Node<TensorT> linearScale(linearScale_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); linearScale.setModuleName(module_name); if (specify_layers) linearScale.setLayerName(module_name + "-LinearScaleFunctor"); model.addNodes({ linearScale }); node_names.push_back(linearScale_name); delete[] linearScale_name_char; // Make the links/weights from the rangeMin node to the LinearScaleFunctor node char* rangeMinToLinearScale_weight_name_char = new char[512]; sprintf(rangeMinToLinearScale_weight_name_char, "%s_to_%s", rangeMinBias_name.data(), linearScale_name.data()); std::string rangeMinToLinearScale_weight_name(rangeMinToLinearScale_weight_name_char); std::shared_ptr<WeightInitOp<TensorT>> rangeMinToLinearScale_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(range_min)); std::shared_ptr<SolverOp<TensorT>> rangeMinToLinearScale_solver = std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()); Weight<TensorT> rangeMinToLinearScale_weight(rangeMinToLinearScale_weight_name, rangeMinToLinearScale_weight_init, rangeMinToLinearScale_solver); rangeMinToLinearScale_weight.setModuleName(module_name); model.addWeights({ rangeMinToLinearScale_weight }); delete[] rangeMinToLinearScale_weight_name_char; char* rangeMinToLinearScale_link_name_char = new char[512]; sprintf(rangeMinToLinearScale_link_name_char, "%s_to_%s", rangeMinBias_name.data(), linearScale_name.data()); std::string rangeMinToLinearScale_link_name(rangeMinToLinearScale_link_name_char); Link rangeMinToLinearScale_link(rangeMinToLinearScale_link_name, rangeMinBias_name, linearScale_name, rangeMinToLinearScale_weight_name); rangeMinToLinearScale_link.setModuleName(module_name); model.addLinks({ rangeMinToLinearScale_link }); delete[] rangeMinToLinearScale_link_name_char; // Make the links/weights from the rangeMaxMinScale node to the LinearScaleFunctor node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", rangeMaxMinScale_name, linearScale_name, specify_layers); char* rMaxMinScaleToLinearScale_link_name_char = new char[512]; sprintf(rMaxMinScaleToLinearScale_link_name_char, "%s_to_%s", rangeMaxMinScale_name.data(), linearScale_name.data()); std::string rMaxMinScaleToLinearScale_link_name(rMaxMinScaleToLinearScale_link_name_char); Link rMaxMinScaleToLinearScale_link(rMaxMinScaleToLinearScale_link_name, rangeMaxMinScale_name, linearScale_name, unity_weight_name); rMaxMinScaleToLinearScale_link.setModuleName(module_name); model.addLinks({ rMaxMinScaleToLinearScale_link }); delete[] rMaxMinScaleToLinearScale_link_name_char; } return node_names; } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addGaussianEncoding(Model<TensorT> & model, const std::string & name, const std::string & module_name, const std::vector<std::string>& mu_node_names, const std::vector<std::string>& logvar_node_names, const bool& specify_layer) { std::vector<std::string> node_names; std::string unity_weight_name, scalar_weight_name; assert(mu_node_names.size() == logvar_node_names.size()); // Specify the layer names for the mu and logvar nodes in order // to ensure they are placed on different tensors during model interpretation for (const std::string& node_name : mu_node_names) { model.nodes_.at(node_name)->setLayerName("VAE_Mu"); } for (const std::string& node_name : logvar_node_names) { model.nodes_.at(node_name)->setLayerName("VAE_LogVar"); } for (size_t i = 0; i < logvar_node_names.size(); ++i) { // Make the logVar scalar nodes char* logvarScale_name_char = new char[512]; sprintf(logvarScale_name_char, "%s-Scalar", logvar_node_names[i].data()); std::string logvarScale_name(logvarScale_name_char); Node<TensorT> logvarScale(logvarScale_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<ExponentialOp<TensorT>>(ExponentialOp<TensorT>()), std::make_shared<ExponentialGradOp<TensorT>>(ExponentialGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); logvarScale.setModuleName(module_name); if (specify_layer) logvarScale.setLayerName(module_name + "-Scalar"); model.addNodes({ logvarScale }); delete[] logvarScale_name_char; //node_names.push_back(logvarScale_name); // Make the links from logvar to the scalar node scalar_weight_name = makeUnityWeight(model, 0.5, module_name, "%s_to_%s", logvar_node_names[i], logvarScale_name); char* lvToS_link_name_char = new char[512]; sprintf(lvToS_link_name_char, "%s_to_%s", logvar_node_names[i].data(), logvarScale_name.data()); std::string lvToS_link_name(lvToS_link_name_char); Link lvToS_link(lvToS_link_name, logvar_node_names[i], logvarScale_name, scalar_weight_name); lvToS_link.setModuleName(module_name); model.addLinks({ lvToS_link }); delete[] lvToS_link_name_char; // Make the sampler nodes char* sampler_name_char = new char[512]; sprintf(sampler_name_char, "%s_%012d-Sampler", name.data(), i); std::string sampler_name(sampler_name_char); Node<TensorT> sampler(sampler_name, NodeType::input, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); sampler.setModuleName(module_name); if (specify_layer) sampler.setLayerName(module_name + "-Sampler"); model.addNodes({ sampler }); delete[] sampler_name_char; //node_names.push_back(sampler_name); // Make the stddev nodes char* stddev_name_char = new char[512]; sprintf(stddev_name_char, "%s-StdDev", logvar_node_names[i].data()); std::string stddev_name(stddev_name_char); Node<TensorT> stddev(stddev_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); stddev.setModuleName(module_name); if (specify_layer) stddev.setLayerName(module_name + "-StdDev"); model.addNodes({ stddev }); delete[] stddev_name_char; //node_names.push_back(stddev_name); // Make the links from logvar scalar node to the std dev node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", logvarScale_name, stddev_name); char* ScToStdev_link_name_char = new char[512]; sprintf(ScToStdev_link_name_char, "%s_to_%s", logvarScale_name.data(), stddev_name.data()); std::string ScToStdev_link_name(ScToStdev_link_name_char); Link ScToStdev_link(ScToStdev_link_name, logvarScale_name, stddev_name, unity_weight_name); ScToStdev_link.setModuleName(module_name); model.addLinks({ ScToStdev_link }); delete[] ScToStdev_link_name_char; // Make the links from sampler to the std dev node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", sampler_name, stddev_name); char* SToStdev_link_name_char = new char[512]; sprintf(SToStdev_link_name_char, "%s_to_%s", sampler_name.data(), stddev_name.data()); std::string SToStdev_link_name(SToStdev_link_name_char); Link SToStdev_link(SToStdev_link_name, sampler_name, stddev_name, unity_weight_name); SToStdev_link.setModuleName(module_name); model.addLinks({ SToStdev_link }); delete[] SToStdev_link_name_char; // Make the output nodes char* output_name_char = new char[512]; sprintf(output_name_char, "%s_%012d", name.data(), i); std::string output_name(output_name_char); Node<TensorT> output(output_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); output.setModuleName(module_name); if (specify_layer) output.setLayerName(module_name); model.addNodes({ output }); node_names.push_back(output_name); delete[] output_name_char; // Make the links from std dev node to the output node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", stddev_name, output_name); char* StDevToOutput_link_name_char = new char[512]; sprintf(StDevToOutput_link_name_char, "%s_to_%s", stddev_name.data(), output_name.data()); std::string StDevToOutput_link_name(StDevToOutput_link_name_char); Link StDevToOutput_link(StDevToOutput_link_name, stddev_name, output_name, unity_weight_name); StDevToOutput_link.setModuleName(module_name); model.addLinks({ StDevToOutput_link }); delete[] StDevToOutput_link_name_char; // Make the links from mean to the output node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", mu_node_names[i], output_name); char* muToOutput_link_name_char = new char[512]; sprintf(muToOutput_link_name_char, "%s_to_%s", mu_node_names[i].data(), output_name.data()); std::string muToOutput_link_name(muToOutput_link_name_char); Link muToOutput_link(muToOutput_link_name, mu_node_names[i], output_name, unity_weight_name); muToOutput_link.setModuleName(module_name); model.addLinks({ muToOutput_link }); delete[] muToOutput_link_name_char; } return node_names; } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addCategoricalEncoding(Model<TensorT> & model, const std::string & name, const std::string & module_name, const std::vector<std::string>& alpha_node_names, const bool& specify_layer) { std::vector<std::string> softmax_args_names; std::string unity_weight_name, scalar_weight_name; for (size_t i = 0; i < alpha_node_names.size(); ++i) { // Make the sampler nodes char* sampler_name_char = new char[512]; sprintf(sampler_name_char, "%s_%012d-GumbelSampler", name.data(), i); std::string sampler_name(sampler_name_char); Node<TensorT> sampler(sampler_name, NodeType::input, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); sampler.setModuleName(module_name); if (specify_layer) sampler.setLayerName(module_name + "-GumbelSampler"); model.addNodes({ sampler }); delete[] sampler_name_char; //node_names.push_back(sampler_name); // Make the logAlphaScale node char* logalphaScale_name_char = new char[512]; sprintf(logalphaScale_name_char, "%s_%012d-Scalar", name.data(), i); std::string logalphaScale_name(logalphaScale_name_char); Node<TensorT> logalphaScale(logalphaScale_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LogOp<TensorT>>(LogOp<TensorT>()), std::make_shared<LogGradOp<TensorT>>(LogGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); logalphaScale.setModuleName(module_name); if (specify_layer) logalphaScale.setLayerName(module_name + "-Scalar"); model.addNodes({ logalphaScale }); delete[] logalphaScale_name_char; //node_names.push_back(logalphaScale_name); // Make the LogAlphaSampler nodes char* logAlphaSampler_name_char = new char[512]; sprintf(logAlphaSampler_name_char, "%s_%012d-LogAlphaSampler", name.data(), i); std::string logAlphaSampler_name(logAlphaSampler_name_char); Node<TensorT> logAlphaSampler(logAlphaSampler_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); logAlphaSampler.setModuleName(module_name); if (specify_layer) logAlphaSampler.setLayerName(module_name + "-LogAlphaSampler"); model.addNodes({ logAlphaSampler }); delete[] logAlphaSampler_name_char; //node_names.push_back(logAlphaSampler); // Make the links from the logAlpha node and sampler node to the logAlphaSamplerSum node scalar_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", sampler_name, logAlphaSampler_name); char* lsToLAS_link_name_char = new char[512]; sprintf(lsToLAS_link_name_char, "%s_to_%s", sampler_name.data(), logAlphaSampler_name.data()); std::string lsToLAS_link_name(lsToLAS_link_name_char); Link lsToLAS_link(lsToLAS_link_name, sampler_name, logAlphaSampler_name, scalar_weight_name); lsToLAS_link.setModuleName(module_name); model.addLinks({ lsToLAS_link }); delete[] lsToLAS_link_name_char; // Make the links from the alpha node to the logAlpha node scalar_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", alpha_node_names[i], logalphaScale_name); char* laToLA_link_name_char = new char[512]; sprintf(laToLA_link_name_char, "%s_to_%s", alpha_node_names[i].data(), logalphaScale_name.data()); std::string laToLA_link_name(laToLA_link_name_char); Link laToLA_link(laToLA_link_name, alpha_node_names[i], logalphaScale_name, scalar_weight_name); laToLA_link.setModuleName(module_name); model.addLinks({ laToLA_link }); delete[] laToLA_link_name_char; // Make the links from the logAlpha node and sampler node to the logAlphaSamplerSum node //scalar_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", alpha_node_names[i], logAlphaSampler_name); scalar_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", logalphaScale_name, logAlphaSampler_name); char* laToLAS_link_name_char = new char[512]; //sprintf(laToLAS_link_name_char, "%s_to_%s", alpha_node_names[i].data(), logAlphaSampler_name.data()); sprintf(laToLAS_link_name_char, "%s_to_%s", logalphaScale_name.data(), logAlphaSampler_name.data()); std::string laToLAS_link_name(laToLAS_link_name_char); //Link laToLAS_link(laToLAS_link_name, alpha_node_names[i], logAlphaSampler_name, scalar_weight_name); Link laToLAS_link(laToLAS_link_name, logalphaScale_name, logAlphaSampler_name, scalar_weight_name); laToLAS_link.setModuleName(module_name); model.addLinks({ laToLAS_link }); delete[] laToLAS_link_name_char; // Make the inverse tau nodes char* tau_name_char = new char[512]; sprintf(tau_name_char, "%s_%012d-InverseTau", name.data(), i); std::string tau_name(tau_name_char); Node<TensorT> tau(tau_name, NodeType::input, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); tau.setModuleName(module_name); if (specify_layer) tau.setLayerName(module_name + "-InverseTau"); model.addNodes({ tau }); delete[] tau_name_char; //node_names.push_back(tau_name) // Make the intermediate nodes before the softmax char* softmaxArgs_name_char = new char[512]; sprintf(softmaxArgs_name_char, "%s_%012d-SoftmaxArgs", name.data(), i); std::string softmaxArgs_name(softmaxArgs_name_char); Node<TensorT> softmaxArgs(softmaxArgs_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); softmaxArgs.setModuleName(module_name); if (specify_layer) softmaxArgs.setLayerName(module_name + "-SoftmaxArgs"); model.addNodes({ softmaxArgs }); softmax_args_names.push_back(softmaxArgs_name); delete[] softmaxArgs_name_char; // Make the links from the LogAlphaSampler node to the SoftmaxArgs node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", logAlphaSampler_name, softmaxArgs_name); char* LasToSa_link_name_char = new char[512]; sprintf(LasToSa_link_name_char, "%s_to_%s", logAlphaSampler_name.data(), softmaxArgs_name.data()); std::string LasToSa_link_name(LasToSa_link_name_char); Link LasToSa_link(LasToSa_link_name, logAlphaSampler_name, softmaxArgs_name, unity_weight_name); LasToSa_link.setModuleName(module_name); model.addLinks({ LasToSa_link }); delete[] LasToSa_link_name_char; // Make the links from the inverseTau node to the SoftmaxArgs node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", tau_name, softmaxArgs_name); char* ItToSa_link_name_char = new char[512]; sprintf(ItToSa_link_name_char, "%s_to_%s", tau_name.data(), softmaxArgs_name.data()); std::string ItToSa_link_name(ItToSa_link_name_char); Link ItToSa_link(ItToSa_link_name, tau_name, softmaxArgs_name, unity_weight_name); ItToSa_link.setModuleName(module_name); model.addLinks({ ItToSa_link }); delete[] ItToSa_link_name_char; } // Make the softmax layer std::vector<std::string> node_names = addStableSoftMax(model, name + "-" + "SoftMax", module_name, softmax_args_names, true); return node_names; } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addDiscriminator(Model<TensorT> & model, const std::string & name, const std::string & module_name, const std::vector<std::string>& encoding_node_names) { // TODO: add specify layer std::vector<std::string> node_names; std::string unity_weight_name, negative_weight_name; //// Create the unity weight //char* unity_weight_name_char = new char[512]; //sprintf(unity_weight_name_char, "%s_Unity", name.data()); //std::string unity_weight_name(unity_weight_name_char); //Weight<TensorT> unity_weight(unity_weight_name, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); //unity_weight.setModuleName(module_name); //model.addWeights({ unity_weight }); //// Create the negative unity weight //char* negative_weight_name_char = new char[512]; //sprintf(negative_weight_name_char, "%s_NegUnity", name.data()); //std::string negative_weight_name(negative_weight_name_char); //Weight<TensorT> negative_weight(negative_weight_name, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(-1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); //negative_weight.setModuleName(module_name); //model.addWeights({ negative_weight }); for (size_t i = 0; i < encoding_node_names.size(); ++i) { // Make the output node char* output_name_char = new char[512]; sprintf(output_name_char, "%s-Output-%012d", name.data(), i); std::string output_name(output_name_char); Node<TensorT> output(output_name, NodeType::output, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); output.setModuleName(module_name); model.addNodes({ output }); node_names.push_back(output_name); delete[] output_name_char; // Make the links from the encoding to the output node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", encoding_node_names[i], output_name); char* lvToS_link_name_char = new char[512]; sprintf(lvToS_link_name_char, "%s_to_%s", encoding_node_names[i].data(), output_name.data()); std::string lvToS_link_name(lvToS_link_name_char); Link lvToS_link(lvToS_link_name, encoding_node_names[i], output_name, unity_weight_name); lvToS_link.setModuleName(module_name); model.addLinks({ lvToS_link }); delete[] lvToS_link_name_char; // Make the sampler nodes char* sampler_name_char = new char[512]; sprintf(sampler_name_char, "%s-Sampler-%012d", name.data(), i); std::string sampler_name(sampler_name_char); Node<TensorT> sampler(sampler_name, NodeType::input, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); sampler.setModuleName(module_name); model.addNodes({ sampler }); delete[] sampler_name_char; // Make the links from the sampler node to the output node negative_weight_name = makeUnityWeight(model, -1.0, module_name, "%s_to_%s", sampler_name, output_name); char* ScToStdev_link_name_char = new char[512]; sprintf(ScToStdev_link_name_char, "%s_to_%s", sampler_name.data(), output_name.data()); std::string ScToStdev_link_name(ScToStdev_link_name_char); Link ScToStdev_link(ScToStdev_link_name, sampler_name, output_name, negative_weight_name); ScToStdev_link.setModuleName(module_name); model.addLinks({ ScToStdev_link }); delete[] ScToStdev_link_name_char; } return node_names; } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addLSTM(Model<TensorT> & model, const std::string & name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int & n_blocks, const int & n_cells, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, const TensorT& drop_out_prob, const TensorT& drop_connection_prob, const bool& biases, bool forget_gate, int block_version, const bool& specify_layer, bool specify_cyclic_pairs) { std::vector<std::string> node_names; for (int block_iter = 0; block_iter < n_blocks; ++block_iter) { // Make the LSTM cell char* name_char = new char[512]; sprintf(name_char, "%s-%012d", name.data(), block_iter); std::string node_name(name_char); delete[] name_char; if (block_version == 1) { std::vector<std::string> output_node_names = addLSTMBlock1(model, node_name, module_name, source_node_names, n_cells, node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad, weight_init, solver, drop_out_prob, drop_connection_prob, biases, forget_gate, specify_layer, specify_cyclic_pairs); for (const std::string& node_name : output_node_names) node_names.push_back(node_name); } else if (block_version == 2) { std::vector<std::string> output_node_names = addLSTMBlock2(model, node_name, module_name, source_node_names, n_cells, node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad, weight_init, solver, drop_out_prob, drop_connection_prob, biases, forget_gate, specify_layer, specify_cyclic_pairs); for (const std::string& node_name : output_node_names) node_names.push_back(node_name); } } return node_names; } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addLSTMBlock1( Model<TensorT> & model, const std::string & name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int & n_cells, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, const TensorT& drop_out_prob, const TensorT& drop_connection_prob, const bool& biases, bool forget_gate, const bool& specify_layer, bool specify_cyclic_pairs) { std::vector<std::string> node_names; std::string unity_weight_name; // Make the input gate node char* blockGateInput_name_char = new char[512]; sprintf(blockGateInput_name_char, "%s-BlockGateInput", name.data()); std::string blockGateInput_name(blockGateInput_name_char); Node<TensorT> blockGateInput(blockGateInput_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<SigmoidOp<TensorT>>(SigmoidOp<TensorT>()), std::make_shared<SigmoidGradOp<TensorT>>(SigmoidGradOp<TensorT>()), node_integration, node_integration_error, node_integration_weight_grad); blockGateInput.setModuleName(module_name); if (specify_layer) blockGateInput.setLayerName(module_name + "-BlockGateInput"); model.addNodes({ blockGateInput }); delete[] blockGateInput_name_char; // Make the output gate node char* blockGateOutput_name_char = new char[512]; sprintf(blockGateOutput_name_char, "%s-BlockGateOutput", name.data()); std::string blockGateOutput_name(blockGateOutput_name_char); Node<TensorT> blockGateOutput(blockGateOutput_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<SigmoidOp<TensorT>>(SigmoidOp<TensorT>()), std::make_shared<SigmoidGradOp<TensorT>>(SigmoidGradOp<TensorT>()), node_integration, node_integration_error, node_integration_weight_grad); blockGateOutput.setModuleName(module_name); if (specify_layer) blockGateOutput.setLayerName(module_name + "-BlockGateOutput"); model.addNodes({ blockGateOutput }); delete[] blockGateOutput_name_char; std::string blockGateForget_name; if (forget_gate) { // Make the forget gate node char* blockGateForget_name_char = new char[512]; sprintf(blockGateForget_name_char, "%s-BlockGateForget", name.data()); blockGateForget_name = std::string(blockGateForget_name_char); Node<TensorT> blockGateForget(blockGateForget_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<SigmoidOp<TensorT>>(SigmoidOp<TensorT>()), std::make_shared<SigmoidGradOp<TensorT>>(SigmoidGradOp<TensorT>()), node_integration, node_integration_error, node_integration_weight_grad); blockGateForget.setModuleName(module_name); if (specify_layer) blockGateForget.setLayerName(module_name + "-BlockGateForget"); model.addNodes({ blockGateForget }); delete[] blockGateForget_name_char; } if (biases) { // biases, links, and weights for input gate, forget gate, and output gate // Make the input gate bias nodes char* iGateBias_name_char = new char[512]; sprintf(iGateBias_name_char, "%s-bias", blockGateInput_name.data()); std::string iGateBias_name(iGateBias_name_char); Node<TensorT> iGateBias(iGateBias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); iGateBias.setModuleName(module_name); model.addNodes({ iGateBias }); delete[] iGateBias_name_char; // Make the link between input gate bias node to input gate node char* weight_iGateBias_name_char = new char[512]; sprintf(weight_iGateBias_name_char, "%s_to_%s", iGateBias_name.data(), blockGateInput_name.data()); std::string weight_iGateBias_name(weight_iGateBias_name_char); delete[] weight_iGateBias_name_char; char* link_iGateBias_name_char = new char[512]; sprintf(link_iGateBias_name_char, "%s_to_%s", iGateBias_name.data(), blockGateInput_name.data()); std::string link_iGateBias_name(link_iGateBias_name_char); delete[] link_iGateBias_name_char; std::shared_ptr<WeightInitOp<TensorT>> iGateBias_weight_init; iGateBias_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0)); std::shared_ptr<SolverOp<TensorT>> iGateBias_solver = solver; Weight<TensorT> weight_iGateBias(weight_iGateBias_name, iGateBias_weight_init, iGateBias_solver); weight_iGateBias.setModuleName(module_name); Link link_iGateBias(link_iGateBias_name, iGateBias_name, blockGateInput_name, weight_iGateBias_name); link_iGateBias.setModuleName(module_name); model.addWeights({ weight_iGateBias }); model.addLinks({ link_iGateBias }); if (forget_gate) { // Make the forget gate bias nodes char* fGateBias_name_char = new char[512]; sprintf(fGateBias_name_char, "%s-bias", blockGateForget_name.data()); std::string fGateBias_name(fGateBias_name_char); Node<TensorT> fGateBias(fGateBias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); fGateBias.setModuleName(module_name); model.addNodes({ fGateBias }); delete[] fGateBias_name_char; // Make the link between forget gate bias node to forget gate node char* weight_fGateBias_name_char = new char[512]; sprintf(weight_fGateBias_name_char, "%s_to_%s", fGateBias_name.data(), blockGateForget_name.data()); std::string weight_fGateBias_name(weight_fGateBias_name_char); delete[] weight_fGateBias_name_char; char* link_fGateBias_name_char = new char[512]; sprintf(link_fGateBias_name_char, "%s_to_%s", fGateBias_name.data(), blockGateForget_name.data()); std::string link_fGateBias_name(link_fGateBias_name_char); delete[] link_fGateBias_name_char; std::shared_ptr<WeightInitOp<TensorT>> fGateBias_weight_init; fGateBias_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0)); std::shared_ptr<SolverOp<TensorT>> fGateBias_solver = solver; Weight<TensorT> weight_fGateBias(weight_fGateBias_name, fGateBias_weight_init, fGateBias_solver); weight_fGateBias.setModuleName(module_name); Link link_fGateBias(link_fGateBias_name, fGateBias_name, blockGateForget_name, weight_fGateBias_name); link_fGateBias.setModuleName(module_name); model.addWeights({ weight_fGateBias }); model.addLinks({ link_fGateBias }); } // Make the output gate bias nodes char* oGateBias_name_char = new char[512]; sprintf(oGateBias_name_char, "%s-bias", blockGateOutput_name.data()); std::string oGateBias_name(oGateBias_name_char); Node<TensorT> oGateBias(oGateBias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); oGateBias.setModuleName(module_name); model.addNodes({ oGateBias }); delete[] oGateBias_name_char; // Make the link between output gate bias node to output gate node char* weight_oGateBias_name_char = new char[512]; sprintf(weight_oGateBias_name_char, "%s_to_%s", oGateBias_name.data(), blockGateOutput_name.data()); std::string weight_oGateBias_name(weight_oGateBias_name_char); delete[] weight_oGateBias_name_char; char* link_oGateBias_name_char = new char[512]; sprintf(link_oGateBias_name_char, "%s_to_%s", oGateBias_name.data(), blockGateOutput_name.data()); std::string link_oGateBias_name(link_oGateBias_name_char); delete[] link_oGateBias_name_char; std::shared_ptr<WeightInitOp<TensorT>> oGateBias_weight_init; oGateBias_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0)); std::shared_ptr<SolverOp<TensorT>> oGateBias_solver = solver; Weight<TensorT> weight_oGateBias(weight_oGateBias_name, oGateBias_weight_init, oGateBias_solver); weight_oGateBias.setModuleName(module_name); Link link_oGateBias(link_oGateBias_name, oGateBias_name, blockGateOutput_name, weight_oGateBias_name); link_oGateBias.setModuleName(module_name); model.addWeights({ weight_oGateBias }); model.addLinks({ link_oGateBias }); } for (const std::string& node_name : source_node_names) { // Make the link from input node to input gate char* weight_iToIGate_name_char = new char[512]; sprintf(weight_iToIGate_name_char, "%s_to_%s", node_name.data(), blockGateInput_name.data()); std::string weight_iToIGate_name(weight_iToIGate_name_char); delete[] weight_iToIGate_name_char; char* link_iToIGate_name_char = new char[512]; sprintf(link_iToIGate_name_char, "%s_to_%s", node_name.data(), blockGateInput_name.data()); std::string link_iToIGate_name(link_iToIGate_name_char); delete[] link_iToIGate_name_char; std::shared_ptr<WeightInitOp<TensorT>> iToIGate_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> iToIGate_solver = solver; Weight<TensorT> weight_iToIGate(weight_iToIGate_name, iToIGate_weight_init, iToIGate_solver); weight_iToIGate.setModuleName(module_name); Link link_iToIGate(link_iToIGate_name, node_name, blockGateInput_name, weight_iToIGate_name); link_iToIGate.setModuleName(module_name); model.addWeights({ weight_iToIGate }); model.addLinks({ link_iToIGate }); // Make the link from input node to output gate char* weight_iToOGate_name_char = new char[512]; sprintf(weight_iToOGate_name_char, "%s_to_%s", node_name.data(), blockGateOutput_name.data()); std::string weight_iToOGate_name(weight_iToOGate_name_char); delete[] weight_iToOGate_name_char; char* link_iToOGate_name_char = new char[512]; sprintf(link_iToOGate_name_char, "%s_to_%s", node_name.data(), blockGateOutput_name.data()); std::string link_iToOGate_name(link_iToOGate_name_char); delete[] link_iToOGate_name_char; std::shared_ptr<WeightInitOp<TensorT>> iToOGate_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> iToOGate_solver = solver; Weight<TensorT> weight_iToOGate(weight_iToOGate_name, iToOGate_weight_init, iToOGate_solver); weight_iToOGate.setModuleName(module_name); Link link_iToOGate(link_iToOGate_name, node_name, blockGateOutput_name, weight_iToOGate_name); link_iToOGate.setModuleName(module_name); model.addWeights({ weight_iToOGate }); model.addLinks({ link_iToOGate }); if (forget_gate) { // Make the link from input node to forget gate char* weight_iToFGate_name_char = new char[512]; sprintf(weight_iToFGate_name_char, "%s_to_%s", node_name.data(), blockGateForget_name.data()); std::string weight_iToFGate_name(weight_iToFGate_name_char); delete[] weight_iToFGate_name_char; char* link_iToFGate_name_char = new char[512]; sprintf(link_iToFGate_name_char, "%s_to_%s", node_name.data(), blockGateForget_name.data()); std::string link_iToFGate_name(link_iToFGate_name_char); delete[] link_iToFGate_name_char; std::shared_ptr<WeightInitOp<TensorT>> iToFGate_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> iToFGate_solver = solver; Weight<TensorT> weight_iToFGate(weight_iToFGate_name, iToFGate_weight_init, iToFGate_solver); weight_iToFGate.setModuleName(module_name); Link link_iToFGate(link_iToFGate_name, node_name, blockGateForget_name, weight_iToFGate_name); link_iToFGate.setModuleName(module_name); model.addWeights({ weight_iToFGate }); model.addLinks({ link_iToFGate }); } } for (int cell_iter = 0; cell_iter < n_cells; ++cell_iter) { // Make the input node char* blockInput_name_char = new char[512]; sprintf(blockInput_name_char, "%s-BlockInput-%012d", name.data(), cell_iter); std::string blockInput_name(blockInput_name_char); Node<TensorT> blockInput(blockInput_name, NodeType::hidden, NodeStatus::initialized, node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad); blockInput.setModuleName(module_name); if (specify_layer) blockInput.setLayerName(module_name + "-BlockInput"); blockInput.setDropProbability(drop_out_prob); model.addNodes({ blockInput }); delete[] blockInput_name_char; // Make the input multiplier node char* blockMultInput_name_char = new char[512]; sprintf(blockMultInput_name_char, "%s-BlockMultInput-%012d", name.data(), cell_iter); std::string blockMultInput_name(blockMultInput_name_char); Node<TensorT> blockMultInput(blockMultInput_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); blockMultInput.setModuleName(module_name); if (specify_layer) blockMultInput.setLayerName(module_name + "-BlockMultInput"); model.addNodes({ blockMultInput }); delete[] blockMultInput_name_char; // Make the output multiplier node char* blockOutput_name_char = new char[512]; sprintf(blockOutput_name_char, "%s-BlockMultOutput-%012d", name.data(), cell_iter); std::string blockOutput_name(blockOutput_name_char); Node<TensorT> blockOutput(blockOutput_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); blockOutput.setModuleName(module_name); if (specify_layer) blockOutput.setLayerName(module_name + "-BlockMultOut"); blockOutput.setDropProbability(drop_out_prob); model.addNodes({ blockOutput }); node_names.push_back(blockOutput_name); delete[] blockOutput_name_char; // Make the memory cell char* blockMemoryCell_name_char = new char[512]; sprintf(blockMemoryCell_name_char, "%s-BlockMemoryCell-%012d", name.data(), cell_iter); std::string blockMemoryCell_name(blockMemoryCell_name_char); Node<TensorT> blockMemoryCell(blockMemoryCell_name, NodeType::recursive, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); blockMemoryCell.setModuleName(module_name); model.addNodes({ blockMemoryCell }); delete[] blockMemoryCell_name_char; // Make the link from memory cell to output multiplier node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockMemoryCell_name, blockOutput_name); char* link_MemCellToOMult_name_char = new char[512]; sprintf(link_MemCellToOMult_name_char, "%s_to_%s", blockMemoryCell_name.data(), blockOutput_name.data()); std::string link_MemCellToOMult_name(link_MemCellToOMult_name_char); Link link_MemCellToOMult(link_MemCellToOMult_name, blockMemoryCell_name, blockOutput_name, unity_weight_name); link_MemCellToOMult.setModuleName(module_name); model.addLinks({ link_MemCellToOMult }); delete[] link_MemCellToOMult_name_char; // Make the link from input multiplier node to memory cell unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockMultInput_name, blockMemoryCell_name); char* link_iMultToMemCell_name_char = new char[512]; sprintf(link_iMultToMemCell_name_char, "%s_to_%s", blockMultInput_name.data(), blockMemoryCell_name.data()); std::string link_iMultToMemCell_name(link_iMultToMemCell_name_char); Link link_iMultToMemCell(link_iMultToMemCell_name, blockMultInput_name, blockMemoryCell_name, unity_weight_name); link_iMultToMemCell.setModuleName(module_name); model.addLinks({ link_iMultToMemCell }); delete[] link_iMultToMemCell_name_char; // Make the link between the input and the input multiplier node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockInput_name, blockMultInput_name); char* link_iToIMult_name_char = new char[512]; sprintf(link_iToIMult_name_char, "%s_to_%s", blockInput_name.data(), blockMultInput_name.data()); std::string link_iToIMult_name(link_iToIMult_name_char); Link link_iToIMult(link_iToIMult_name, blockInput_name, blockMultInput_name, unity_weight_name); link_iToIMult.setModuleName(module_name); model.addLinks({ link_iToIMult }); delete[] link_iToIMult_name_char; // Make the link between the input gate and the input multiplier node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockGateInput_name, blockMultInput_name); char* link_iGateToIMult_name_char = new char[512]; sprintf(link_iGateToIMult_name_char, "%s_to_%s", blockGateInput_name.data(), blockMultInput_name.data()); std::string link_iGateToIMult_name(link_iGateToIMult_name_char); Link link_iGateToIMult(link_iGateToIMult_name, blockGateInput_name, blockMultInput_name, unity_weight_name); link_iGateToIMult.setModuleName(module_name); model.addLinks({ link_iGateToIMult }); delete[] link_iGateToIMult_name_char; // Make the link between the output gate and the output gate multiplier node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockGateOutput_name, blockOutput_name); char* link_oGateToOMult_name_char = new char[512]; sprintf(link_oGateToOMult_name_char, "%s_to_%s", blockGateOutput_name.data(), blockOutput_name.data()); std::string link_oGateToOMult_name(link_oGateToOMult_name_char); Link link_oGateToOMult(link_oGateToOMult_name, blockGateOutput_name, blockOutput_name, unity_weight_name); link_oGateToOMult.setModuleName(module_name); model.addLinks({ link_oGateToOMult }); delete[] link_oGateToOMult_name_char; // Make the link between the output multiplier node and the input char* weight_OMultToI_name_char = new char[512]; sprintf(weight_OMultToI_name_char, "%s_to_%s", blockOutput_name.data(), blockInput_name.data()); std::string weight_OMultToI_name(weight_OMultToI_name_char); delete[] weight_OMultToI_name_char; char* link_OMultToI_name_char = new char[512]; sprintf(link_OMultToI_name_char, "%s_to_%s", blockOutput_name.data(), blockInput_name.data()); std::string link_OMultToI_name(link_OMultToI_name_char); delete[] link_OMultToI_name_char; std::shared_ptr<WeightInitOp<TensorT>> OMultToI_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> OMultToI_solver = solver; Weight<TensorT> weight_OMultToI(weight_OMultToI_name, OMultToI_weight_init, OMultToI_solver); weight_OMultToI.setModuleName(module_name); weight_OMultToI.setDropProbability(drop_connection_prob); Link link_OMultToI(link_OMultToI_name, blockOutput_name, blockInput_name, weight_OMultToI_name); link_OMultToI.setModuleName(module_name); model.addWeights({ weight_OMultToI }); model.addLinks({ link_OMultToI }); if (specify_cyclic_pairs) model.addCyclicPairs(std::make_pair(blockOutput_name, blockInput_name)); // Make the link between the output multiplier node and the input gate char* weight_OMultToIGate_name_char = new char[512]; sprintf(weight_OMultToIGate_name_char, "%s_to_%s", blockOutput_name.data(), blockGateInput_name.data()); std::string weight_OMultToIGate_name(weight_OMultToIGate_name_char); delete[] weight_OMultToIGate_name_char; char* link_OMultToIGate_name_char = new char[512]; sprintf(link_OMultToIGate_name_char, "%s_to_%s", blockOutput_name.data(), blockGateInput_name.data()); std::string link_OMultToIGate_name(link_OMultToIGate_name_char); delete[] link_OMultToIGate_name_char; std::shared_ptr<WeightInitOp<TensorT>> OMultToIGate_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> OMultToIGate_solver = solver; Weight<TensorT> weight_OMultToIGate(weight_OMultToIGate_name, OMultToIGate_weight_init, OMultToIGate_solver); weight_OMultToIGate.setModuleName(module_name); Link link_OMultToIGate(link_OMultToIGate_name, blockOutput_name, blockGateInput_name, weight_OMultToIGate_name); link_OMultToIGate.setModuleName(module_name); model.addWeights({ weight_OMultToIGate }); model.addLinks({ link_OMultToIGate }); if (specify_cyclic_pairs) model.addCyclicPairs(std::make_pair(blockOutput_name, blockGateInput_name)); // Make the forget gate multiplier node char* blockMultForget_name_char = new char[512]; sprintf(blockMultForget_name_char, "%s-BlockMultForget-%012d", name.data(), cell_iter); std::string blockMultForget_name(blockMultForget_name_char); Node<TensorT> blockMultForget(blockMultForget_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()), std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); blockMultForget.setModuleName(module_name); if (specify_layer) blockMultForget.setLayerName(module_name + "-BlockMultForget"); model.addNodes({ blockMultForget }); delete[] blockMultForget_name_char; if (forget_gate) { // Make the link between the forget gate and the forget gate multiplier node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockGateForget_name, blockMultForget_name); char* link_fGateToFMult_name_char = new char[512]; sprintf(link_fGateToFMult_name_char, "%s_to_%s", blockGateForget_name.data(), blockMultForget_name.data()); std::string link_fGateToFMult_name(link_fGateToFMult_name_char); Link link_fGateToFMult(link_fGateToFMult_name, blockGateForget_name, blockMultForget_name, unity_weight_name); link_fGateToFMult.setModuleName(module_name); model.addLinks({ link_fGateToFMult }); delete[] link_fGateToFMult_name_char; // Make the link between the output multiplier node and the forget gate char* weight_OMultToFGate_name_char = new char[512]; sprintf(weight_OMultToFGate_name_char, "%s_to_%s", blockOutput_name.data(), blockGateForget_name.data()); std::string weight_OMultToFGate_name(weight_OMultToFGate_name_char); delete[] weight_OMultToFGate_name_char; char* link_OMultToFGate_name_char = new char[512]; sprintf(link_OMultToFGate_name_char, "%s_to_%s", blockOutput_name.data(), blockGateForget_name.data()); std::string link_OMultToFGate_name(link_OMultToFGate_name_char); delete[] link_OMultToFGate_name_char; std::shared_ptr<WeightInitOp<TensorT>> OMultToFGate_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> OMultToFGate_solver = solver; Weight<TensorT> weight_OMultToFGate(weight_OMultToFGate_name, OMultToFGate_weight_init, OMultToFGate_solver); weight_OMultToFGate.setModuleName(module_name); Link link_OMultToFGate(link_OMultToFGate_name, blockOutput_name, blockGateForget_name, weight_OMultToFGate_name); link_OMultToFGate.setModuleName(module_name); model.addWeights({ weight_OMultToFGate }); model.addLinks({ link_OMultToFGate }); if (specify_cyclic_pairs) model.addCyclicPairs(std::make_pair(blockOutput_name, blockGateForget_name)); } // Make the link from forget gate multiplier node to memory cell unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockMultForget_name, blockMemoryCell_name); char* link_fMultToMemCell_name_char = new char[512]; sprintf(link_fMultToMemCell_name_char, "%s_to_%s", blockMultForget_name.data(), blockMemoryCell_name.data()); std::string link_fMultToMemCell_name(link_fMultToMemCell_name_char); Link link_fMultToMemCell(link_fMultToMemCell_name, blockMultForget_name, blockMemoryCell_name, unity_weight_name); link_fMultToMemCell.setModuleName(module_name); model.addLinks({ link_fMultToMemCell }); if (specify_cyclic_pairs) model.addCyclicPairs(std::make_pair(blockMultForget_name, blockMemoryCell_name)); delete[] link_fMultToMemCell_name_char; // Make the link from memory cell to forget gate multiplier node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockMemoryCell_name, blockMultForget_name); char* link_MemCellToFMult_name_char = new char[512]; sprintf(link_MemCellToFMult_name_char, "%s_to_%s", blockMemoryCell_name.data(), blockMultForget_name.data()); std::string link_MemCellToFMult_name(link_MemCellToFMult_name_char); Link link_MemCellToFMult(link_MemCellToFMult_name, blockMemoryCell_name, blockMultForget_name, unity_weight_name); link_MemCellToFMult.setModuleName(module_name); model.addLinks({ link_MemCellToFMult }); delete[] link_MemCellToFMult_name_char; // Make the link between the output multiplier node and the output gate char* weight_OMultToOGate_name_char = new char[512]; sprintf(weight_OMultToOGate_name_char, "%s_to_%s", blockOutput_name.data(), blockGateOutput_name.data()); std::string weight_OMultToOGate_name(weight_OMultToOGate_name_char); delete[] weight_OMultToOGate_name_char; char* link_OMultToOGate_name_char = new char[512]; sprintf(link_OMultToOGate_name_char, "%s_to_%s", blockOutput_name.data(), blockGateOutput_name.data()); std::string link_OMultToOGate_name(link_OMultToOGate_name_char); delete[] link_OMultToOGate_name_char; std::shared_ptr<WeightInitOp<TensorT>> OMultToOGate_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> OMultToOGate_solver = solver; Weight<TensorT> weight_OMultToOGate(weight_OMultToOGate_name, OMultToOGate_weight_init, OMultToOGate_solver); weight_OMultToOGate.setModuleName(module_name); Link link_OMultToOGate(link_OMultToOGate_name, blockOutput_name, blockGateOutput_name, weight_OMultToOGate_name); link_OMultToOGate.setModuleName(module_name); model.addWeights({ weight_OMultToOGate }); model.addLinks({ link_OMultToOGate }); if (specify_cyclic_pairs) model.addCyclicPairs(std::make_pair(blockOutput_name, blockGateOutput_name)); if (biases) { // biases, links, and weights for input // Make the input bias nodes char* iBias_name_char = new char[512]; sprintf(iBias_name_char, "%s-bias-%012d", blockInput_name.data(), cell_iter); std::string iBias_name(iBias_name_char); Node<TensorT> iBias(iBias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); iBias.setDropProbability(drop_out_prob); iBias.setModuleName(module_name); model.addNodes({ iBias }); delete[] iBias_name_char; // Make the link between input bias node to input node char* weight_iBias_name_char = new char[512]; sprintf(weight_iBias_name_char, "%s_to_%s", iBias_name.data(), blockInput_name.data()); std::string weight_iBias_name(weight_iBias_name_char); delete[] weight_iBias_name_char; char* link_iBias_name_char = new char[512]; sprintf(link_iBias_name_char, "%s_to_%s", iBias_name.data(), blockInput_name.data()); std::string link_iBias_name(link_iBias_name_char); delete[] link_iBias_name_char; std::shared_ptr<WeightInitOp<TensorT>> iBias_weight_init; iBias_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0)); std::shared_ptr<SolverOp<TensorT>> iBias_solver = solver; Weight<TensorT> weight_iBias(weight_iBias_name, iBias_weight_init, iBias_solver); weight_iBias.setModuleName(module_name); weight_iBias.setDropProbability(drop_connection_prob); Link link_iBias(link_iBias_name, iBias_name, blockInput_name, weight_iBias_name); link_iBias.setModuleName(module_name); model.addWeights({ weight_iBias }); model.addLinks({ link_iBias }); } for (const std::string& node_name : source_node_names) { // Make the link form input to block input char* weight_iToIBlock_name_char = new char[512]; sprintf(weight_iToIBlock_name_char, "%s_to_%s", node_name.data(), blockInput_name.data()); std::string weight_iToIBlock_name(weight_iToIBlock_name_char); delete[] weight_iToIBlock_name_char; char* link_iToIBlock_name_char = new char[512]; sprintf(link_iToIBlock_name_char, "%s_to_%s", node_name.data(), blockInput_name.data()); std::string link_iToIBlock_name(link_iToIBlock_name_char); delete[] link_iToIBlock_name_char; std::shared_ptr<WeightInitOp<TensorT>> iToIBlock_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> iToIBlock_solver = solver; Weight<TensorT> weight_iToIBlock(weight_iToIBlock_name, iToIBlock_weight_init, iToIBlock_solver); weight_iToIBlock.setModuleName(module_name); weight_iToIBlock.setDropProbability(drop_connection_prob); Link link_iToIBlock(link_iToIBlock_name, node_name, blockInput_name, weight_iToIBlock_name); link_iToIBlock.setModuleName(module_name); model.addWeights({ weight_iToIBlock }); model.addLinks({ link_iToIBlock }); } } return node_names; } template<typename TensorT> std::vector<std::string> ModelBuilder<TensorT>::addLSTMBlock2( Model<TensorT> & model, const std::string & name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int & n_cells, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, const TensorT& drop_out_prob, const TensorT& drop_connection_prob, const bool& biases, bool forget_gate, const bool& specify_layer, bool specify_cyclic_pairs) { std::vector<std::string> node_names; std::string unity_weight_name; // Make the input gate node char* blockGateInput_name_char = new char[512]; sprintf(blockGateInput_name_char, "%s-BlockGateInput", name.data()); std::string blockGateInput_name(blockGateInput_name_char); Node<TensorT> blockGateInput(blockGateInput_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<SigmoidOp<TensorT>>(SigmoidOp<TensorT>()), std::make_shared<SigmoidGradOp<TensorT>>(SigmoidGradOp<TensorT>()), node_integration, node_integration_error, node_integration_weight_grad); blockGateInput.setModuleName(module_name); if (specify_layer) blockGateInput.setLayerName(module_name + "-BlockGateInput"); model.addNodes({ blockGateInput }); delete[] blockGateInput_name_char; // Make the output gate node char* blockGateOutput_name_char = new char[512]; sprintf(blockGateOutput_name_char, "%s-BlockGateOutput", name.data()); std::string blockGateOutput_name(blockGateOutput_name_char); Node<TensorT> blockGateOutput(blockGateOutput_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<SigmoidOp<TensorT>>(SigmoidOp<TensorT>()), std::make_shared<SigmoidGradOp<TensorT>>(SigmoidGradOp<TensorT>()), node_integration, node_integration_error, node_integration_weight_grad); blockGateOutput.setModuleName(module_name); if (specify_layer) blockGateOutput.setLayerName(module_name + "-BlockGateOutput"); model.addNodes({ blockGateOutput }); delete[] blockGateOutput_name_char; std::string blockGateForget_name; if (forget_gate) { // Make the forget gate node char* blockGateForget_name_char = new char[512]; sprintf(blockGateForget_name_char, "%s-BlockGateForget", name.data()); blockGateForget_name = std::string(blockGateForget_name_char); Node<TensorT> blockGateForget(blockGateForget_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<SigmoidOp<TensorT>>(SigmoidOp<TensorT>()), std::make_shared<SigmoidGradOp<TensorT>>(SigmoidGradOp<TensorT>()), node_integration, node_integration_error, node_integration_weight_grad); blockGateForget.setModuleName(module_name); if (specify_layer) blockGateForget.setLayerName(module_name + "-BlockGateForget"); model.addNodes({ blockGateForget }); delete[] blockGateForget_name_char; } if (biases) { // biases, links, and weights for input gate, forget gate, and output gate // Make the input gate bias nodes char* iGateBias_name_char = new char[512]; sprintf(iGateBias_name_char, "%s-bias", blockGateInput_name.data()); std::string iGateBias_name(iGateBias_name_char); Node<TensorT> iGateBias(iGateBias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); iGateBias.setModuleName(module_name); model.addNodes({ iGateBias }); delete[] iGateBias_name_char; // Make the link between input gate bias node to input gate node char* weight_iGateBias_name_char = new char[512]; sprintf(weight_iGateBias_name_char, "%s_to_%s", iGateBias_name.data(), blockGateInput_name.data()); std::string weight_iGateBias_name(weight_iGateBias_name_char); delete[] weight_iGateBias_name_char; char* link_iGateBias_name_char = new char[512]; sprintf(link_iGateBias_name_char, "%s_to_%s", iGateBias_name.data(), blockGateInput_name.data()); std::string link_iGateBias_name(link_iGateBias_name_char); delete[] link_iGateBias_name_char; std::shared_ptr<WeightInitOp<TensorT>> iGateBias_weight_init; iGateBias_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0)); std::shared_ptr<SolverOp<TensorT>> iGateBias_solver = solver; Weight<TensorT> weight_iGateBias(weight_iGateBias_name, iGateBias_weight_init, iGateBias_solver); weight_iGateBias.setModuleName(module_name); Link link_iGateBias(link_iGateBias_name, iGateBias_name, blockGateInput_name, weight_iGateBias_name); link_iGateBias.setModuleName(module_name); model.addWeights({ weight_iGateBias }); model.addLinks({ link_iGateBias }); if (forget_gate) { // Make the forget gate bias nodes char* fGateBias_name_char = new char[512]; sprintf(fGateBias_name_char, "%s-bias", blockGateForget_name.data()); std::string fGateBias_name(fGateBias_name_char); Node<TensorT> fGateBias(fGateBias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); fGateBias.setModuleName(module_name); model.addNodes({ fGateBias }); delete[] fGateBias_name_char; // Make the link between forget gate bias node to forget gate node char* weight_fGateBias_name_char = new char[512]; sprintf(weight_fGateBias_name_char, "%s_to_%s", fGateBias_name.data(), blockGateForget_name.data()); std::string weight_fGateBias_name(weight_fGateBias_name_char); delete[] weight_fGateBias_name_char; char* link_fGateBias_name_char = new char[512]; sprintf(link_fGateBias_name_char, "%s_to_%s", fGateBias_name.data(), blockGateForget_name.data()); std::string link_fGateBias_name(link_fGateBias_name_char); delete[] link_fGateBias_name_char; std::shared_ptr<WeightInitOp<TensorT>> fGateBias_weight_init; fGateBias_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0)); std::shared_ptr<SolverOp<TensorT>> fGateBias_solver = solver; Weight<TensorT> weight_fGateBias(weight_fGateBias_name, fGateBias_weight_init, fGateBias_solver); weight_fGateBias.setModuleName(module_name); Link link_fGateBias(link_fGateBias_name, fGateBias_name, blockGateForget_name, weight_fGateBias_name); link_fGateBias.setModuleName(module_name); model.addWeights({ weight_fGateBias }); model.addLinks({ link_fGateBias }); } // Make the output gate bias nodes char* oGateBias_name_char = new char[512]; sprintf(oGateBias_name_char, "%s-bias", blockGateOutput_name.data()); std::string oGateBias_name(oGateBias_name_char); Node<TensorT> oGateBias(oGateBias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); oGateBias.setModuleName(module_name); model.addNodes({ oGateBias }); delete[] oGateBias_name_char; // Make the link between output gate bias node to output gate node char* weight_oGateBias_name_char = new char[512]; sprintf(weight_oGateBias_name_char, "%s_to_%s", oGateBias_name.data(), blockGateOutput_name.data()); std::string weight_oGateBias_name(weight_oGateBias_name_char); delete[] weight_oGateBias_name_char; char* link_oGateBias_name_char = new char[512]; sprintf(link_oGateBias_name_char, "%s_to_%s", oGateBias_name.data(), blockGateOutput_name.data()); std::string link_oGateBias_name(link_oGateBias_name_char); delete[] link_oGateBias_name_char; std::shared_ptr<WeightInitOp<TensorT>> oGateBias_weight_init; oGateBias_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0)); std::shared_ptr<SolverOp<TensorT>> oGateBias_solver = solver; Weight<TensorT> weight_oGateBias(weight_oGateBias_name, oGateBias_weight_init, oGateBias_solver); weight_oGateBias.setModuleName(module_name); Link link_oGateBias(link_oGateBias_name, oGateBias_name, blockGateOutput_name, weight_oGateBias_name); link_oGateBias.setModuleName(module_name); model.addWeights({ weight_oGateBias }); model.addLinks({ link_oGateBias }); } for (const std::string& node_name : source_node_names) { // Make the link from input node to input gate char* weight_iToIGate_name_char = new char[512]; sprintf(weight_iToIGate_name_char, "%s_to_%s", node_name.data(), blockGateInput_name.data()); std::string weight_iToIGate_name(weight_iToIGate_name_char); delete[] weight_iToIGate_name_char; char* link_iToIGate_name_char = new char[512]; sprintf(link_iToIGate_name_char, "%s_to_%s", node_name.data(), blockGateInput_name.data()); std::string link_iToIGate_name(link_iToIGate_name_char); delete[] link_iToIGate_name_char; std::shared_ptr<WeightInitOp<TensorT>> iToIGate_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> iToIGate_solver = solver; Weight<TensorT> weight_iToIGate(weight_iToIGate_name, iToIGate_weight_init, iToIGate_solver); weight_iToIGate.setModuleName(module_name); Link link_iToIGate(link_iToIGate_name, node_name, blockGateInput_name, weight_iToIGate_name); link_iToIGate.setModuleName(module_name); model.addWeights({ weight_iToIGate }); model.addLinks({ link_iToIGate }); // Make the link from input node to output gate char* weight_iToOGate_name_char = new char[512]; sprintf(weight_iToOGate_name_char, "%s_to_%s", node_name.data(), blockGateOutput_name.data()); std::string weight_iToOGate_name(weight_iToOGate_name_char); delete[] weight_iToOGate_name_char; char* link_iToOGate_name_char = new char[512]; sprintf(link_iToOGate_name_char, "%s_to_%s", node_name.data(), blockGateOutput_name.data()); std::string link_iToOGate_name(link_iToOGate_name_char); delete[] link_iToOGate_name_char; std::shared_ptr<WeightInitOp<TensorT>> iToOGate_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> iToOGate_solver = solver; Weight<TensorT> weight_iToOGate(weight_iToOGate_name, iToOGate_weight_init, iToOGate_solver); weight_iToOGate.setModuleName(module_name); Link link_iToOGate(link_iToOGate_name, node_name, blockGateOutput_name, weight_iToOGate_name); link_iToOGate.setModuleName(module_name); model.addWeights({ weight_iToOGate }); model.addLinks({ link_iToOGate }); if (forget_gate) { // Make the link from input node to forget gate char* weight_iToFGate_name_char = new char[512]; sprintf(weight_iToFGate_name_char, "%s_to_%s", node_name.data(), blockGateForget_name.data()); std::string weight_iToFGate_name(weight_iToFGate_name_char); delete[] weight_iToFGate_name_char; char* link_iToFGate_name_char = new char[512]; sprintf(link_iToFGate_name_char, "%s_to_%s", node_name.data(), blockGateForget_name.data()); std::string link_iToFGate_name(link_iToFGate_name_char); delete[] link_iToFGate_name_char; std::shared_ptr<WeightInitOp<TensorT>> iToFGate_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> iToFGate_solver = solver; Weight<TensorT> weight_iToFGate(weight_iToFGate_name, iToFGate_weight_init, iToFGate_solver); weight_iToFGate.setModuleName(module_name); Link link_iToFGate(link_iToFGate_name, node_name, blockGateForget_name, weight_iToFGate_name); link_iToFGate.setModuleName(module_name); model.addWeights({ weight_iToFGate }); model.addLinks({ link_iToFGate }); } } for (int cell_iter = 0; cell_iter < n_cells; ++cell_iter) { // Make the input node char* blockInput_name_char = new char[512]; sprintf(blockInput_name_char, "%s-BlockInput-%012d", name.data(), cell_iter); std::string blockInput_name(blockInput_name_char); Node<TensorT> blockInput(blockInput_name, NodeType::hidden, NodeStatus::initialized, node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad); blockInput.setModuleName(module_name); blockInput.setDropProbability(drop_out_prob); if (specify_layer) blockInput.setLayerName(module_name + "-BlockInput"); model.addNodes({ blockInput }); delete[] blockInput_name_char; // Make the input multiplier node char* blockMultInput_name_char = new char[512]; sprintf(blockMultInput_name_char, "%s-BlockMultInput-%012d", name.data(), cell_iter); std::string blockMultInput_name(blockMultInput_name_char); Node<TensorT> blockMultInput(blockMultInput_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); blockMultInput.setModuleName(module_name); if (specify_layer) blockMultInput.setLayerName(module_name + "-BlockMultInput"); model.addNodes({ blockMultInput }); delete[] blockMultInput_name_char; // Make the output multiplier node[add drop prob] char* blockOutput_name_char = new char[512]; sprintf(blockOutput_name_char, "%s-BlockMultOutput-%012d", name.data(), cell_iter); std::string blockOutput_name(blockOutput_name_char); Node<TensorT> blockOutput(blockOutput_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); blockOutput.setModuleName(module_name); blockOutput.setDropProbability(drop_out_prob); if (specify_layer) blockOutput.setLayerName(module_name + "-BlockMultOutput"); model.addNodes({ blockOutput }); node_names.push_back(blockOutput_name); delete[] blockOutput_name_char; // Make the memory cell char* blockMemoryCell_name_char = new char[512]; sprintf(blockMemoryCell_name_char, "%s-BlockMemoryCell-%012d", name.data(), cell_iter); std::string blockMemoryCell_name(blockMemoryCell_name_char); Node<TensorT> blockMemoryCell(blockMemoryCell_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); blockMemoryCell.setModuleName(module_name); if (specify_layer) blockMemoryCell.setLayerName(module_name + "-BlockMemoryCell"); model.addNodes({ blockMemoryCell }); delete[] blockMemoryCell_name_char; // Make the link from memory cell to output multiplier node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockMemoryCell_name, blockOutput_name); char* link_MemCellToOMult_name_char = new char[512]; sprintf(link_MemCellToOMult_name_char, "%s_to_%s", blockMemoryCell_name.data(), blockOutput_name.data()); std::string link_MemCellToOMult_name(link_MemCellToOMult_name_char); Link link_MemCellToOMult(link_MemCellToOMult_name, blockMemoryCell_name, blockOutput_name, unity_weight_name); link_MemCellToOMult.setModuleName(module_name); model.addLinks({ link_MemCellToOMult }); delete[] link_MemCellToOMult_name_char; // Make the link from input multiplier node to memory cell unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockMultInput_name, blockMemoryCell_name); char* link_iMultToMemCell_name_char = new char[512]; sprintf(link_iMultToMemCell_name_char, "%s_to_%s", blockMultInput_name.data(), blockMemoryCell_name.data()); std::string link_iMultToMemCell_name(link_iMultToMemCell_name_char); Link link_iMultToMemCell(link_iMultToMemCell_name, blockMultInput_name, blockMemoryCell_name, unity_weight_name); link_iMultToMemCell.setModuleName(module_name); model.addLinks({ link_iMultToMemCell }); delete[] link_iMultToMemCell_name_char; // Make the link between the input and the input multiplier node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockInput_name, blockMultInput_name); char* link_iToIMult_name_char = new char[512]; sprintf(link_iToIMult_name_char, "%s_to_%s", blockInput_name.data(), blockMultInput_name.data()); std::string link_iToIMult_name(link_iToIMult_name_char); Link link_iToIMult(link_iToIMult_name, blockInput_name, blockMultInput_name, unity_weight_name); link_iToIMult.setModuleName(module_name); model.addLinks({ link_iToIMult }); delete[] link_iToIMult_name_char; // Make the link between the input gate and the input multiplier node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockGateInput_name, blockMultInput_name); char* link_iGateToIMult_name_char = new char[512]; sprintf(link_iGateToIMult_name_char, "%s_to_%s", blockGateInput_name.data(), blockMultInput_name.data()); std::string link_iGateToIMult_name(link_iGateToIMult_name_char); Link link_iGateToIMult(link_iGateToIMult_name, blockGateInput_name, blockMultInput_name, unity_weight_name); link_iGateToIMult.setModuleName(module_name); model.addLinks({ link_iGateToIMult }); delete[] link_iGateToIMult_name_char; // Make the link between the output gate and the output gate multiplier node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockGateOutput_name, blockOutput_name); char* link_oGateToOMult_name_char = new char[512]; sprintf(link_oGateToOMult_name_char, "%s_to_%s", blockGateOutput_name.data(), blockOutput_name.data()); std::string link_oGateToOMult_name(link_oGateToOMult_name_char); Link link_oGateToOMult(link_oGateToOMult_name, blockGateOutput_name, blockOutput_name, unity_weight_name); link_oGateToOMult.setModuleName(module_name); model.addLinks({ link_oGateToOMult }); delete[] link_oGateToOMult_name_char; // Make the link between the memory cell node and the input gate char* weight_OMultToIGate_name_char = new char[512]; sprintf(weight_OMultToIGate_name_char, "%s_to_%s", blockMemoryCell_name.data(), blockGateInput_name.data()); std::string weight_OMultToIGate_name(weight_OMultToIGate_name_char); delete[] weight_OMultToIGate_name_char; char* link_OMultToIGate_name_char = new char[512]; sprintf(link_OMultToIGate_name_char, "%s_to_%s", blockMemoryCell_name.data(), blockGateInput_name.data()); std::string link_OMultToIGate_name(link_OMultToIGate_name_char); delete[] link_OMultToIGate_name_char; std::shared_ptr<WeightInitOp<TensorT>> OMultToIGate_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> OMultToIGate_solver = solver; Weight<TensorT> weight_OMultToIGate(weight_OMultToIGate_name, OMultToIGate_weight_init, OMultToIGate_solver); weight_OMultToIGate.setModuleName(module_name); Link link_OMultToIGate(link_OMultToIGate_name, blockMemoryCell_name, blockGateInput_name, weight_OMultToIGate_name); link_OMultToIGate.setModuleName(module_name); model.addWeights({ weight_OMultToIGate }); model.addLinks({ link_OMultToIGate }); if (forget_gate) { // Make the forget gate multiplier node char* blockMultForget_name_char = new char[512]; sprintf(blockMultForget_name_char, "%s-BlockMultForget-%012d", name.data(), cell_iter); std::string blockMultForget_name(blockMultForget_name_char); Node<TensorT> blockMultForget(blockMultForget_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); blockMultForget.setModuleName(module_name); if (specify_layer) blockMultForget.setLayerName(module_name + "-BlockMultForget"); model.addNodes({ blockMultForget }); delete[] blockMultForget_name_char; // Make the link between the forget gate and the forget gate multiplier node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockGateForget_name, blockMultForget_name); char* link_fGateToFMult_name_char = new char[512]; sprintf(link_fGateToFMult_name_char, "%s_to_%s", blockGateForget_name.data(), blockMultForget_name.data()); std::string link_fGateToFMult_name(link_fGateToFMult_name_char); Link link_fGateToFMult(link_fGateToFMult_name, blockGateForget_name, blockMultForget_name, unity_weight_name); link_fGateToFMult.setModuleName(module_name); model.addLinks({ link_fGateToFMult }); delete[] link_fGateToFMult_name_char; // Make the link between the memory cell node and the forget gate char* weight_OMultToFGate_name_char = new char[512]; sprintf(weight_OMultToFGate_name_char, "%s_to_%s", blockMemoryCell_name.data(), blockGateForget_name.data()); std::string weight_OMultToFGate_name(weight_OMultToFGate_name_char); delete[] weight_OMultToFGate_name_char; char* link_OMultToFGate_name_char = new char[512]; sprintf(link_OMultToFGate_name_char, "%s_to_%s", blockMemoryCell_name.data(), blockGateForget_name.data()); std::string link_OMultToFGate_name(link_OMultToFGate_name_char); delete[] link_OMultToFGate_name_char; std::shared_ptr<WeightInitOp<TensorT>> OMultToFGate_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> OMultToFGate_solver = solver; Weight<TensorT> weight_OMultToFGate(weight_OMultToFGate_name, OMultToFGate_weight_init, OMultToFGate_solver); weight_OMultToFGate.setModuleName(module_name); Link link_OMultToFGate(link_OMultToFGate_name, blockMemoryCell_name, blockGateForget_name, weight_OMultToFGate_name); link_OMultToFGate.setModuleName(module_name); model.addWeights({ weight_OMultToFGate }); model.addLinks({ link_OMultToFGate }); // Make the link from forget gate multiplier node to memory cell unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockMultForget_name, blockMemoryCell_name); char* link_fMultToMemCell_name_char = new char[512]; sprintf(link_fMultToMemCell_name_char, "%s_to_%s", blockMultForget_name.data(), blockMemoryCell_name.data()); std::string link_fMultToMemCell_name(link_fMultToMemCell_name_char); Link link_fMultToMemCell(link_fMultToMemCell_name, blockMultForget_name, blockMemoryCell_name, unity_weight_name); link_fMultToMemCell.setModuleName(module_name); model.addLinks({ link_fMultToMemCell }); delete[] link_fMultToMemCell_name_char; // Make the link from memory cell to forget gate multiplier node unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockMemoryCell_name, blockMultForget_name); char* link_MemCellToFMult_name_char = new char[512]; sprintf(link_MemCellToFMult_name_char, "%s_to_%s", blockMemoryCell_name.data(), blockMultForget_name.data()); std::string link_MemCellToFMult_name(link_MemCellToFMult_name_char); Link link_MemCellToFMult(link_MemCellToFMult_name, blockMemoryCell_name, blockMultForget_name, unity_weight_name); link_MemCellToFMult.setModuleName(module_name); model.addLinks({ link_MemCellToFMult }); delete[] link_MemCellToFMult_name_char; } else { // Make the link from forget gate multiplier node to memory cell unity_weight_name = makeUnityWeight(model, 1.0, module_name, "%s_to_%s", blockMemoryCell_name, blockMemoryCell_name); char* link_fMultToMemCell_name_char = new char[512]; sprintf(link_fMultToMemCell_name_char, "%s_to_%s", blockMemoryCell_name.data(), blockMemoryCell_name.data()); std::string link_fMultToMemCell_name(link_fMultToMemCell_name_char); Link link_fMultToMemCell(link_fMultToMemCell_name, blockMemoryCell_name, blockMemoryCell_name, unity_weight_name); link_fMultToMemCell.setModuleName(module_name); model.addLinks({ link_fMultToMemCell }); delete[] link_fMultToMemCell_name_char; } // Make the link between the output multiplier node and the output gate char* weight_OMultToOGate_name_char = new char[512]; sprintf(weight_OMultToOGate_name_char, "%s_to_%s", blockMemoryCell_name.data(), blockGateOutput_name.data()); std::string weight_OMultToOGate_name(weight_OMultToOGate_name_char); delete[] weight_OMultToOGate_name_char; char* link_OMultToOGate_name_char = new char[512]; sprintf(link_OMultToOGate_name_char, "%s_to_%s", blockMemoryCell_name.data(), blockGateOutput_name.data()); std::string link_OMultToOGate_name(link_OMultToOGate_name_char); delete[] link_OMultToOGate_name_char; std::shared_ptr<WeightInitOp<TensorT>> OMultToOGate_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> OMultToOGate_solver = solver; Weight<TensorT> weight_OMultToOGate(weight_OMultToOGate_name, OMultToOGate_weight_init, OMultToOGate_solver); weight_OMultToOGate.setModuleName(module_name); Link link_OMultToOGate(link_OMultToOGate_name, blockMemoryCell_name, blockGateOutput_name, weight_OMultToOGate_name); link_OMultToOGate.setModuleName(module_name); model.addWeights({ weight_OMultToOGate }); model.addLinks({ link_OMultToOGate }); if (biases) { // biases, links, and weights for input // Make the input bias nodes char* iBias_name_char = new char[512]; sprintf(iBias_name_char, "%s-bias-%012d", blockInput_name.data(), cell_iter); std::string iBias_name(iBias_name_char); Node<TensorT> iBias(iBias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); iBias.setDropProbability(drop_out_prob); iBias.setModuleName(module_name); model.addNodes({ iBias }); delete[] iBias_name_char; // Make the link between input bias node to input node char* weight_iBias_name_char = new char[512]; sprintf(weight_iBias_name_char, "%s_to_%s", iBias_name.data(), blockInput_name.data()); std::string weight_iBias_name(weight_iBias_name_char); delete[] weight_iBias_name_char; char* link_iBias_name_char = new char[512]; sprintf(link_iBias_name_char, "%s_to_%s", iBias_name.data(), blockInput_name.data()); std::string link_iBias_name(link_iBias_name_char); delete[] link_iBias_name_char; std::shared_ptr<WeightInitOp<TensorT>> iBias_weight_init; iBias_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0)); std::shared_ptr<SolverOp<TensorT>> iBias_solver = solver; Weight<TensorT> weight_iBias(weight_iBias_name, iBias_weight_init, iBias_solver); weight_iBias.setModuleName(module_name); weight_iBias.setDropProbability(drop_connection_prob); Link link_iBias(link_iBias_name, iBias_name, blockInput_name, weight_iBias_name); link_iBias.setModuleName(module_name); model.addWeights({ weight_iBias }); model.addLinks({ link_iBias }); } for (const std::string& node_name : source_node_names) { // Make the link form input to block input char* weight_iToIBlock_name_char = new char[512]; sprintf(weight_iToIBlock_name_char, "%s_to_%s", node_name.data(), blockInput_name.data()); std::string weight_iToIBlock_name(weight_iToIBlock_name_char); delete[] weight_iToIBlock_name_char; char* link_iToIBlock_name_char = new char[512]; sprintf(link_iToIBlock_name_char, "%s_to_%s", node_name.data(), blockInput_name.data()); std::string link_iToIBlock_name(link_iToIBlock_name_char); delete[] link_iToIBlock_name_char; std::shared_ptr<WeightInitOp<TensorT>> iToIBlock_weight_init = weight_init; std::shared_ptr<SolverOp<TensorT>> iToIBlock_solver = solver; Weight<TensorT> weight_iToIBlock(weight_iToIBlock_name, iToIBlock_weight_init, iToIBlock_solver); weight_iToIBlock.setModuleName(module_name); weight_iToIBlock.setDropProbability(drop_connection_prob); Link link_iToIBlock(link_iToIBlock_name, node_name, blockInput_name, weight_iToIBlock_name); link_iToIBlock.setModuleName(module_name); model.addWeights({ weight_iToIBlock }); model.addLinks({ link_iToIBlock }); } } return node_names; } template<typename TensorT> inline std::vector<std::string> ModelBuilder<TensorT>::addMultiHeadAttention(Model<TensorT>& model, const std::string & name, const std::string & module_name, const std::vector<std::string>& query_node_names, const std::vector<std::string>& key_node_names, const std::vector<std::string>& values_node_names, const int & n_heads, const std::string & attention_type, const int & model_length, const int & key_length, const int & values_length, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob, const TensorT& drop_connection_prob, const bool& biases, bool split_attention_layers) { // Create each head and concatenate the results std::vector<std::string> node_names_heads; for (size_t i = 0; i < n_heads; ++i) { std::vector<std::string> node_names_attention; char* name_char = new char[512]; sprintf(name_char, "%s-%012d", name.data(), i); std::string node_name(name_char); delete[] name_char; if (attention_type == "DotProd") { node_names_attention = addDotProdAttention(model, node_name, module_name, query_node_names, key_node_names, values_node_names, key_length, values_length, node_activation, node_activation_grad, weight_init, solver, drop_out_prob, drop_connection_prob, biases, split_attention_layers); } else { std::cout << "Attention type " << attention_type << " was not recognized." << std::endl; } for (std::string& node_name : node_names_attention) node_names_heads.push_back(node_name); } // Matrix multiply the concatenated heads to create the output std::vector<std::string> node_names = addFullyConnected(model, name + "_MultiHead", module_name, node_names_heads, model_length, node_activation, node_activation_grad, std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), weight_init, solver, drop_out_prob, drop_connection_prob, biases, split_attention_layers); return node_names; } template<typename TensorT> inline std::vector<std::string> ModelBuilder<TensorT>::addDotProdAttention(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& query_node_names, const std::vector<std::string>& key_node_names, const std::vector<std::string>& values_node_names, const int& key_length, const int& values_length, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const TensorT& drop_out_prob, const TensorT& drop_connection_prob, const bool& biases, bool split_attention_layers) { std::vector<std::string> node_names; // Make the query network std::vector<std::string> node_names_query = addFullyConnected(model, name + "_query", module_name + "_query", query_node_names, key_length, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), weight_init, solver, drop_out_prob, drop_connection_prob, false, split_attention_layers); // Make the key network std::vector<std::string> node_names_key = addFullyConnected(model, name + "_keys", module_name + "_keys", key_node_names, key_length, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), weight_init, solver, drop_out_prob, drop_connection_prob, false, split_attention_layers); // Make the values network std::vector<std::string> node_names_value = addFullyConnected(model, name + "_values", module_name + "_values", values_node_names, values_length, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), weight_init, solver, drop_out_prob, drop_connection_prob, false, split_attention_layers); // Multiply the key with the values and scale by the squared of the keys_length std::vector<std::string> node_names_scores = addSinglyConnected(model, name + "_scores", module_name + "_scores", node_names_key, node_names_key.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, 0.0, false, split_attention_layers); addSinglyConnected(model, module_name + "_scores", node_names_query, node_names_scores, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, split_attention_layers); // Add the scalar TensorT scalar_value = 1/std::sqrt((TensorT)key_length); char* scalar_name_char = new char[512]; sprintf(scalar_name_char, "%s-scalar", name.data()); std::string scalar_name(scalar_name_char); Node<TensorT> scalar(scalar_name, NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); scalar.setModuleName(module_name); if (split_attention_layers) scalar.setLayerName(module_name + "-scalar"); model.addNodes({ scalar }); delete[] scalar_name_char; std::vector<std::string> scalar_nodes = { scalar_name }; addFullyConnected(model, module_name + "_scalar", scalar_nodes, node_names_scores, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(scalar_value)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, split_attention_layers); // Add a stable softmax to create the attention std::vector<std::string> node_names_attention = addStableSoftMax(model, name + "_softMax", module_name, node_names_scores, split_attention_layers); // Multiply the attention with the values node_names = addSinglyConnected(model, name + "_attention", module_name + "_attention", node_names_value, node_names_value.size(), node_activation, node_activation_grad, std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), drop_out_prob, 0.0, false, split_attention_layers); addSinglyConnected(model, module_name, node_names_attention, node_names, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, split_attention_layers); return node_names; } template<typename TensorT> inline std::vector<std::string> ModelBuilder<TensorT>::addScalar(Model<TensorT>& model, const std::string & name, const std::string & module_name, const std::vector<std::string>& source_node_names, const TensorT & scalar_value, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const bool& specify_layer) { // Multiply the key with the values and scale by the squared of the keys_length std::vector<std::string> node_names = addSinglyConnected(model, name, module_name, source_node_names, source_node_names.size(), node_activation, node_activation_grad, std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, 0.0, false, specify_layer); // Add the scalar char* scalar_name_char = new char[512]; sprintf(scalar_name_char, "%s-scalar", name.data()); std::string scalar_name(scalar_name_char); Node<TensorT> scalar(scalar_name, NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); scalar.setModuleName(module_name); if (specify_layer) scalar.setLayerName(module_name + "-scalar"); model.addNodes({ scalar }); delete[] scalar_name_char; std::vector<std::string> scalar_nodes = { scalar_name }; addFullyConnected(model, module_name, scalar_nodes, node_names, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(scalar_value)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, specify_layer); return node_names; } template<typename TensorT> inline std::vector<std::string> ModelBuilder<TensorT>::addGaussianPosterior(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& mu_node_names, const std::vector<std::string>& logvar_node_names, const std::vector<std::string>& gaussian_node_names, const bool& specify_layer) { // Add the gaussian difference std::vector<std::string> gaussian_posterior_node_names = addGaussian_(model, name, module_name, mu_node_names, logvar_node_names, gaussian_node_names, specify_layer); return gaussian_posterior_node_names; } template<typename TensorT> inline std::vector<std::string> ModelBuilder<TensorT>::addGaussian_(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& mu_node_names, const std::vector<std::string>& logvar_node_names, const std::vector<std::string>& gaussian_node_names, const bool& specify_layer) { // logvar to sigma **2 std::vector<std::string> sigma_node_names = addSinglyConnected(model, name + "-GaussianSigma", module_name + "-GaussianSigma", logvar_node_names, logvar_node_names.size(), std::make_shared<ExponentialOp<TensorT>>(ExponentialOp<TensorT>()), std::make_shared<ExponentialGradOp<TensorT>>(ExponentialGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(1))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), TensorT(0), TensorT(0), false, specify_layer); std::vector<std::string> sigma2_node_names = addSinglyConnected(model, name + "-GaussianSigma2", module_name + "-GaussianSigma2", sigma_node_names, sigma_node_names.size(), std::make_shared<PowOp<TensorT>>(PowOp<TensorT>(TensorT(2))), std::make_shared<PowGradOp<TensorT>>(PowGradOp<TensorT>(TensorT(2))), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(1))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), TensorT(0), TensorT(0), false, specify_layer); // sigma**2 to scaling = 1.0 / nd.sqrt(2.0 * np.pi * (sigma ** 2)) std::vector<std::string> scaling_node_names = addSinglyConnected(model, name + "-GaussianScale", module_name + "-GaussianScale", sigma2_node_names, sigma2_node_names.size(), std::make_shared<PowOp<TensorT>>(PowOp<TensorT>(TensorT(-0.5))), std::make_shared<PowGradOp<TensorT>>(PowGradOp<TensorT>(TensorT(-0.5))), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(2.0 * 3.14159265359))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), TensorT(0), TensorT(0), false, specify_layer); // x and mu to (x - mu) ** 2 std::vector<std::string> xMinMu2_node_names = addSinglyConnected(model, name + "-GaussianXMinMu2", module_name + "-GaussianXMinMu2", gaussian_node_names, gaussian_node_names.size(), std::make_shared<PowOp<TensorT>>(PowOp<TensorT>(TensorT(2))), std::make_shared<PowGradOp<TensorT>>(PowGradOp<TensorT>(TensorT(2))), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(1))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), TensorT(0), TensorT(0), false, specify_layer); addSinglyConnected(model, module_name + "-GaussianXMinMu2", mu_node_names, xMinMu2_node_names, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(-1))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), TensorT(0), specify_layer); // sigma ** 2 to 1 / (2.0 * sigma ** 2) std::vector<std::string> bellSigma_node_names = addSinglyConnected(model, name + "-GaussianBellSigma", module_name + "-GaussianBellSigma", sigma2_node_names, sigma2_node_names.size(), std::make_shared<InverseOp<TensorT>>(InverseOp<TensorT>()), std::make_shared<InverseGradOp<TensorT>>(InverseGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(2.0))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), TensorT(0), TensorT(0), false, specify_layer); // (x - mu) ** 2 and 1 / (2.0 * sigma ** 2) to bell = exp(- (x - mu) ** 2 / (2.0 * sigma ** 2)) std::vector<std::string> bell_node_names = addSinglyConnected(model, name + "-GaussianBell", module_name + "-GaussianBell", xMinMu2_node_names, xMinMu2_node_names.size(), std::make_shared<ExponentialOp<TensorT>>(ExponentialOp<TensorT>()), std::make_shared<ExponentialGradOp<TensorT>>(ExponentialGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()), std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(-1.0))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), TensorT(0), TensorT(0), false, specify_layer); addSinglyConnected(model, module_name + "-GaussianBell", bellSigma_node_names, bell_node_names, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(1))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), TensorT(0), specify_layer); // scaling * bell std::vector<std::string> gaussian__node_names = addSinglyConnected(model, name, module_name + "-Gaussian", scaling_node_names, scaling_node_names.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()), std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(1))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), TensorT(0), TensorT(0), false, specify_layer); addSinglyConnected(model, module_name + "-Gaussian", bell_node_names, gaussian__node_names, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(1))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), TensorT(0), specify_layer); return gaussian__node_names; } template<typename TensorT> inline std::vector<std::string> ModelBuilder<TensorT>::addMixedGaussianPior(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& gaussian_node_names, const TensorT& logvar_1, const TensorT& logvar_2, const TensorT& pi, const bool& specify_layer) { // Make the mu (i.e., 0) layer and the logvar (i.e., scalar) layers std::vector<std::string> mu_node_names, logvar1_node_names, logvar2_node_names; for (int i = 0; i < gaussian_node_names.size(); ++i) { // Mu char* mu_name_char = new char[512]; sprintf(mu_name_char, "%s-MixedGaussianPriorMu-%012d", name.data(), i); std::string mu_name(mu_name_char); Node<TensorT> mu(mu_name, NodeType::zero, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); mu.setModuleName(module_name + "-MixedGaussianPriorMu"); if (specify_layer) mu.setLayerName(module_name + "-MixedGaussianPriorMu"); model.addNodes({ mu }); mu_node_names.push_back(mu_name); delete[] mu_name_char; // logvar1 char* logvar1_name_char = new char[512]; sprintf(logvar1_name_char, "%s-MixedGaussianPriorLogVar1-%012d", name.data(), i); std::string logvar1_name(logvar1_name_char); Node<TensorT> logvar1(logvar1_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); logvar1.setModuleName(module_name + "-MixedGaussianPriorLogVar1"); if (specify_layer) logvar1.setLayerName(module_name + "-MixedGaussianPriorLogVar1"); model.addNodes({ logvar1 }); logvar1_node_names.push_back(logvar1_name); delete[] logvar1_name_char; // logvar2 char* logvar2_name_char = new char[512]; sprintf(logvar2_name_char, "%s-MixedGaussianPriorLogVar2-%012d", name.data(), i); std::string logvar2_name(logvar2_name_char); Node<TensorT> logvar2(logvar2_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); logvar2.setModuleName(module_name + "-MixedGaussianPriorLogVar2"); if (specify_layer) logvar2.setLayerName(module_name + "-MixedGaussianPriorLogVar2"); model.addNodes({ logvar2 }); logvar2_node_names.push_back(logvar2_name); delete[] logvar2_name_char; } addBiases(model, module_name + "-MixedGaussianPriorLogVar1Bias", logvar1_node_names, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(logvar_1))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), TensorT(0), specify_layer); addBiases(model, module_name + "-MixedGaussianPriorLogVar2Bias", logvar2_node_names, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(logvar_2))), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), TensorT(0), specify_layer); // Make the two Gaussians std::vector<std::string> gaussian1_node_names = addGaussian_(model, name + "-Gaussian-1", module_name + "-Gaussian-1", mu_node_names, logvar1_node_names, gaussian_node_names, specify_layer); std::vector<std::string> gaussian2_node_names = addGaussian_(model, name + "-Gaussian-2", module_name + "-Gaussian-2", mu_node_names, logvar2_node_names, gaussian_node_names, specify_layer); // Mix the two Gaussians std::vector<std::string> mixedGaussianPrior_node_names = addSinglyConnected(model, name + "-MixedGaussianPrior", module_name + "-MixedGaussianPrior", gaussian1_node_names, gaussian1_node_names.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(pi)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), TensorT(0), TensorT(0), false, specify_layer); addSinglyConnected(model, module_name + "-MixedGaussianPrior", gaussian2_node_names, mixedGaussianPrior_node_names, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(1)-pi)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), TensorT(0), specify_layer); return mixedGaussianPrior_node_names; } template<typename TensorT> inline std::vector<std::string> ModelBuilder<TensorT>::addFullyConnectedBayesian(Model<TensorT>& model, const std::string& name, const std::string& module_name, const std::vector<std::string>& source_node_names, const int& n_nodes, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init_mu, const std::shared_ptr<SolverOp<TensorT>>& solver_mu, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init_logvar, const std::shared_ptr<SolverOp<TensorT>>& solver_logvar, const TensorT& logvar_1, const TensorT& logvar_2, const TensorT& pi, std::vector<std::string>& node_names_logvar_output, std::vector<std::string>& node_names_posterior_output, std::vector<std::string>& node_names_prior_output, const bool& specify_layer) { std::vector<std::string> node_names_output; node_names_logvar_output.clear(); node_names_posterior_output.clear(); node_names_prior_output.clear(); // Bayesian source layer std::vector<std::vector<std::string>> node_names_mu, node_names_logvar, node_names_gaussian; for (const auto& node_name_source : source_node_names) { // Create the source to mu and logvar nodes, links, and weights std::vector<std::string> node_names_mu_source = addFullyConnected(model, name + "-" + node_name_source + "-Mu", module_name + "-source", { node_name_source }, n_nodes, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), weight_init_mu, solver_mu, 0.0f, 0.0f, false, specify_layer); std::vector<std::string> node_names_logvar_source = addFullyConnected(model, name + "-" + node_name_source + "-LogVar", module_name + "-source", { node_name_source }, n_nodes, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), weight_init_logvar, solver_logvar, 0.0f, 0.0f, false, specify_layer); // Gaussian node for each source node std::vector<std::string> node_names_gaussian_source = addGaussianEncoding(model, name + "-" + node_name_source + "-Gaussian", module_name + "-Gaussian", node_names_mu_source, node_names_logvar_source, specify_layer); // Gaussian posterior and prior for each source node std::vector<std::string> node_names_posterior_source = addGaussianPosterior(model, name + "-" + node_name_source + "-Posterior", module_name + "-Posterior", node_names_mu_source, node_names_logvar_source, node_names_gaussian_source, specify_layer); std::vector<std::string> node_names_prior_source = addMixedGaussianPior(model, name + "-" + node_name_source + "-Prior", module_name + "-Prior", node_names_gaussian_source, logvar_1, logvar_2, pi, specify_layer); // Update the output node names node_names_mu.push_back(node_names_mu_source); node_names_logvar.push_back(node_names_logvar_source); for (const std::string& node_name : node_names_logvar_source) node_names_logvar_output.push_back(node_name); node_names_gaussian.push_back(node_names_gaussian_source); for (const std::string& node_name : node_names_posterior_source) node_names_posterior_output.push_back(node_name); for (const std::string& node_name : node_names_prior_source) node_names_prior_output.push_back(node_name); } // Create the output nodes for (int i = 0; i < n_nodes; ++i) { char* node_name_output_char = new char[512]; sprintf(node_name_output_char, "%s_%012d", name.data(), i); std::string node_name_output(node_name_output_char); node_names_output.push_back(node_name_output); Node<TensorT> node_output(node_name_output, NodeType::hidden, NodeStatus::initialized, node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad); node_output.setModuleName(module_name); if (specify_layer) node_output.setLayerName(module_name); model.addNodes({ node_output }); delete[] node_name_output_char; } // Link the Guassian nodes to the output nodes for (int i = 0; i < source_node_names.size(); ++i) { addSinglyConnected(model, module_name, node_names_gaussian.at(i), node_names_output, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0, specify_layer); } return node_names_output; } template<typename TensorT> inline std::string ModelBuilder<TensorT>::makeUnityWeight(Model<TensorT>& model, const TensorT & scale, const std::string& module_name, const std::string& name_format, const std::string& lhs, const std::string& rhs, const bool& specify_layer) { // Create the unity weight char* unity_weight_name_char = new char[512]; sprintf(unity_weight_name_char, name_format.data(), lhs.data(), rhs.data()); std::string unity_weight_name(unity_weight_name_char); Weight<TensorT> unity_weight(unity_weight_name, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(scale)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); unity_weight.setModuleName(module_name); if (specify_layer) unity_weight.setLayerName(module_name); model.addWeights({ unity_weight }); delete[] unity_weight_name_char; return unity_weight_name; } } #endif //EVONET_MODELBUILDER_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_METRICFUNCTION_H #define EVONET_METRICFUNCTION_H namespace EvoNet { /** @brief Base class for all model metric functions Abbreviations used in classes: - BC: binary classification - MC: multiclass classification - ML: multilabel classification - H: hierarchical classification - micro: micro averaging - macro: macro averaging */ template<typename TensorT> class MetricFunctionOp { public: MetricFunctionOp() = default; virtual ~MetricFunctionOp() = default; virtual std::string getName() = 0; virtual std::vector<TensorT> getParameters() const = 0; std::string getReductionFunc() { return reduction_func_; } void setReductionFunc(const std::string& reduction_func) { reduction_func_ = reduction_func; } protected: std::string reduction_func_ = "Sum"; }; /** @brief Classification accuracy function for binary classification problems. */ template<typename TensorT> class AccuracyBCOp : public MetricFunctionOp<TensorT> { public: AccuracyBCOp() = default; AccuracyBCOp(const TensorT& classification_threshold) :classification_threshold_(classification_threshold) {} std::string getName() { return "AccuracyBCOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({this->classification_threshold_}); } TensorT getClassificationThreshold() const { return this->classification_threshold_; } protected: TensorT classification_threshold_ = 0.5; ///< greater than or equal to is true, less than is false }; /** @brief Classification accuracy function for multiclass classification problems using micro averaging. */ template<typename TensorT> class AccuracyMCMicroOp : public MetricFunctionOp<TensorT> { public: using MetricFunctionOp<TensorT>::MetricFunctionOp; std::string getName() { return "AccuracyMCMicroOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ }); } }; /** @brief Classification accuracy function for multiclass classification problems using micro averaging. */ template<typename TensorT> class AccuracyMCMacroOp : public MetricFunctionOp<TensorT> { public: using MetricFunctionOp<TensorT>::MetricFunctionOp; std::string getName() { return "AccuracyMCMacroOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ }); } }; /** @brief Classification precision function for binary classification problems. */ template<typename TensorT> class PrecisionBCOp : public MetricFunctionOp<TensorT> { public: PrecisionBCOp() = default; PrecisionBCOp(const TensorT& classification_threshold) :classification_threshold_(classification_threshold) {} std::string getName() { return "PrecisionBCOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->classification_threshold_ }); } TensorT getClassificationThreshold() const { return this->classification_threshold_; } protected: TensorT classification_threshold_ = 0.5; ///< greater than or equal to is true, less than is false }; /** @brief Classification precision function for multiclass classification problems using micro averaging. */ template<typename TensorT> class PrecisionMCMicroOp : public MetricFunctionOp<TensorT> { public: using MetricFunctionOp<TensorT>::MetricFunctionOp; std::string getName() { return "PrecisionMCMicroOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ }); } }; /** @brief Classification precision function for multiclass classification problems using micro averaging. */ template<typename TensorT> class PrecisionMCMacroOp : public MetricFunctionOp<TensorT> { public: using MetricFunctionOp<TensorT>::MetricFunctionOp; std::string getName() { return "PrecisionMCMacroOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ }); } }; /** @brief Classification recall function for binary classification problems. */ template<typename TensorT> class RecallBCOp : public MetricFunctionOp<TensorT> { public: RecallBCOp() = default; RecallBCOp(const TensorT& classification_threshold) :classification_threshold_(classification_threshold) {} std::string getName() { return "RecallBCOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->classification_threshold_ }); } TensorT getClassificationThreshold() const { return this->classification_threshold_; } protected: TensorT classification_threshold_ = 0.5; ///< greater than or equal to is true, less than is false }; /** @brief Classification recall function for multiclass classification problems using micro averaging. */ template<typename TensorT> class RecallMCMicroOp : public MetricFunctionOp<TensorT> { public: using MetricFunctionOp<TensorT>::MetricFunctionOp; std::string getName() { return "RecallMCMicroOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ }); } }; /** @brief Classification recall function for multiclass classification problems using micro averaging. */ template<typename TensorT> class RecallMCMacroOp : public MetricFunctionOp<TensorT> { public: using MetricFunctionOp<TensorT>::MetricFunctionOp; std::string getName() { return "RecallMCMacroOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ }); } }; /** @brief Prediction biass function. */ template<typename TensorT> class PredictionBiasOp : public MetricFunctionOp<TensorT> { public: using MetricFunctionOp<TensorT>::MetricFunctionOp; std::string getName() { return "PredictionBiasOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>(); } }; /** @brief F1 score function for binary classification problems. */ template<typename TensorT> class F1ScoreBCOp : public MetricFunctionOp<TensorT> { public: F1ScoreBCOp() = default; F1ScoreBCOp(const TensorT& classification_threshold) :classification_threshold_(classification_threshold) {}; std::string getName() { return "F1ScoreBCOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->classification_threshold_ }); } TensorT getClassificationThreshold() const { return this->classification_threshold_; } protected: TensorT classification_threshold_ = 0.5; }; /** @brief F1 score function for multiclass classification problems using micro averaging. */ template<typename TensorT> class F1ScoreMCMicroOp : public MetricFunctionOp<TensorT> { public: using MetricFunctionOp<TensorT>::MetricFunctionOp; std::string getName() { return "F1ScoreMCMicroOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ }); } }; /** @brief F1 score function for multiclass classification problems using macro averaging. */ template<typename TensorT> class F1ScoreMCMacroOp : public MetricFunctionOp<TensorT> { public: using MetricFunctionOp<TensorT>::MetricFunctionOp; std::string getName() { return "F1ScoreMCMacroOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ }); } }; /** @brief Area under the ROC (AUROC) function. */ template<typename TensorT> class AUROCOp : public MetricFunctionOp<TensorT> { public: AUROCOp() = default; AUROCOp(const TensorT& classification_threshold) :classification_threshold_(classification_threshold) {} std::string getName() { return "AUROCOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->classification_threshold_ }); } TensorT getClassificationThreshold() const { return this->classification_threshold_; } protected: TensorT classification_threshold_ = 0.5; }; /** @brief Mathews correlation coefficient (MCC) function for binary classification. */ template<typename TensorT> class MCCBCOp : public MetricFunctionOp<TensorT> { public: MCCBCOp() = default; MCCBCOp(const TensorT& classification_threshold) :classification_threshold_(classification_threshold) {} std::string getName() { return "MCCBCOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ this->classification_threshold_ }); } TensorT getClassificationThreshold() const { return this->classification_threshold_; } protected: TensorT classification_threshold_ = 0.5; }; /** @brief Mathews correlation coefficient (MCC) function for multiclass classification problems using micro averaging. */ template<typename TensorT> class MCCMCMicroOp : public MetricFunctionOp<TensorT> { public: using MetricFunctionOp<TensorT>::MetricFunctionOp; std::string getName() { return "MCCMCMicroOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>({ }); } }; /** @brief MAE Mean Absolute Error function. */ template<typename TensorT> class MAEOp : public MetricFunctionOp<TensorT> { public: MAEOp() = default; ~MAEOp() = default; MAEOp(const std::string& reduction_func){ this->setReductionFunc(reduction_func); }; ///< Options are Sum, Mean, Var std::string getName() { return "MAEOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>(); } }; /** @brief CosineSimilarity function. */ template<typename TensorT> class CosineSimilarityOp : public MetricFunctionOp<TensorT> { public: CosineSimilarityOp() = default; ~CosineSimilarityOp() = default; CosineSimilarityOp(const std::string& reduction_func){ this->setReductionFunc(reduction_func); }; ///< Options are Sum, Mean, Var std::string getName() { return "CosineSimilarityOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>(); } }; /** @brief PearsonR function. */ template<typename TensorT> class PearsonROp : public MetricFunctionOp<TensorT> { public: PearsonROp() = default; ~PearsonROp() = default; PearsonROp(const std::string& reduction_func){ this->setReductionFunc(reduction_func); }; ///< Options are Sum, Mean, Var std::string getName() { return "PearsonROp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>(); } }; /** @brief EuclideanDist function. */ template<typename TensorT> class EuclideanDistOp : public MetricFunctionOp<TensorT> { public: EuclideanDistOp() = default; ~EuclideanDistOp() = default; EuclideanDistOp(const std::string& reduction_func){ this->setReductionFunc(reduction_func); }; ///< Options are Sum, Mean, Var std::string getName() { return "EuclideanDistOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>(); } }; /** @brief ManhattanDist function. */ template<typename TensorT> class ManhattanDistOp : public MetricFunctionOp<TensorT> { public: ManhattanDistOp() = default; ~ManhattanDistOp() = default; ManhattanDistOp(const std::string& reduction_func){ this->setReductionFunc(reduction_func); }; ///< Options are Sum, Mean, Var std::string getName() { return "ManhattanDistOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>(); } }; /** @brief JeffreysAndMatusitaDist function. */ template<typename TensorT> class JeffreysAndMatusitaDistOp : public MetricFunctionOp<TensorT> { public: JeffreysAndMatusitaDistOp() = default; ~JeffreysAndMatusitaDistOp() = default; JeffreysAndMatusitaDistOp(const std::string& reduction_func){ this->setReductionFunc(reduction_func); }; ///< Options are Sum, Mean, Var std::string getName() { return "JeffreysAndMatusitaDistOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>(); } }; /** @brief LogarithmicDist function. */ template<typename TensorT> class LogarithmicDistOp : public MetricFunctionOp<TensorT> { public: LogarithmicDistOp() = default; ~LogarithmicDistOp() = default; LogarithmicDistOp(const std::string& reduction_func){ this->setReductionFunc(reduction_func); }; ///< Options are Sum, Mean, Var std::string getName() { return "LogarithmicDistOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>(); } }; /** @brief PercentDifference function. */ template<typename TensorT> class PercentDifferenceOp : public MetricFunctionOp<TensorT> { public: PercentDifferenceOp() = default; ~PercentDifferenceOp() = default; PercentDifferenceOp(const std::string& reduction_func){ this->setReductionFunc(reduction_func); }; ///< Options are Sum, Mean, Var std::string getName() { return "PercentDifferenceOp"; }; std::vector<TensorT> getParameters() const { return std::vector<TensorT>(); } }; } #endif //EVONET_METRICFUNCTION_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_WEIGHTTENSORDATA_H #define EVONET_WEIGHTTENSORDATA_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> #endif #include <unsupported/Eigen/CXX11/Tensor> //#include <cereal/access.hpp> // serialiation of private members //#include <cereal/types/memory.hpp> //#undef min // clashes with std::limit on windows in polymorphic.hpp //#undef max // clashes with std::limit on windows in polymorphic.hpp //#include <cereal/types/polymorphic.hpp> namespace EvoNet { /** @brief Network WeightMatrixData NOTES: - define the batch size and memory sizes - define the weight and solver param mapping - initialize tensors */ template<typename TensorT, typename DeviceT> class WeightTensorData { public: WeightTensorData() = default; ///< Default constructor WeightTensorData(const WeightTensorData& other) { h_weight_ = other.h_weight_; h_solver_params_ = other.h_solver_params_; h_error_ = other.h_error_; h_shared_weights_ = other.h_shared_weights_; d_weight_ = other.d_weight_; d_solver_params_ = other.d_solver_params_; d_error_ = other.d_error_; d_shared_weights_ = other.d_shared_weights_; layer1_size_ = other.layer1_size_; layer2_size_ = other.layer2_size_; n_solver_params_ = other.n_solver_params_; n_shared_weights_ = other.n_shared_weights_; h_error_updated_ = other.h_error_updated_; h_weight_updated_ = other.h_weight_updated_; h_solver_params_updated_ = other.h_solver_params_updated_; h_shared_weights_updated_ = other.h_shared_weights_updated_; d_error_updated_ = other.d_error_updated_; d_weight_updated_ = other.d_weight_updated_; d_solver_params_updated_ = other.d_solver_params_updated_; d_shared_weights_updated_ = other.d_shared_weights_updated_; sink_layer_integration_ = other.sink_layer_integration_; }; ~WeightTensorData() = default; ///< Default destructor inline bool operator==(const WeightTensorData& other) const { return std::tie( ) == std::tie( ) ; } inline bool operator!=(const WeightTensorData& other) const { return !(*this == other); } inline WeightTensorData& operator=(const WeightTensorData& other) { h_weight_ = other.h_weight_; h_solver_params_ = other.h_solver_params_; h_error_ = other.h_error_; h_shared_weights_ = other.h_shared_weights_; d_weight_ = other.d_weight_; d_solver_params_ = other.d_solver_params_; d_error_ = other.d_error_; d_shared_weights_ = other.d_shared_weights_; layer1_size_ = other.layer1_size_; layer2_size_ = other.layer2_size_; n_solver_params_ = other.n_solver_params_; n_shared_weights_ = other.n_shared_weights_; h_error_updated_ = other.h_error_updated_; h_weight_updated_ = other.h_weight_updated_; h_solver_params_updated_ = other.h_solver_params_updated_; h_shared_weights_updated_ = other.h_shared_weights_updated_; d_error_updated_ = other.d_error_updated_; d_weight_updated_ = other.d_weight_updated_; d_solver_params_updated_ = other.d_solver_params_updated_; d_shared_weights_updated_ = other.d_shared_weights_updated_; sink_layer_integration_ = other.sink_layer_integration_; return *this; } void setLayer1Size(const int& layer1_size) { layer1_size_ = layer1_size; } void setLayer2Size(const int& layer2_size) { layer2_size_ = layer2_size; } void setNSolverParams(const int& n_solver_params) { n_solver_params_ = n_solver_params; } void setNSharedWeights(const int& n_shared_weights) { n_shared_weights_ = n_shared_weights; } void setSinkLayerIntegration(const std::string& sink_layer_integration) { sink_layer_integration_ = sink_layer_integration; } int getLayer1Size() const { return layer1_size_; } int getLayer2Size() const { return layer2_size_; } int getNSolverParams() const { return n_solver_params_; } int getNSharedWeights() const { return n_shared_weights_; } std::string getSinkLayerIntegration() const { return sink_layer_integration_; } virtual void setWeight(const Eigen::Tensor<TensorT, 2>& weight) = 0; ///< weight setter Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> getWeight() { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> weight(h_weight_.get(), layer1_size_, layer2_size_); return weight; }; ///< weight copy getter std::shared_ptr<TensorT[]> getHWeightPointer() { return h_weight_; }; ///< weight pointer getter std::shared_ptr<TensorT[]> getDWeightPointer() { return d_weight_; }; ///< weight pointer getter virtual void setSolverParams(const Eigen::Tensor<TensorT, 3>& solver_params) = 0; ///< solver_params setter Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> getSolverParams() { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> solver_params(h_solver_params_.get(), layer1_size_, layer2_size_, n_solver_params_); return solver_params; }; ///< solver_params copy getter std::shared_ptr<TensorT[]> getHSolverParamsPointer() { return h_solver_params_; }; ///< solver_params pointer getter std::shared_ptr<TensorT[]> getDSolverParamsPointer() { return d_solver_params_; }; ///< solver_params pointer getter virtual void setError(const Eigen::Tensor<TensorT, 2>& error) = 0; ///< error setter Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> getError() { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error(h_error_.get(), layer1_size_, layer2_size_); return error; }; ///< error copy getter std::shared_ptr<TensorT[]> getHErrorPointer() { return h_error_; }; ///< error pointer getter std::shared_ptr<TensorT[]> getDErrorPointer() { return d_error_; }; ///< error pointer getter virtual void setSharedWeights(const Eigen::Tensor<TensorT, 3>& shared_weights) = 0; ///< shared_weights setter Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> getSharedWeights() { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> shared_weights(h_shared_weights_.get(), layer1_size_, layer2_size_, n_shared_weights_); return shared_weights; }; ///< shared_weights copy getter std::shared_ptr<TensorT[]> getHSharedWeightsPointer() { return h_shared_weights_; }; ///< shared_weights pointer getter std::shared_ptr<TensorT[]> getDSharedWeightsPointer() { return d_shared_weights_; }; ///< shared_weights pointer getter int getTensorSize() { return layer1_size_ * layer2_size_ * sizeof(TensorT); }; ///< Get the size of each tensor in bytes int getSolverParamsSize() { return layer1_size_ * layer2_size_ * n_solver_params_ * sizeof(TensorT); }; ///< Get the size of each tensor in bytes int getSharedWeightsSize() { return layer1_size_ * layer2_size_ * n_shared_weights_ * sizeof(TensorT); }; ///< Get the size of each tensor in bytes void initWeightTensorData(const int& layer1_size, const int&layer2_size, const std::vector<std::pair<int, int>>& weight_indices, const std::map<std::string, std::vector<std::pair<int, int>>>& shared_weight_indices, const std::vector<TensorT>& weight_values, const bool& train, std::vector<TensorT>& solver_params, const std::string& sink_node_integration); virtual bool syncHAndDError(DeviceT& device) = 0; virtual bool syncHAndDWeight(DeviceT& device) = 0; virtual bool syncHAndDSolverParams(DeviceT& device) = 0; virtual bool syncHAndDSharedWeights(DeviceT& device) = 0; std::pair<bool, bool> getErrorStatus() const { return std::make_pair(h_error_updated_, d_error_updated_); }; void setErrorStatus(const bool& h_status, const bool& d_status) { h_error_updated_ = h_status; d_error_updated_ = d_status; }; std::pair<bool, bool> getWeightStatus() const { return std::make_pair(h_weight_updated_, d_weight_updated_); }; void setWeightStatus(const bool& h_status, const bool& d_status) { h_weight_updated_ = h_status; d_weight_updated_ = d_status; }; std::pair<bool, bool> getSolverParamsStatus() const { return std::make_pair(h_solver_params_updated_, d_solver_params_updated_); }; void setSolverParamsStatus(const bool& h_status, const bool& d_status) { h_solver_params_updated_ = h_status; d_solver_params_updated_ = d_status; }; std::pair<bool, bool> getSharedWeightsStatus() const { return std::make_pair(h_shared_weights_updated_, d_shared_weights_updated_); }; void setSharedWeightsStatus(const bool& h_status, const bool& d_status) { h_shared_weights_updated_ = h_status; d_shared_weights_updated_ = d_status; }; protected: int layer1_size_ = 1; ///< Layer1 size int layer2_size_ = 2; ///< Layer2 size int n_solver_params_ = 0; ///< The number of solver params int n_shared_weights_ = 0; ///< The number of shared weights in the layer /** @brief weight and error have the following dimensions: rows: # of layer1, cols: # of layer2 while solver_params have the following dimensions: */ std::shared_ptr<TensorT[]> h_weight_; std::shared_ptr<TensorT[]> h_solver_params_; std::shared_ptr<TensorT[]> h_error_; std::shared_ptr<TensorT[]> h_shared_weights_; std::shared_ptr<TensorT[]> d_weight_; std::shared_ptr<TensorT[]> d_solver_params_; std::shared_ptr<TensorT[]> d_error_; std::shared_ptr<TensorT[]> d_shared_weights_; // [TODO: add drop probability] bool h_error_updated_ = false; bool h_weight_updated_ = false; bool h_solver_params_updated_ = false; bool h_shared_weights_updated_ = false; bool d_error_updated_ = false; bool d_weight_updated_ = false; bool d_solver_params_updated_ = false; bool d_shared_weights_updated_ = false; std::string sink_layer_integration_; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(layer1_size_, layer2_size_, n_solver_params_, n_shared_weights_, // h_weight_, h_solver_params_, h_error_, h_shared_weights_, // d_weight_, d_solver_params_, d_error_, d_shared_weights_, // h_error_updated_, h_weight_updated_, h_solver_params_updated_, h_shared_weights_updated_, // d_error_updated_, d_weight_updated_, d_solver_params_updated_, d_shared_weights_updated_); // } }; template<typename TensorT, typename DeviceT> inline void WeightTensorData<TensorT, DeviceT>::initWeightTensorData(const int & layer1_size, const int & layer2_size, const std::vector<std::pair<int, int>>& weight_indices, const std::map<std::string, std::vector<std::pair<int, int>>>& shared_weight_indices, const std::vector<TensorT>& weight_values, const bool & train, std::vector<TensorT>& solver_params, const std::string& sink_node_integration) { assert(weight_indices.size() == weight_values.size()); setLayer1Size(layer1_size); setLayer2Size(layer2_size); setSinkLayerIntegration(sink_node_integration); // TODO: implement checks to ensure Tensors are not too large // results in a std::bad_array_new_length // make the weight and error tensors Eigen::Tensor<TensorT, 2> zero(layer1_size, layer2_size); zero.setZero(); Eigen::Tensor<TensorT, 2> weights(layer1_size, layer2_size); weights.setZero(); // DEPRECATED: TODO: delete //if (sink_node_integration == "ProdOp" && // (weight_values.size() < layer1_size*layer2_size || sqrt(weight_values.size()) != layer1_size*layer2_size)) { // char error_char[512]; // sprintf(error_char, "The weight values for a ProdOp integration type is less than the product of the source and sink layer sizes. This will result in a zero sink tensor."); // std::string error(error_char); // throw std::runtime_error(error_char); //} for (size_t i = 0; i < weight_indices.size(); ++i) { weights(weight_indices[i].first, weight_indices[i].second) = weight_values[i]; } setWeight(weights); setError(zero); // make the parameters setNSolverParams(solver_params.size()); if (solver_params.size() > 0) { Eigen::Tensor<TensorT, 3> params(layer1_size, layer2_size, (int)solver_params.size()); params.setZero(); for (int i = 0; i < solver_params.size(); ++i) { params.chip(i, 2).setConstant(solver_params[i]); } setSolverParams(params); } // make the shared weighs tensor setNSharedWeights(shared_weight_indices.size()); if (shared_weight_indices.size() > 0) { Eigen::Tensor<TensorT, 3> shared(layer1_size, layer2_size, (int)shared_weight_indices.size()); shared.setZero(); int iter = 0; for (const auto& weight_indices_map : shared_weight_indices) { for (const std::pair<int, int>& weight_index : weight_indices_map.second) { Eigen::array<int, 3> offsets = { weight_index.first, weight_index.second, iter }; Eigen::array<int, 3> extents = { 1, 1, 1 }; Eigen::Tensor<TensorT, 3> ones(1, 1, 1); ones.setConstant((TensorT)1); shared.slice(offsets, extents) = ones; } ++iter; } setSharedWeights(shared); } } template<typename TensorT> class WeightTensorDataCpu : public WeightTensorData<TensorT, Eigen::DefaultDevice> { public: void setWeight(const Eigen::Tensor<TensorT, 2>& weight) override { TensorT* h_weight = new TensorT[this->layer1_size_*this->layer2_size_]; // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> weight_copy(h_weight, this->layer1_size_, this->layer2_size_); weight_copy = weight; //auto h_deleter = [&](TensorT* ptr) { delete[] ptr; }; //this->h_weight_.reset(h_weight, h_deleter); this->h_weight_.reset(h_weight); this->h_weight_updated_ = true; this->d_weight_updated_ = true; }; ///< weight setter void setSolverParams(const Eigen::Tensor<TensorT, 3>& solver_params) override { TensorT* h_solver_params = new TensorT[this->layer1_size_*this->layer2_size_*this->n_solver_params_]; // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> solver_params_copy(h_solver_params, this->layer1_size_, this->layer2_size_, this->n_solver_params_); solver_params_copy = solver_params; //auto h_deleter = [&](TensorT* ptr) { delete[] ptr; }; //this->h_solver_params_.reset(h_solver_params, h_deleter); this->h_solver_params_.reset(h_solver_params); this->h_solver_params_updated_ = true; this->d_solver_params_updated_ = true; }; ///< solver_params setter void setError(const Eigen::Tensor<TensorT, 2>& error) override { TensorT* h_error = new TensorT[this->layer1_size_*this->layer2_size_]; // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_copy(h_error, this->layer1_size_, this->layer2_size_); error_copy = error; //auto h_deleter = [&](TensorT* ptr) { delete[] ptr; }; //this->h_error_.reset(h_error, h_deleter); this->h_error_.reset(h_error); this->h_error_updated_ = true; this->d_error_updated_ = true; }; ///< error setter void setSharedWeights(const Eigen::Tensor<TensorT, 3>& shared_weights) override { TensorT* h_shared_weights = new TensorT[this->layer1_size_*this->layer2_size_*this->n_shared_weights_]; // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> shared_weights_copy(h_shared_weights, this->layer1_size_, this->layer2_size_, this->n_shared_weights_); shared_weights_copy = shared_weights; //auto h_deleter = [&](TensorT* ptr) { delete[] ptr; }; //this->h_shared_weights_.reset(h_shared_weights, h_deleter); this->h_shared_weights_.reset(h_shared_weights); this->h_shared_weights_updated_ = true; this->d_shared_weights_updated_ = true; }; ///< shared_weights setter bool syncHAndDError(Eigen::DefaultDevice& device) override { return true; } bool syncHAndDWeight(Eigen::DefaultDevice& device) override { return true; } bool syncHAndDSolverParams(Eigen::DefaultDevice& device) override { return true; } bool syncHAndDSharedWeights(Eigen::DefaultDevice& device) override { return true; } //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<WeightTensorData<TensorT, Eigen::DefaultDevice>>(this)); // } }; #if COMPILE_WITH_CUDA template<typename TensorT> class WeightTensorDataGpu : public WeightTensorData<TensorT, Eigen::GpuDevice> { public: void setWeight(const Eigen::Tensor<TensorT, 2>& weight) override { // allocate cuda and pinned host layer2 TensorT* d_weight; TensorT* h_weight; assert(cudaMalloc((void**)(&d_weight), getTensorSize()) == cudaSuccess); assert(cudaHostAlloc((void**)(&h_weight), getTensorSize(), cudaHostAllocDefault) == cudaSuccess); // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> weight_copy(h_weight, this->layer1_size_, this->layer2_size_); weight_copy = weight; // define the deleters auto h_deleter = [&](TensorT* ptr) { assert(cudaFreeHost(ptr) == cudaSuccess); }; auto d_deleter = [&](TensorT* ptr) { assert(cudaFree(ptr) == cudaSuccess); }; this->h_weight_.reset(h_weight, h_deleter); this->d_weight_.reset(d_weight, d_deleter); this->h_weight_updated_ = true; this->d_weight_updated_ = false; }; ///< weight setter void setSolverParams(const Eigen::Tensor<TensorT, 3>& solver_params) override { // allocate cuda and pinned host layer2 TensorT* d_solver_params; TensorT* h_solver_params; assert(cudaMalloc((void**)(&d_solver_params), getSolverParamsSize()) == cudaSuccess); assert(cudaHostAlloc((void**)(&h_solver_params), getSolverParamsSize(), cudaHostAllocDefault) == cudaSuccess); // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> solver_params_copy(h_solver_params, this->layer1_size_, this->layer2_size_, this->n_solver_params_); solver_params_copy = solver_params; // define the deleters auto h_deleter = [&](TensorT* ptr) { assert(cudaFreeHost(ptr) == cudaSuccess); }; auto d_deleter = [&](TensorT* ptr) { assert(cudaFree(ptr) == cudaSuccess); }; this->h_solver_params_.reset(h_solver_params, h_deleter); this->d_solver_params_.reset(d_solver_params, d_deleter); this->h_solver_params_updated_ = true; this->d_solver_params_updated_ = false; }; ///< solver_params setter void setError(const Eigen::Tensor<TensorT, 2>& error) override { // allocate cuda and pinned host layer2 TensorT* d_error; TensorT* h_error; assert(cudaMalloc((void**)(&d_error), getTensorSize()) == cudaSuccess); assert(cudaHostAlloc((void**)(&h_error), getTensorSize(), cudaHostAllocDefault) == cudaSuccess); // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_copy(h_error, this->layer1_size_, this->layer2_size_); error_copy = error; // define the deleters auto h_deleter = [&](TensorT* ptr) { assert(cudaFreeHost(ptr) == cudaSuccess); }; auto d_deleter = [&](TensorT* ptr) { assert(cudaFree(ptr) == cudaSuccess); }; this->h_error_.reset(h_error, h_deleter); this->d_error_.reset(d_error, d_deleter); this->h_error_updated_ = true; this->d_error_updated_ = false; }; ///< error setter void setSharedWeights(const Eigen::Tensor<TensorT, 3>& shared_weights) override { // allocate cuda and pinned host layer2 TensorT* d_shared_weights; TensorT* h_shared_weights; assert(cudaMalloc((void**)(&d_shared_weights), getSharedWeightsSize()) == cudaSuccess); assert(cudaHostAlloc((void**)(&h_shared_weights), getSharedWeightsSize(), cudaHostAllocDefault) == cudaSuccess); // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> shared_weights_copy(h_shared_weights, this->layer1_size_, this->layer2_size_, this->n_shared_weights_); shared_weights_copy = shared_weights; // define the deleters auto h_deleter = [&](TensorT* ptr) { assert(cudaFreeHost(ptr) == cudaSuccess); }; auto d_deleter = [&](TensorT* ptr) { assert(cudaFree(ptr) == cudaSuccess); }; this->h_shared_weights_.reset(h_shared_weights, h_deleter); this->d_shared_weights_.reset(d_shared_weights, d_deleter); this->h_shared_weights_updated_ = true; this->d_shared_weights_updated_ = false; }; ///< shared_weights setter bool syncHAndDError(Eigen::GpuDevice& device) override { if (this->h_error_updated_ && !this->d_error_updated_) { device.memcpyHostToDevice(this->d_error_.get(), this->h_error_.get(), getTensorSize()); this->d_error_updated_ = true; this->h_error_updated_ = false; return true; } else if (!this->h_error_updated_ && this->d_error_updated_) { device.memcpyDeviceToHost(this->h_error_.get(), this->d_error_.get(), getTensorSize()); this->h_error_updated_ = true; this->d_error_updated_ = false; return true; } else { //std::cout << "Both host and device are syncHAndDronized." << std::endl; return false; } } bool syncHAndDWeight(Eigen::GpuDevice& device) override { if (this->h_weight_updated_ && !this->d_weight_updated_) { device.memcpyHostToDevice(this->d_weight_.get(), this->h_weight_.get(), getTensorSize()); this->d_weight_updated_ = true; this->h_weight_updated_ = false; return true; } else if (!this->h_weight_updated_ && this->d_weight_updated_) { device.memcpyDeviceToHost(this->h_weight_.get(), this->d_weight_.get(), getTensorSize()); this->h_weight_updated_ = true; this->d_weight_updated_ = false; return true; } else { //std::cout << "Both host and device are syncHAndDronized." << std::endl; return false; } return true; } bool syncHAndDSolverParams(Eigen::GpuDevice& device) override { if (this->h_solver_params_updated_ && !this->d_solver_params_updated_) { device.memcpyHostToDevice(this->d_solver_params_.get(), this->h_solver_params_.get(), getSolverParamsSize()); this->d_solver_params_updated_ = true; this->h_solver_params_updated_ = false; return true; } else if (!this->h_solver_params_updated_ && this->d_solver_params_updated_) { device.memcpyDeviceToHost(this->h_solver_params_.get(), this->d_solver_params_.get(), getSolverParamsSize()); this->h_solver_params_updated_ = true; this->d_solver_params_updated_ = false; return true; } else { //std::cout << "Both host and device are syncHAndDronized." << std::endl; return false; } return true; } bool syncHAndDSharedWeights(Eigen::GpuDevice& device) override { if (this->h_shared_weights_updated_ && !this->d_shared_weights_updated_) { device.memcpyHostToDevice(this->d_shared_weights_.get(), this->h_shared_weights_.get(), getSharedWeightsSize()); this->d_shared_weights_updated_ = true; this->h_shared_weights_updated_ = false; return true; } else if (!this->h_shared_weights_updated_ && this->d_shared_weights_updated_) { device.memcpyDeviceToHost(this->h_shared_weights_.get(), this->d_shared_weights_.get(), getSharedWeightsSize()); this->h_shared_weights_updated_ = true; this->d_shared_weights_updated_ = false; return true; } else { //std::cout << "Both host and device are syncHAndDronized." << std::endl; return false; } return true; } //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<WeightTensorData<TensorT, Eigen::GpuDevice>>(this)); // } }; #endif } //CEREAL_REGISTER_TYPE(EvoNet::WeightTensorDataCpu<float>); //// TODO: add double, int, etc. //#if COMPILE_WITH_CUDA //CEREAL_REGISTER_TYPE(EvoNet::WeightTensorDataGpu<float>); //// TODO: add double, int, etc. //#endif #endif //EVONET_WEIGHTTENSORDATA_H<file_sep># -------------------------------------------------------------------------- # EvoNet -- Open-Source Mass Spectrometry # -------------------------------------------------------------------------- # Copyright The EvoNet Team -- Eberhard Karls University Tuebingen, # ETH Zurich, and Freie Universitaet Berlin 2002-2017. # # This software is released under a three-clause BSD license: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of any author or any participating institution # may be used to endorse or promote products derived from this software # without specific prior written permission. # For a full list of authors, refer to the file AUTHORS. # -------------------------------------------------------------------------- # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING # INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # -------------------------------------------------------------------------- # $Maintainer: <NAME> $ # $Authors: <NAME> $ # -------------------------------------------------------------------------- # a collection of wrapper for export functions that allows easier usage # througout the EvoNet build system set(_EVONET_EXPORT_FILE "EvoNetTargets.cmake") # clear list before we refill it set(_EVONET_EXPORT_TARGETS "" CACHE INTERNAL "List of targets that will be exported.") macro(evonet_register_export_target target_name) set(_EVONET_EXPORT_TARGETS ${_EVONET_EXPORT_TARGETS} ${target_name} CACHE INTERNAL "List of targets that will be exported.") endmacro() macro(evonet_export_targets ) set(_EXPORT_INCLUDE_BLOCK "") foreach(_target ${_EVONET_EXPORT_TARGETS}) # check if we have a corresponding include_dir variable if(NOT DEFINED ${_target}_INCLUDE_DIRECTORIES) message(FATAL_ERROR "Please provide the matching include directory variable ${_target}_INCLUDE_DIRECTORIES for export target ${_target}") endif() # extend include block set(_EXPORT_INCLUDE_BLOCK "set(${_target}_INCLUDE_DIRECTORIES \"${${_target}_INCLUDE_DIRECTORIES}\")\n\n${_EXPORT_INCLUDE_BLOCK}") endforeach() # configure EvoNetConfig.cmake configure_file( "${EVONET_HOST_DIRECTORY}/cmake/EvoNetConfig.cmake.in" "${PROJECT_BINARY_DIR}/EvoNetConfig.cmake" @ONLY ) # configure EvoNetConfig.cmake configure_file( "${EVONET_HOST_DIRECTORY}/cmake/EvoNetConfigVersion.cmake.in" "${PROJECT_BINARY_DIR}/EvoNetConfigVersion.cmake" @ONLY ) # create corresponding target file export(TARGETS ${_EVONET_EXPORT_TARGETS} FILE ${EVONET_HOST_BINARY_DIRECTORY}/${_EVONET_EXPORT_FILE}) # -------------------------------------------------------------------------- # export for install; clear variable before refilling set(_EXPORT_INCLUDE_BLOCK "") foreach(_target ${_EVONET_EXPORT_TARGETS}) # check if we have a corresponding include_dir variable if(NOT DEFINED ${_target}_INCLUDE_DIRECTORIES) message(FATAL_ERROR "Please provide the matching include directory variable ${_target}_INCLUDE_DIRECTORIES for export target ${_target}") endif() # find all includes that will not be installed with EvoNet set(_NON_INSTALLABLE_INCLUDES "") foreach(_incl_path ${${_target}_INCLUDE_DIRECTORIES}) if (NOT "${_incl_path}" MATCHES "^${EVONET_HOST_DIRECTORY}.*" AND NOT "${_incl_path}" MATCHES "^${EVONET_HOST_BINARY_DIRECTORY}.*") set(_NON_INSTALLABLE_INCLUDES ${_NON_INSTALLABLE_INCLUDES} ${_incl_path}) endif() endforeach() # append install include dir set(_NON_INSTALLABLE_INCLUDES ${_NON_INSTALLABLE_INCLUDES} ${INSTALL_INCLUDE_DIR}) set(_EXPORT_INCLUDE_BLOCK "set(${_target}_INCLUDE_DIRECTORIES \"${_NON_INSTALLABLE_INCLUDES}\")\n\n${_EXPORT_INCLUDE_BLOCK}") endforeach() # configure EvoNetConfig.cmake configure_file( "${EVONET_HOST_DIRECTORY}/cmake/EvoNetConfig.cmake.in" "${PROJECT_BINARY_DIR}/install/EvoNetConfig.cmake" @ONLY ) # install the generated config file install_file(${PROJECT_BINARY_DIR}/install/EvoNetConfig.cmake ${INSTALL_SHARE_DIR}/cmake share) # .. and ConfigVersion.cmake install_file(${PROJECT_BINARY_DIR}/EvoNetConfigVersion.cmake ${INSTALL_SHARE_DIR}/cmake share) # register the package export(PACKAGE EvoNet) endmacro() <file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE NodeFile test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/io/NodeFile.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(NodeFile1) BOOST_AUTO_TEST_CASE(constructor) { NodeFile<float>* ptr = nullptr; NodeFile<float>* nullPointer = nullptr; ptr = new NodeFile<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { NodeFile<float>* ptr = nullptr; ptr = new NodeFile<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(storeAndLoadCsv) { NodeFile<float> data; std::string filename = "NodeFileTest.csv"; // create list of dummy nodes std::map<std::string, std::shared_ptr<Node<float>>> nodes; for (int i=0; i<3; ++i) { std::shared_ptr<Node<float>> node(new Node<float>( "Node_" + std::to_string(i), NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()))); node->setModuleName("Mod_" + std::to_string(i)); node->setLayerName("Layer_" + std::to_string(i)); node->setTensorIndex(std::make_pair(i, i+1)); nodes.emplace("Node_" + std::to_string(i), node); } data.storeNodesCsv(filename, nodes); std::map<std::string, std::shared_ptr<Node<float>>> nodes_test; data.loadNodesCsv(filename, nodes_test); int i = 0; for (auto& nodes_map: nodes_test) { BOOST_CHECK_EQUAL(nodes_map.second->getName(), "Node_" + std::to_string(i)); BOOST_CHECK(nodes_map.second->getType() == NodeType::hidden); BOOST_CHECK(nodes_map.second->getStatus() == NodeStatus::initialized); BOOST_CHECK_EQUAL(nodes_map.second->getActivation()->getName(), "ReLUOp"); BOOST_CHECK_EQUAL(nodes_map.second->getActivationGrad()->getName(), "ReLUGradOp"); BOOST_CHECK_EQUAL(nodes_map.second->getIntegration()->getName(), "SumOp"); BOOST_CHECK_EQUAL(nodes_map.second->getIntegrationError()->getName(), "SumErrorOp"); BOOST_CHECK_EQUAL(nodes_map.second->getIntegrationWeightGrad()->getName(), "SumWeightGradOp"); BOOST_CHECK_EQUAL(nodes_map.second->getModuleName(), "Mod_" + std::to_string(i)); BOOST_CHECK_EQUAL(nodes_map.second->getLayerName(), "Layer_" + std::to_string(i)); BOOST_CHECK_EQUAL(nodes_map.second->getTensorIndex().first, i); BOOST_CHECK_EQUAL(nodes_map.second->getTensorIndex().second, i + 1); //BOOST_CHECK(nodes_map.second == nodes.at(nodes_map.first)); // Broken ++i; } } BOOST_AUTO_TEST_CASE(storeAndLoadBinary) { NodeFile<float> data; std::string filename = "NodeFileTest.bin"; // create list of dummy nodes std::map<std::string, std::shared_ptr<Node<float>>> nodes; for (int i = 0; i < 3; ++i) { std::shared_ptr<Node<float>> node(new Node<float>( "Node_" + std::to_string(i), NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>()))); node->setModuleName("Mod_" + std::to_string(i)); node->setLayerName("Layer_" + std::to_string(i)); node->setTensorIndex(std::make_pair(i, i + 1)); nodes.emplace("Node_" + std::to_string(i), node); } data.storeNodesBinary(filename, nodes); std::map<std::string, std::shared_ptr<Node<float>>> nodes_test; data.loadNodesBinary(filename, nodes_test); int i = 0; for (auto& nodes_map : nodes_test) { BOOST_CHECK_EQUAL(nodes_map.second->getName(), "Node_" + std::to_string(i)); BOOST_CHECK(nodes_map.second->getType() == NodeType::hidden); BOOST_CHECK(nodes_map.second->getStatus() == NodeStatus::initialized); BOOST_CHECK_EQUAL(nodes_map.second->getActivation()->getName(), "ReLUOp"); BOOST_CHECK_EQUAL(nodes_map.second->getActivationGrad()->getName(), "ReLUGradOp"); BOOST_CHECK_EQUAL(nodes_map.second->getIntegration()->getName(), "SumOp"); BOOST_CHECK_EQUAL(nodes_map.second->getIntegrationError()->getName(), "SumErrorOp"); BOOST_CHECK_EQUAL(nodes_map.second->getIntegrationWeightGrad()->getName(), "SumWeightGradOp"); BOOST_CHECK_EQUAL(nodes_map.second->getModuleName(), "Mod_" + std::to_string(i)); BOOST_CHECK_EQUAL(nodes_map.second->getLayerName(), "Layer_" + std::to_string(i)); BOOST_CHECK_EQUAL(nodes_map.second->getTensorIndex().first, i); BOOST_CHECK_EQUAL(nodes_map.second->getTensorIndex().second, i + 1); //BOOST_CHECK(nodes_map.second == nodes.at(nodes_map.first)); // Broken ++i; } } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELERRORDATA_H #define EVONET_MODELERRORDATA_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> #endif #include <unsupported/Eigen/CXX11/Tensor> #include <memory> //#include <cereal/access.hpp> // serialiation of private members //#include <cereal/types/memory.hpp> //#undef min // clashes with std::limit on windows in polymorphic.hpp //#undef max // clashes with std::limit on windows in polymorphic.hpp //#include <cereal/types/polymorphic.hpp> namespace EvoNet { /** @brief Network ModelErrorData */ template<typename TensorT, typename DeviceT> class ModelErrorData { public: ModelErrorData() = default; ///< Default constructor ModelErrorData(const ModelErrorData& other) { batch_size_ = other.batch_size_; memory_size_ = other.memory_size_; n_metrics_ = other.n_metrics_; h_error_ = other.h_error_; d_error_ = other.d_error_; h_metric_ = other.h_metric_; d_metric_ = other.d_metric_; h_error_updated_ = other.h_error_updated_; d_error_updated_ = other.d_error_updated_; h_metric_updated_ = other.h_metric_updated_; d_metric_updated_ = other.d_metric_updated_; }; ~ModelErrorData() = default; ///< Default destructor inline bool operator==(const ModelErrorData& other) const { return std::tie( ) == std::tie( ) ; } inline bool operator!=(const ModelErrorData& other) const { return !(*this == other); } inline ModelErrorData& operator=(const ModelErrorData& other) { batch_size_ = other.batch_size_; memory_size_ = other.memory_size_; n_metrics_ = other.n_metrics_; h_error_ = other.h_error_; d_error_ = other.d_error_; h_metric_ = other.h_metric_; d_metric_ = other.d_metric_; h_error_updated_ = other.h_error_updated_; d_error_updated_ = other.d_error_updated_; h_metric_updated_ = other.h_metric_updated_; d_metric_updated_ = other.d_metric_updated_; return *this; } void setBatchSize(const size_t& batch_size) { (batch_size <= 0) ? batch_size_ = 1 : batch_size_ = batch_size; } void setMemorySize(const size_t& memory_size) { (memory_size <= 0) ? memory_size_ = 1 : memory_size_ = memory_size; } void setNMetrics(const size_t& n_metrics) { (n_metrics <= 0) ? n_metrics_ = 1: n_metrics_ = n_metrics; } size_t getBatchSize() const { return batch_size_; } size_t getMemorySize() const { return memory_size_; } size_t getNMetrics() const { return n_metrics_; } virtual void setError(const Eigen::Tensor<TensorT, 2>& error) = 0; ///< error setter Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> getError() { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error(h_error_.get(), batch_size_, memory_size_); return error; }; ///< error copy getter std::shared_ptr<TensorT[]> getHErrorPointer() { return h_error_; }; ///< error pointer getter std::shared_ptr<TensorT[]> getDErrorPointer() { return d_error_; }; ///< error pointer getter virtual void setMetric(const Eigen::Tensor<TensorT, 2>& metric) = 0; ///< metric setter Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> getMetric() { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> metric(h_metric_.get(), n_metrics_, memory_size_); return metric; }; ///< metric copy getter std::shared_ptr<TensorT[]> getHMetricPointer() { return h_metric_; }; ///< metric pointer getter std::shared_ptr<TensorT[]> getDMetricPointer() { return d_metric_; }; ///< metric pointer getter size_t getErrorTensorSize() { return batch_size_ * memory_size_ * sizeof(TensorT); }; ///< Get the size of each tensor in bytes size_t getMetricTensorSize() { return n_metrics_ * memory_size_ * sizeof(TensorT); }; ///< Get the size of each tensor in bytes void initModelErrorData(const int& batch_size, const int& memory_size, const int& n_metrics); virtual bool syncHAndDError(DeviceT& device) = 0; virtual bool syncHAndDMetric(DeviceT& device) = 0; std::pair<bool, bool> getErrorStatus() { return std::make_pair(h_error_updated_, d_error_updated_); }; std::pair<bool, bool> getMetricStatus() { return std::make_pair(h_metric_updated_, d_metric_updated_); }; protected: size_t batch_size_ = 1; ///< Mini batch size size_t memory_size_ = 2; ///< Memory size size_t n_metrics_ = 2; ///< The number of model metrics /** @brief output, error and derivative have the following dimensions: rows: # of samples, cols: # of time steps where the number of samples spans 0 to n samples and the number of time steps spans m time points to 0 */ std::shared_ptr<TensorT[]> h_error_ = nullptr; std::shared_ptr<TensorT[]> d_error_ = nullptr; std::shared_ptr<TensorT[]> h_metric_ = nullptr; std::shared_ptr<TensorT[]> d_metric_ = nullptr; bool h_error_updated_ = false; bool d_error_updated_ = false; bool h_metric_updated_ = false; bool d_metric_updated_ = false; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(batch_size_, memory_size_, h_error_, d_error_, h_error_updated_, d_error_updated_); // } }; template<typename TensorT, typename DeviceT> inline void ModelErrorData<TensorT, DeviceT>::initModelErrorData(const int& batch_size, const int& memory_size, const int& n_metrics) { setBatchSize(batch_size); setMemorySize(memory_size); setNMetrics(n_metrics); Eigen::Tensor<TensorT, 2> zero(getBatchSize(), getMemorySize()); zero.setZero(); setError(zero); Eigen::Tensor<TensorT, 2> zero_metric(getNMetrics(), getMemorySize()); zero_metric.setZero(); setMetric(zero_metric); } template<typename TensorT> class ModelErrorDataCpu : public ModelErrorData<TensorT, Eigen::DefaultDevice> { public: void setError(const Eigen::Tensor<TensorT, 2>& error) override { TensorT* h_error = new TensorT[this->batch_size_*this->memory_size_]; // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_copy(h_error, this->batch_size_, this->memory_size_); error_copy = error; this->h_error_.reset(h_error); this->h_error_updated_ = true; this->d_error_updated_ = true; }; ///< error setter bool syncHAndDError(Eigen::DefaultDevice& device) override { return true; } void setMetric(const Eigen::Tensor<TensorT, 2>& metric) override { TensorT* h_metric = new TensorT[this->n_metrics_*this->memory_size_]; // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> metric_copy(h_metric, this->n_metrics_, this->memory_size_); metric_copy = metric; this->h_metric_.reset(h_metric); this->h_metric_updated_ = true; this->d_metric_updated_ = true; }; ///< metric setter bool syncHAndDMetric(Eigen::DefaultDevice& device) override { return true; } //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ModelErrorData<TensorT, Eigen::DefaultDevice>>(this)); // } }; #if COMPILE_WITH_CUDA template<typename TensorT> class ModelErrorDataGpu : public ModelErrorData<TensorT, Eigen::GpuDevice> { public: void setError(const Eigen::Tensor<TensorT, 2>& error) override { // allocate cuda and pinned host memory TensorT* d_error; TensorT* h_error; assert(cudaMalloc((void**)(&d_error), getErrorTensorSize()) == cudaSuccess); assert(cudaHostAlloc((void**)(&h_error), getErrorTensorSize(), cudaHostAllocDefault) == cudaSuccess); // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_copy(h_error, this->batch_size_, this->memory_size_); error_copy = error; // define the deleters auto h_deleter = [&](TensorT* ptr) { assert(cudaFreeHost(ptr) == cudaSuccess); }; auto d_deleter = [&](TensorT* ptr) { assert(cudaFree(ptr) == cudaSuccess); }; this->h_error_.reset(h_error, h_deleter); this->d_error_.reset(d_error, d_deleter); this->h_error_updated_ = true; this->d_error_updated_ = false; }; ///< error setter bool syncHAndDError(Eigen::GpuDevice& device) override { if (this->h_error_updated_ && !this->d_error_updated_) { device.memcpyHostToDevice(this->d_error_.get(), this->h_error_.get(), getErrorTensorSize()); this->d_error_updated_ = true; this->h_error_updated_ = false; return true; } else if (!this->h_error_updated_ && this->d_error_updated_) { device.memcpyDeviceToHost(this->h_error_.get(), this->d_error_.get(), getErrorTensorSize()); this->h_error_updated_ = true; this->d_error_updated_ = false; return true; } else { std::cout << "Both host and device are syncHAndDronized." << std::endl; return false; } } void setMetric(const Eigen::Tensor<TensorT, 2>& metric) override { // allocate cuda and pinned host memory TensorT* d_metric; TensorT* h_metric; assert(cudaMalloc((void**)(&d_metric), getMetricTensorSize()) == cudaSuccess); assert(cudaHostAlloc((void**)(&h_metric), getMetricTensorSize(), cudaHostAllocDefault) == cudaSuccess); // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> metric_copy(h_metric, this->n_metrics_, this->memory_size_); metric_copy = metric; // define the deleters auto h_deleter = [&](TensorT* ptr) { assert(cudaFreeHost(ptr) == cudaSuccess); }; auto d_deleter = [&](TensorT* ptr) { assert(cudaFree(ptr) == cudaSuccess); }; this->h_metric_.reset(h_metric, h_deleter); this->d_metric_.reset(d_metric, d_deleter); this->h_metric_updated_ = true; this->d_metric_updated_ = false; }; ///< metric setter bool syncHAndDMetric(Eigen::GpuDevice& device) override { if (this->h_metric_updated_ && !this->d_metric_updated_) { device.memcpyHostToDevice(this->d_metric_.get(), this->h_metric_.get(), getMetricTensorSize()); this->d_metric_updated_ = true; this->h_metric_updated_ = false; return true; } else if (!this->h_metric_updated_ && this->d_metric_updated_) { device.memcpyDeviceToHost(this->h_metric_.get(), this->d_metric_.get(), getMetricTensorSize()); this->h_metric_updated_ = true; this->d_metric_updated_ = false; return true; } else { std::cout << "Both host and device are syncHAndDronized." << std::endl; return false; } } //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ModelErrorData<TensorT, Eigen::GpuDevice>>(this)); // } }; #endif } //CEREAL_REGISTER_TYPE(EvoNet::ModelErrorDataCpu<float>); //// TODO: add double, int, etc. //#if COMPILE_WITH_CUDA //CEREAL_REGISTER_TYPE(EvoNet::ModelErrorDataGpu<float>); //// TODO: add double, int, etc. //#endif #endif //EVONET_MODELERRORDATA_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE WeightFile test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/io/WeightFile.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(WeightFile1) BOOST_AUTO_TEST_CASE(constructor) { WeightFile<float>* ptr = nullptr; WeightFile<float>* nullPointer = nullptr; ptr = new WeightFile<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { WeightFile<float>* ptr = nullptr; ptr = new WeightFile<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(parseParameters) { WeightFile<float> data; std::string parameters = "learning_rate:1.0;momentum:0.9;gradient_noise_sigma:1e3"; std::map<std::string, float> parameter_test = data.parseParameters(parameters); BOOST_CHECK_EQUAL(parameter_test.at("learning_rate"), 1.0); BOOST_CHECK_CLOSE(parameter_test.at("momentum"), 0.9, 1e3); BOOST_CHECK_EQUAL(parameter_test.at("gradient_noise_sigma"), 1e3); } BOOST_AUTO_TEST_CASE(storeAndLoadCsv) { WeightFile<float> data; std::string filename = "WeightFileTest.csv"; // create list of dummy weights std::map<std::string, std::shared_ptr<Weight<float>>> weights; std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; for (int i=0; i<3; ++i) { weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); std::shared_ptr<Weight<float>> weight(new Weight<float>( "Weight_" + std::to_string(i), weight_init, solver)); weight->setModuleName("Mod_" + std::to_string(i)); weight->setLayerName("Layer_" + std::to_string(i)); weight->addTensorIndex(std::make_tuple(i, i + 1, i + 2)); weight->addTensorIndex(std::make_tuple(i, i + 3, i + 4)); weights.emplace("Weight_" + std::to_string(i), weight); } data.storeWeightsCsv(filename, weights); std::map<std::string, std::shared_ptr<Weight<float>>> weights_test; data.loadWeightsCsv(filename, weights_test); int i = 0; for (auto& weight_map: weights_test) { BOOST_CHECK_EQUAL(weight_map.second->getName(), "Weight_" + std::to_string(i)); BOOST_CHECK_EQUAL(weight_map.second->getModuleName(), "Mod_" + std::to_string(i)); BOOST_CHECK_EQUAL(weight_map.second->getLayerName(), "Layer_" + std::to_string(i)); BOOST_CHECK_EQUAL(weight_map.second->getWeightInitOp()->operator()(), 1.0); BOOST_CHECK_EQUAL(std::get<0>(weight_map.second->getTensorIndex()[0]), i); BOOST_CHECK_EQUAL(std::get<1>(weight_map.second->getTensorIndex()[0]), i + 1); BOOST_CHECK_EQUAL(std::get<2>(weight_map.second->getTensorIndex()[0]), i + 2); BOOST_CHECK_EQUAL(std::get<0>(weight_map.second->getTensorIndex()[1]), i); BOOST_CHECK_EQUAL(std::get<1>(weight_map.second->getTensorIndex()[1]), i + 3); BOOST_CHECK_EQUAL(std::get<2>(weight_map.second->getTensorIndex()[1]), i + 4); //BOOST_CHECK(weight_map.second == weights.at(weight_map.first)); // Broken ++i; } } BOOST_AUTO_TEST_CASE(storeAndLoadWeightValuesCsv) { WeightFile<float> data; std::string filename = "WeightFileTest_weightValues.csv"; // create list of dummy weights std::map<std::string, std::shared_ptr<Weight<float>>> weights; std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; for (int i = 0; i < 3; ++i) { weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); std::shared_ptr<Weight<float>> weight(new Weight<float>( "Weight_" + std::to_string(i), weight_init, solver)); weight->setModuleName(std::to_string(i)); weight->setWeight(i); weights.emplace(weight->getName(), weight); } data.storeWeightValuesCsv(filename, weights); std::map<std::string, std::shared_ptr<Weight<float>>> weights_test; for (int i = 0; i < 3; ++i) { weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); std::shared_ptr<Weight<float>> weight(new Weight<float>( "Weight_" + std::to_string(i), weight_init, solver)); weight->setModuleName(std::to_string(i)); weight->setWeight(0); weights_test.emplace(weight->getName(), weight); } data.loadWeightValuesCsv(filename, weights_test); for (auto& weight: weights_test) { BOOST_CHECK_EQUAL(weight.second->getName(), weights.at(weight.second->getName())->getName()); BOOST_CHECK_EQUAL(weight.second->getWeight(), weights.at(weight.second->getName())->getWeight()); } } BOOST_AUTO_TEST_CASE(storeAndLoadBinary) { WeightFile<float> data; std::string filename = "WeightFileTest.bin"; // create list of dummy weights std::map<std::string, std::shared_ptr<Weight<float>>> weights; std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; for (int i = 0; i < 3; ++i) { weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); std::shared_ptr<Weight<float>> weight(new Weight<float>( "Weight_" + std::to_string(i), weight_init, solver)); weight->setModuleName("Mod_" + std::to_string(i)); weight->setLayerName("Layer_" + std::to_string(i)); weight->addTensorIndex(std::make_tuple(i, i + 1, i + 2)); weight->addTensorIndex(std::make_tuple(i, i + 3, i + 4)); weight->setWeight(float(i)); weight->setInitWeight(false); weights.emplace("Weight_" + std::to_string(i), weight); } data.storeWeightsBinary(filename, weights); std::map<std::string, std::shared_ptr<Weight<float>>> weights_test; data.loadWeightsBinary(filename, weights_test); int i = 0; for (auto& weight_map : weights_test) { BOOST_CHECK_EQUAL(weight_map.second->getName(), "Weight_" + std::to_string(i)); BOOST_CHECK_EQUAL(weight_map.second->getModuleName(), "Mod_" + std::to_string(i)); BOOST_CHECK_EQUAL(weight_map.second->getLayerName(), "Layer_" + std::to_string(i)); BOOST_CHECK_EQUAL(weight_map.second->getWeight(), float(i)); BOOST_CHECK(!weight_map.second->getInitWeight()); BOOST_CHECK_EQUAL(weight_map.second->getWeightInitOp()->operator()(), 1.0); BOOST_CHECK_EQUAL(std::get<0>(weight_map.second->getTensorIndex()[0]), i); BOOST_CHECK_EQUAL(std::get<1>(weight_map.second->getTensorIndex()[0]), i + 1); BOOST_CHECK_EQUAL(std::get<2>(weight_map.second->getTensorIndex()[0]), i + 2); BOOST_CHECK_EQUAL(std::get<0>(weight_map.second->getTensorIndex()[1]), i); BOOST_CHECK_EQUAL(std::get<1>(weight_map.second->getTensorIndex()[1]), i + 3); BOOST_CHECK_EQUAL(std::get<2>(weight_map.second->getTensorIndex()[1]), i + 4); //BOOST_CHECK(weight_map.second == weights.at(weight_map.first)); // Broken ++i; } } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ModelKernal test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/ModelKernal.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(ModelKernal1) BOOST_AUTO_TEST_CASE(constructorDefaultDevice) { ModelKernalDefaultDevice<float>* ptr = nullptr; ModelKernalDefaultDevice<float>* nullPointer = nullptr; ptr = new ModelKernalDefaultDevice<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructorDefaultDevice) { ModelKernalDefaultDevice<float>* ptr = nullptr; ptr = new ModelKernalDefaultDevice<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(nodeActivationDefaultDevice) { ModelKernalDefaultDevice<float> kernal; const int device_id = 0; std::shared_ptr<ActivationTensorOp<float, Eigen::DefaultDevice>> activation_function = std::make_shared<ReLUTensorOp<float, Eigen::DefaultDevice>>(ReLUTensorOp<float, Eigen::DefaultDevice>()); const int batch_size = 4; const int memory_size = 2; const int layer_size = 2; const int source_time_step = 0; const int node_time_step = 0; const int layer_byte_size = batch_size * memory_size * layer_size; float* h_node_input = new float[layer_byte_size]; float* d_node_input = new float[layer_byte_size]; float* h_node_output = new float[layer_byte_size]; float* d_node_output = new float[layer_byte_size]; float* h_node_dt = new float[layer_byte_size]; float* d_node_dt = new float[layer_byte_size]; //assert(cudaSetDevice(device_id) == cudaSuccess); // is this needed? //// allocate memory //std::size_t bytes = batch_size * memory_size * layer_size * sizeof(float); //assert(cudaHostAlloc((void**)(&h_node_input), bytes, cudaHostAllocDefault) == cudaSuccess); //assert(cudaMalloc((void**)(&d_node_input), bytes) == cudaSuccess); //assert(cudaHostAlloc((void**)(&h_node_output), bytes, cudaHostAllocDefault) == cudaSuccess); //assert(cudaMalloc((void**)(&d_node_output), bytes) == cudaSuccess); //assert(cudaHostAlloc((void**)(&h_node_dt), bytes, cudaHostAllocDefault) == cudaSuccess); //assert(cudaMalloc((void**)(&d_node_dt), bytes) == cudaSuccess); Eigen::TensorMap<Eigen::Tensor<float, 3>> node_input(h_node_input, batch_size, memory_size, layer_size); node_input.setValues({ {{-1, 1}, {0, 0}}, {{-2, 2}, {0, 0}}, {{-3, 3}, {0, 0}}, {{-4, 4}, {0, 0}} }); Eigen::TensorMap<Eigen::Tensor<float, 3>> node_output(h_node_output, batch_size, memory_size, layer_size); node_output.setConstant(0); Eigen::TensorMap<Eigen::Tensor<float, 3>> node_dt(h_node_dt, batch_size, memory_size, layer_size); node_dt.setConstant(1); // Set up the device Eigen::DefaultDevice device; //cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope! //assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); //Eigen::GpuStreamDevice stream_device(&stream, 0); //Eigen::GpuDevice device(&stream_device); bool success = kernal.executeNodeActivation( h_node_input, d_node_input, h_node_output, d_node_output, h_node_dt, d_node_dt, activation_function, batch_size, memory_size, layer_size, node_time_step, device, true, true); //// Synchronize the stream //cudaError_t err = cudaStreamQuery(stream); //assert(cudaStreamSynchronize(stream) == cudaSuccess); //assert(cudaStreamDestroy(stream) == cudaSuccess); Eigen::Tensor<float, 3> expected_output(batch_size, memory_size, layer_size); expected_output.setValues({ {{0, 1}, {0, 0}}, {{0, 2}, {0, 0}}, {{0, 3}, {0, 0}}, {{0, 4}, {0, 0}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int node_iter = 0; node_iter < layer_size; ++node_iter) { //std::cout << "[Output] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << node_output(batch_iter, memory_iter, node_iter) << std::endl; BOOST_CHECK_CLOSE(node_output(batch_iter, memory_iter, node_iter), expected_output(batch_iter, memory_iter, node_iter), 1e-4); } } } // release resources delete[] h_node_input; delete[] d_node_input; delete[] h_node_output; delete[] d_node_output; delete[] h_node_dt; delete[] d_node_dt; //assert(cudaFreeHost(h_node_input) == cudaSuccess); //assert(cudaFree(d_node_input) == cudaSuccess); //assert(cudaFreeHost(h_node_output) == cudaSuccess); //assert(cudaFree(d_node_output) == cudaSuccess); //assert(cudaFreeHost(h_node_dt) == cudaSuccess); //assert(cudaFree(d_node_dt) == cudaSuccess); } BOOST_AUTO_TEST_CASE(nodeDerivativeDefaultDevice) { ModelKernalDefaultDevice<float> kernal; const int device_id = 0; std::shared_ptr<ActivationTensorOp<float, Eigen::DefaultDevice>> activation_grad_function = std::make_shared<ReLUGradTensorOp<float, Eigen::DefaultDevice>>(ReLUGradTensorOp<float, Eigen::DefaultDevice>()); const int batch_size = 4; const int memory_size = 2; const int layer_size = 2; const int source_time_step = 0; const int node_time_step = 0; const int layer_byte_size = batch_size * memory_size * layer_size; float* h_node_output = new float[layer_byte_size]; float* d_node_output = new float[layer_byte_size]; float* h_node_derivative = new float[layer_byte_size]; float* d_node_derivative = new float[layer_byte_size]; //assert(cudaSetDevice(device_id) == cudaSuccess); // is this needed? //// allocate memory //std::size_t bytes = batch_size * memory_size * layer_size * sizeof(float); //assert(cudaHostAlloc((void**)(&h_node_output), bytes, cudaHostAllocDefault) == cudaSuccess); //assert(cudaMalloc((void**)(&d_node_output), bytes) == cudaSuccess); //assert(cudaHostAlloc((void**)(&h_node_derivative), bytes, cudaHostAllocDefault) == cudaSuccess); //assert(cudaMalloc((void**)(&d_node_derivative), bytes) == cudaSuccess); Eigen::TensorMap<Eigen::Tensor<float, 3>> node_output(h_node_output, batch_size, memory_size, layer_size); node_output.setValues({ {{-1, 1}, {0, 0}}, {{-2, 2}, {0, 0}}, {{-3, 3}, {0, 0}}, {{-4, 4}, {0, 0}} }); Eigen::TensorMap<Eigen::Tensor<float, 3>> node_derivative(h_node_derivative, batch_size, memory_size, layer_size); node_derivative.setConstant(0); //// Set up the device Eigen::DefaultDevice device; //cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope! //assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); //Eigen::GpuStreamDevice stream_device(&stream, 0); //Eigen::GpuDevice device(&stream_device); bool success = kernal.executeNodeDerivative( h_node_output, d_node_output, h_node_derivative, d_node_derivative, activation_grad_function, batch_size, memory_size, layer_size, node_time_step, device, true, true); //// Synchronize the stream //cudaError_t err = cudaStreamQuery(stream); //assert(cudaStreamSynchronize(stream) == cudaSuccess); //assert(cudaStreamDestroy(stream) == cudaSuccess); Eigen::Tensor<float, 3> expected_derivative(batch_size, memory_size, layer_size); expected_derivative.setValues({ {{0, 1}, {0, 0}}, {{0, 1}, {0, 0}}, {{0, 1}, {0, 0}}, {{0, 1}, {0, 0}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int node_iter = 0; node_iter < layer_size; ++node_iter) { //std::cout << "[Derivative] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << node_derivative(batch_iter, memory_iter, node_iter) << std::endl; BOOST_CHECK_CLOSE(node_derivative(batch_iter, memory_iter, node_iter), expected_derivative(batch_iter, memory_iter, node_iter), 1e-4); } } } // release resources delete[] h_node_output; delete[] d_node_output; delete[] h_node_derivative; delete[] d_node_derivative; //assert(cudaFreeHost(h_node_output) == cudaSuccess); //assert(cudaFree(d_node_output) == cudaSuccess); //assert(cudaFreeHost(h_node_derivative) == cudaSuccess); //assert(cudaFree(d_node_derivative) == cudaSuccess); } BOOST_AUTO_TEST_CASE(forwardPropogationDefaultDevice) { ModelKernalDefaultDevice<float> kernal; const int device_id = 0; std::shared_ptr<IntegrationTensorOp<float, Eigen::DefaultDevice>> integration_function = std::make_shared<SumTensorOp<float, Eigen::DefaultDevice>>(SumTensorOp<float, Eigen::DefaultDevice>()); const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_steps = 0; const int sink_time_step = 0; float* h_source_outputs = new float[batch_size * memory_size * source_layer_size]; float* d_source_outputs = new float[batch_size * memory_size * source_layer_size]; float* h_weights = new float[source_layer_size, sink_layer_size]; float* d_weights = new float[source_layer_size, sink_layer_size]; float* h_sink_input = new float[batch_size * memory_size * sink_layer_size]; float* d_sink_input = new float[batch_size * memory_size * sink_layer_size]; //assert(cudaSetDevice(device_id) == cudaSuccess); // is this needed? //// allocate memory //std::size_t source_bytes = batch_size * memory_size * source_layer_size * sizeof(float); //std::size_t sink_bytes = batch_size * memory_size * sink_layer_size * sizeof(float); //std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(float); //assert(cudaHostAlloc((void**)(&h_source_outputs), source_bytes, cudaHostAllocDefault) == cudaSuccess); //assert(cudaMalloc((void**)(&d_source_outputs), source_bytes) == cudaSuccess); //assert(cudaHostAlloc((void**)(&h_weights), weight_bytes, cudaHostAllocDefault) == cudaSuccess); //assert(cudaMalloc((void**)(&d_weights), weight_bytes) == cudaSuccess); //assert(cudaHostAlloc((void**)(&h_sink_input), sink_bytes, cudaHostAllocDefault) == cudaSuccess); //assert(cudaMalloc((void**)(&d_sink_input), sink_bytes) == cudaSuccess); Eigen::TensorMap<Eigen::Tensor<float, 3>> source_output(h_source_outputs, batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::TensorMap<Eigen::Tensor<float, 2>> weight(h_weights, source_layer_size, sink_layer_size); weight.setConstant(1); Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_input(h_sink_input, batch_size, memory_size, sink_layer_size); sink_input.setConstant(0); // Set up the device Eigen::DefaultDevice device; //cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope! //assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); //Eigen::GpuStreamDevice stream_device(&stream, 0); //Eigen::GpuDevice device(&stream_device); bool success = kernal.executeForwardPropogation( h_source_outputs, d_source_outputs, h_weights, d_weights, h_sink_input, d_sink_input, integration_function, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_steps, sink_time_step, device, true, true); //// Synchronize the stream //cudaError_t err = cudaStreamQuery(stream); //assert(cudaStreamSynchronize(stream) == cudaSuccess); //assert(cudaStreamDestroy(stream) == cudaSuccess); Eigen::Tensor<float, 3> expected_input(batch_size, memory_size, sink_layer_size); expected_input.setValues({ {{2}, {0}}, {{4}, {0}}, {{6}, {0}}, {{8}, {0}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int node_iter = 0; node_iter < sink_layer_size; ++node_iter) { //std::cout << "[Input] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << sink_input(batch_iter, memory_iter, node_iter) << std::endl; BOOST_CHECK_CLOSE(sink_input(batch_iter, memory_iter, node_iter), expected_input(batch_iter, memory_iter, node_iter), 1e-4); } } } // release resources delete[] h_source_outputs; delete[] d_source_outputs; delete[] h_sink_input; delete[] d_sink_input; } BOOST_AUTO_TEST_CASE(backwardPropogationDefaultDevice) { const int device_id = 0; ModelKernalDefaultDevice<float> kernal; std::shared_ptr<IntegrationErrorTensorOp<float, Eigen::DefaultDevice>> integration_function = std::make_shared<SumErrorTensorOp<float, Eigen::DefaultDevice>>(SumErrorTensorOp<float, Eigen::DefaultDevice>()); const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 0; float* h_source_errors = new float[batch_size * memory_size * source_layer_size]; float* d_source_errors = new float[batch_size * memory_size * source_layer_size]; float* h_source_inputs = new float[batch_size * memory_size * source_layer_size]; float* d_source_inputs = new float[batch_size * memory_size * source_layer_size]; float* h_weights = new float[source_layer_size, sink_layer_size]; float* d_weights = new float[source_layer_size, sink_layer_size]; float* h_sink_error = new float[batch_size * memory_size * sink_layer_size]; float* d_sink_error = new float[batch_size * memory_size * sink_layer_size]; float* h_sink_output = new float[batch_size * memory_size * sink_layer_size]; float* d_sink_output = new float[batch_size * memory_size * sink_layer_size]; float* h_sink_derivative = new float[batch_size * memory_size * sink_layer_size]; float* d_sink_derivative = new float[batch_size * memory_size * sink_layer_size]; //assert(cudaSetDevice(device_id) == cudaSuccess); // is this needed? //// allocate memory //std::size_t source_bytes = batch_size * memory_size * source_layer_size * sizeof(float); //std::size_t sink_bytes = batch_size * memory_size * sink_layer_size * sizeof(float); //std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(float); //assert(cudaHostAlloc((void**)(&h_source_errors), source_bytes, cudaHostAllocDefault) == cudaSuccess); //assert(cudaMalloc((void**)(&d_source_errors), source_bytes) == cudaSuccess); //assert(cudaHostAlloc((void**)(&h_source_inputs), source_bytes, cudaHostAllocDefault) == cudaSuccess); //assert(cudaMalloc((void**)(&d_source_inputs), source_bytes) == cudaSuccess); //assert(cudaHostAlloc((void**)(&h_weights), weight_bytes, cudaHostAllocDefault) == cudaSuccess); //assert(cudaMalloc((void**)(&d_weights), weight_bytes) == cudaSuccess); //assert(cudaHostAlloc((void**)(&h_sink_error), sink_bytes, cudaHostAllocDefault) == cudaSuccess); //assert(cudaMalloc((void**)(&d_sink_error), sink_bytes) == cudaSuccess); //assert(cudaHostAlloc((void**)(&h_sink_derivative), sink_bytes, cudaHostAllocDefault) == cudaSuccess); //assert(cudaMalloc((void**)(&d_sink_derivative), sink_bytes) == cudaSuccess); //assert(cudaHostAlloc((void**)(&h_sink_output), sink_bytes, cudaHostAllocDefault) == cudaSuccess); //assert(cudaMalloc((void**)(&d_sink_output), sink_bytes) == cudaSuccess); Eigen::TensorMap<Eigen::Tensor<float, 3>> source_error(h_source_errors, batch_size, memory_size, source_layer_size); source_error.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::TensorMap<Eigen::Tensor<float, 3>> source_input(h_source_inputs, batch_size, memory_size, source_layer_size); source_input.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::TensorMap<Eigen::Tensor<float, 2>> weight(h_weights, source_layer_size, sink_layer_size); weight.setConstant(1); Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_derivative(h_sink_derivative, batch_size, memory_size, sink_layer_size); sink_derivative.setConstant(2); Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_error(h_sink_error, batch_size, memory_size, sink_layer_size); sink_error.setConstant(0); Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_output(h_sink_output, batch_size, memory_size, sink_layer_size); sink_output.setConstant(1); // Set up the device Eigen::DefaultDevice device; //cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope! //assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); //Eigen::GpuStreamDevice stream_device(&stream, 0); //Eigen::GpuDevice device(&stream_device); bool success = kernal.executeBackwardPropogation( h_source_errors, d_source_errors, h_source_inputs, d_source_inputs, h_sink_output, d_sink_output, h_weights, d_weights, h_sink_error, d_sink_error, h_sink_derivative, d_sink_derivative, source_layer_size, integration_function, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device, true, true); //// Synchronize the stream //cudaError_t err = cudaStreamQuery(stream); //assert(cudaStreamSynchronize(stream) == cudaSuccess); //assert(cudaStreamDestroy(stream) == cudaSuccess); Eigen::Tensor<float, 3> expected_error(batch_size, memory_size, sink_layer_size); expected_error.setValues({ {{4}, {0}}, {{8}, {0}}, {{12}, {0}}, {{16}, {0}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int node_iter = 0; node_iter < sink_layer_size; ++node_iter) { //std::cout << "[Sink Error] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << sink_error(batch_iter, memory_iter, node_iter) << std::endl; BOOST_CHECK_CLOSE(sink_error(batch_iter, memory_iter, node_iter), expected_error(batch_iter, memory_iter, node_iter), 1e-4); } } } //assert(cudaFreeHost(h_source_errors) == cudaSuccess); //assert(cudaFree(d_source_errors) == cudaSuccess); //assert(cudaFreeHost(h_source_inputs) == cudaSuccess); //assert(cudaFree(d_source_inputs) == cudaSuccess); //assert(cudaFreeHost(h_weights) == cudaSuccess); //assert(cudaFree(d_weights) == cudaSuccess); //assert(cudaFreeHost(h_sink_error) == cudaSuccess); //assert(cudaFree(d_sink_error) == cudaSuccess); //assert(cudaFreeHost(h_sink_derivative) == cudaSuccess); //assert(cudaFree(d_sink_derivative) == cudaSuccess); //assert(cudaFreeHost(h_sink_output) == cudaSuccess); //assert(cudaFree(d_sink_output) == cudaSuccess); } BOOST_AUTO_TEST_CASE(modelErrorDefaultDevice) { const int device_id = 0; ModelKernalDefaultDevice<float> kernal; std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_grad_function = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); const int batch_size = 4; const int memory_size = 2; const int layer_size = 2; const int time_step = 0; float* h_predicted = new float[batch_size * memory_size * layer_size]; float* d_predicted = new float[batch_size * memory_size * layer_size]; float* h_node_errors = new float[batch_size * memory_size * layer_size]; float* d_node_errors = new float[batch_size * memory_size * layer_size]; float* h_model_error = new float[batch_size * memory_size]; float* d_model_error = new float[batch_size * memory_size]; Eigen::TensorMap<Eigen::Tensor<float, 3>> predicted(h_predicted, batch_size, memory_size, layer_size); predicted.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::TensorMap<Eigen::Tensor<float, 2>> model_error(h_model_error, batch_size, memory_size); model_error.setConstant(0); Eigen::TensorMap<Eigen::Tensor<float, 3>> node_error(h_node_errors, batch_size, memory_size, layer_size); node_error.setConstant(0); Eigen::Tensor<float, 2> expected(batch_size, layer_size); expected.setConstant(1); // Set up the device Eigen::DefaultDevice device; bool success = kernal.executeModelErrors( expected, h_predicted, d_predicted, h_model_error, d_model_error, h_node_errors, d_node_errors, loss_function, loss_grad_function, batch_size, memory_size, layer_size, time_step, device, true, true); Eigen::Tensor<float, 2> expected_model_error(batch_size, memory_size); expected_model_error.setValues({ {0, 0}, {0.5, 0}, {2.0, 0}, {4.5, 0} }); Eigen::Tensor<float, 3> expected_node_error(batch_size, memory_size, layer_size); expected_node_error.setValues({ { {0, 0 }, { 0, 0 } }, { {-0.5, -0.5 }, { 0, 0 } }, { {-1, -1 }, { 0, 0 } }, { {-1.5, -1.5 }, { 0, 0 } } }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { //std::cout << "[Model Error] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << " = " << model_error(batch_iter, memory_iter) << std::endl; BOOST_CHECK_CLOSE(model_error(batch_iter, memory_iter), expected_model_error(batch_iter, memory_iter), 1e-4); for (int node_iter = 0; node_iter < layer_size; ++node_iter) { //std::cout << "[Node Error] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << node_error(batch_iter, memory_iter, node_iter) << std::endl; BOOST_CHECK_CLOSE(node_error(batch_iter, memory_iter, node_iter), expected_node_error(batch_iter, memory_iter, node_iter), 1e-4); } } } // release resources delete[] h_predicted; delete[] d_predicted; delete[] h_model_error; delete[] d_model_error; delete[] h_node_errors; delete[] d_node_errors; } BOOST_AUTO_TEST_CASE(modelMetricDefaultDevice) { const int device_id = 0; ModelKernalDefaultDevice<float> kernal; std::shared_ptr<MetricFunctionTensorOp<float, Eigen::DefaultDevice>> metric_function = std::make_shared<MAETensorOp<float, Eigen::DefaultDevice>>(MAETensorOp<float, Eigen::DefaultDevice>()); const int batch_size = 4; const int memory_size = 2; const int layer_size = 2; const int n_metrics = 1; const int time_step = 0; const int metric_index = 0; float* h_predicted = new float[batch_size * memory_size * layer_size]; float* d_predicted = new float[batch_size * memory_size * layer_size]; float* h_model_metric = new float[n_metrics * memory_size]; float* d_model_metric = new float[n_metrics * memory_size]; Eigen::TensorMap<Eigen::Tensor<float, 3>> predicted(h_predicted, batch_size, memory_size, layer_size); predicted.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::TensorMap<Eigen::Tensor<float, 2>> model_metric(h_model_metric, n_metrics, memory_size); model_metric.setConstant(0); Eigen::Tensor<float, 2> expected(batch_size, layer_size); expected.setConstant(1); // Set up the device Eigen::DefaultDevice device; bool success = kernal.executeModelMetric( expected, h_predicted, d_predicted, h_model_metric, d_model_metric, metric_function, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device, true, true); Eigen::Tensor<float, 2> expected_model_metric(n_metrics, memory_size); expected_model_metric.setValues({ {1.5, 0} }); for (int metric_iter = 0; metric_iter < n_metrics; ++metric_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { //std::cout << "[Model Metric] Metric iter: " << metric_iter << ", Memory Iter: " << memory_iter << " = " << model_metric(metric_iter, memory_iter) << std::endl; BOOST_CHECK_CLOSE(model_metric(metric_iter, memory_iter), expected_model_metric(metric_iter, memory_iter), 1e-4); } } // release resources delete[] h_predicted; delete[] d_predicted; delete[] h_model_metric; delete[] d_model_metric; } BOOST_AUTO_TEST_CASE(weightErrorDefaultDevice) { const int device_id = 0; ModelKernalDefaultDevice<float> kernal; std::shared_ptr<IntegrationWeightGradTensorOp<float, Eigen::DefaultDevice>> integration_function = std::make_shared<SumWeightGradTensorOp<float, Eigen::DefaultDevice>>(SumWeightGradTensorOp<float, Eigen::DefaultDevice>()); const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; float* h_sink_errors = new float[batch_size * memory_size * sink_layer_size]; float* d_sink_errors = new float[batch_size * memory_size * sink_layer_size]; float* h_source_outputs = new float[batch_size * memory_size * source_layer_size]; float* d_source_outputs = new float[batch_size * memory_size * source_layer_size]; float* h_source_inputs = new float[batch_size * memory_size * source_layer_size]; float* d_source_inputs = new float[batch_size * memory_size * source_layer_size]; float* h_weight = new float[source_layer_size * sink_layer_size]; float* d_weight = new float[source_layer_size * sink_layer_size]; float* h_weight_error = new float[source_layer_size * sink_layer_size]; float* d_weight_error = new float[source_layer_size * sink_layer_size]; Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_error(h_sink_errors, batch_size, memory_size, sink_layer_size); sink_error.setValues({ {{1}, {1}}, {{2}, {1}}, {{3}, {0}}, {{4}, {0}} }); Eigen::TensorMap<Eigen::Tensor<float, 3>> source_output(h_source_outputs, batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {1, 1}}, {{2, 2}, {2, 2}}, {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); Eigen::TensorMap<Eigen::Tensor<float, 3>> source_input(h_source_inputs, batch_size, memory_size, source_layer_size); source_input.setValues({ {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}}, {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::TensorMap<Eigen::Tensor<float, 2>> weight(h_weight, source_layer_size, sink_layer_size); weight.setConstant(1); Eigen::TensorMap<Eigen::Tensor<float, 2>> weight_error(h_weight_error, source_layer_size, sink_layer_size); weight_error.setConstant(0); // Set up the device Eigen::DefaultDevice device; bool success = kernal.executeWeightErrors( h_sink_errors, d_sink_errors, h_source_outputs, d_source_outputs, h_source_inputs, d_source_inputs, source_layer_size, integration_function, h_weight, d_weight, h_weight_error, d_weight_error, batch_size, memory_size, source_layer_size, sink_layer_size, device, true, true); Eigen::Tensor<float, 2> expected_weight_error(source_layer_size, sink_layer_size); expected_weight_error.setValues({ {-4.75}, {-4.75} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected_weight_error(source_iter, sink_iter), 1e-4); } } // release resources delete[] h_sink_errors; delete[] d_sink_errors; delete[] h_source_outputs; delete[] d_source_outputs; delete[] h_source_inputs; delete[] d_source_inputs; delete[] h_weight_error; delete[] d_weight_error; delete[] h_weight; delete[] d_weight; } BOOST_AUTO_TEST_CASE(sharedWeightErrorsDefaultDevice) { ModelKernalDefaultDevice<float> kernal; const int source_layer_size = 3; const int sink_layer_size = 2; const int n_shared_weights = 2; float* h_shared_weights = new float[source_layer_size * sink_layer_size * n_shared_weights]; float* d_shared_weights = new float[source_layer_size * sink_layer_size * n_shared_weights]; float* h_weight_error = new float[source_layer_size * sink_layer_size]; float* d_weight_error = new float[source_layer_size * sink_layer_size]; Eigen::TensorMap<Eigen::Tensor<float, 3>> shared_weights(h_shared_weights, source_layer_size, sink_layer_size, n_shared_weights); shared_weights.setValues({ {{1, 0}, {1, 0}}, {{0, 1}, {0, 1}}, {{0, 0}, {0, 0}} }); Eigen::TensorMap<Eigen::Tensor<float, 2>> weight_error(h_weight_error, source_layer_size, sink_layer_size); weight_error.setValues({ {1, 2}, {5, 6}, {3, 4} }); // Set up the device Eigen::DefaultDevice device; bool success = kernal.executeSharedWeightErrors( h_weight_error, d_weight_error, h_shared_weights, d_shared_weights, source_layer_size, sink_layer_size, n_shared_weights, device, true, true); Eigen::Tensor<float, 2> expected_weight_error(source_layer_size, sink_layer_size); expected_weight_error.setValues({ {3, 3}, {11, 11}, {3, 4} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected_weight_error(source_iter, sink_iter), 1e-4); } } // release resources delete[] h_weight_error; delete[] d_weight_error; delete[] h_shared_weights; delete[] d_shared_weights; } BOOST_AUTO_TEST_CASE(weightUpdateDefaultDevice) { const int device_id = 0; ModelKernalDefaultDevice<float> kernal; std::shared_ptr<SolverTensorOp<float, Eigen::DefaultDevice>> solver_function = std::make_shared<SGDTensorOp<float, Eigen::DefaultDevice>>(SGDTensorOp<float, Eigen::DefaultDevice>()); const int source_layer_size = 2; const int sink_layer_size = 1; const int iter = 0; float* h_solver_params = new float[source_layer_size * sink_layer_size * 3]; float* d_solver_params = new float[source_layer_size * sink_layer_size * 3]; float* h_weight = new float[source_layer_size * sink_layer_size]; float* d_weight = new float[source_layer_size * sink_layer_size]; float* h_weight_error = new float[source_layer_size * sink_layer_size]; float* d_weight_error = new float[source_layer_size * sink_layer_size]; Eigen::TensorMap<Eigen::Tensor<float, 3>> solver_params(h_solver_params, source_layer_size, sink_layer_size, 3); solver_params.setValues({ {{0.01, 0.99, 0.0}}, {{0.01, 0.99, 0.0}} }); Eigen::TensorMap<Eigen::Tensor<float, 2>> weight(h_weight, source_layer_size, sink_layer_size); weight.setConstant(1); Eigen::TensorMap<Eigen::Tensor<float, 2>> weight_error(h_weight_error, source_layer_size, sink_layer_size); weight_error.setValues({ {-0.2}, {-20} }); // Set up the device Eigen::DefaultDevice device; bool success = kernal.executeWeightUpdate( h_weight, d_weight, h_solver_params, d_solver_params, h_weight_error, d_weight_error, solver_function, source_layer_size, sink_layer_size, iter, device, true, true); Eigen::Tensor<float, 2> expected_weights(source_layer_size, sink_layer_size); expected_weights.setValues({ {1.002}, {1.2} }); Eigen::Tensor<float, 3> expected_params(source_layer_size, sink_layer_size, 3); expected_params.setValues({ {{0.01, 0.99, -0.002}}, {{0.01, 0.99, -0.2}} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight(source_iter, sink_iter), expected_weights(source_iter, sink_iter), 1e-4); for (int param_iter = 0; param_iter < 3; ++param_iter) { //std::cout << "[Params] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << ", Param Iter: " << param_iter << " = " << solver_params(source_iter, sink_iter, param_iter) << std::endl; BOOST_CHECK_CLOSE(solver_params(source_iter, sink_iter, param_iter), expected_params(source_iter, sink_iter, param_iter), 1e-4); } } } // release resources delete[] h_solver_params; delete[] d_solver_params; delete[] h_weight; delete[] d_weight; delete[] h_weight_error; delete[] d_weight_error; } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_POPULATIONTRAINERGPU_H #define EVONET_POPULATIONTRAINERGPU_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> // .h #include <EvoNet/ml/PopulationTrainer.h> #include <EvoNet/ml/ModelInterpreterGpu.h> // .cpp namespace EvoNet { /** @brief Class to train a vector of models */ template<typename TensorT> class PopulationTrainerGpu : public PopulationTrainer<TensorT, ModelInterpreterGpu<TensorT>> { }; } #endif #endif //EVONET_POPULATIONTRAINERGPU_H<file_sep>#ifndef EVONET_PREPROCESSING_H #define EVONET_PREPROCESSING_H #include <unsupported/Eigen/CXX11/Tensor> #include <vector> #include <algorithm> #include <map> #include <random> #define _USE_MATH_DEFINES #include <math.h> #define maxFunc(a,b) (((a) > (b)) ? (a) : (b)) #define minFunc(a,b) (((a) < (b)) ? (a) : (b)) namespace EvoNet { /* @brief Methods for data preprocessing, normalization, and random sampling */ template<typename T> T selectRandomElement(const std::vector<T>& elements) { try { // select a random node // based on https://www.rosettacode.org/wiki/Pick_random_element std::random_device seed; std::mt19937 engine(seed()); std::uniform_int_distribution<int> choose(0, elements.size() - 1); return elements.at(choose(engine)); } catch (std::exception& e) { printf("Exception in selectRandomElement: %s", e.what()); } } /* @brief Scale by magnitude of the data */ template<typename T> class UnitScaleFunctor { public: UnitScaleFunctor() {}; UnitScaleFunctor(const Eigen::Tensor<T, 2>& data) { setUnitScale(data); }; ~UnitScaleFunctor() {}; void setUnitScale(const Eigen::Tensor<T, 2>& data) { const Eigen::Tensor<T, 0> max_value = data.maximum(); const Eigen::Tensor<T, 0> min_value = data.minimum(); unit_scale_ = 1 / sqrt(pow(max_value(0) - min_value(0), 2)); } T getUnitScale() { return unit_scale_; } T operator()(const T& x_I) const { return x_I * unit_scale_; }; private: T unit_scale_; }; /* @brief Project the data onto a specific range */ template<typename T> class LinearScaleFunctor { public: LinearScaleFunctor() = default; LinearScaleFunctor(const T& domain_min, const T& domain_max, const T& range_min, const T& range_max): domain_min_(domain_min), domain_max_(domain_max), range_min_(range_min), range_max_(range_max){}; ~LinearScaleFunctor() = default; T operator()(const T& x_I) const { T t = (x_I - domain_min_) / (domain_max_ - domain_min_); return (range_min_ + (range_max_ - range_min_) * t); }; private: T domain_min_; T domain_max_; T range_min_; T range_max_; }; /* @brief Project the data onto a specific range */ template<typename T, int N> class LinearScale { public: LinearScale() = default; LinearScale(const Eigen::Tensor<T, N>& data, const T& range_min, const T& range_max) : range_min_(range_min), range_max_(range_max) { setDomain(data); } LinearScale(const T& range_min, const T& range_max) : range_min_(range_min), range_max_(range_max) {} LinearScale(const T& domain_min, const T& domain_max, const T& range_min, const T& range_max) : domain_min_(domain_min), domain_max_(domain_max), range_min_(range_min), range_max_(range_max) {} ~LinearScale() = default; void setDomain(const T& domain_min, const T& domain_max) { domain_min_ = domain_min; domain_max_ = domain_max; } void setDomain(const Eigen::Tensor<T, N>& data) { const Eigen::Tensor<T, 0> max_value = data.maximum(); const Eigen::Tensor<T, 0> min_value = data.minimum(); domain_max_ = max_value(0); domain_min_ = min_value(0); } Eigen::Tensor<T, N> operator()(const Eigen::Tensor<T, N>& data) const { auto t = (data - data.constant(domain_min_)) / data.constant(domain_max_ - domain_min_); const Eigen::Tensor<T, N> data_linear = data.constant(range_min_) + data.constant(range_max_ - range_min_) * t; return data_linear; }; private: T domain_min_; T domain_max_; T range_min_; T range_max_; }; /* @brief Standardize the data using the Mean and Standard Deviation */ template<typename T, int N> class Standardize { public: Standardize() = default; Standardize(const Eigen::Tensor<T, N>& data) { setMeanAndVar(data); }; Standardize(const T& mean, const T& var): mean_(mean), var_(var) {}; ~Standardize() = default; void setMeanAndVar(const T& mean, const T& var) { mean_ = mean; var_ = var; } void setMeanAndVar(const Eigen::Tensor<T, N>& data) { // calculate the total dimensions int dim_size_tot = 1; for (int i = 0; i < N; ++i) { dim_size_tot *= data.dimension(i); } // calculate the mean Eigen::Tensor<T, 0> mean_0d = data.mean(); mean_ = mean_0d(0); // calculate the var auto residuals = data - data.constant(mean_); auto ssr = residuals.pow(2).sum(); Eigen::Tensor<T, 0> var_0d = ssr / ssr.constant(dim_size_tot - 1); var_ = var_0d(0); } T getMean() { return mean_; } T getVar() { return var_; } Eigen::Tensor<T, N> operator()(const Eigen::Tensor<T, N>& data) const { const Eigen::Tensor<T, N> data_stand = (data - data.constant(mean_)) / data.constant(var_).pow(T(0.5)); return data_stand; }; private: T mean_; ///< data set mean T var_; ///< data set var }; /* @brief Generate a permutation Matrix that will randomly shuffle the order of the columns (Data * Permut) or rows (Permut * Data) when applied to the original Matrix */ template<typename T> class MakeShuffleMatrix { public: MakeShuffleMatrix() = default; MakeShuffleMatrix(const std::vector<int>& indices, const bool& shuffle_cols) : indices_(indices) { setShuffleMatrix(shuffle_cols); }; MakeShuffleMatrix(const int& shuffle_dim_size, const bool& shuffle_cols) { setIndices(shuffle_dim_size); setShuffleMatrix(shuffle_cols); }; ~MakeShuffleMatrix() = default; void setIndices(const int& shuffle_dim_size) { // initialize the indices indices_.clear(); indices_.reserve(shuffle_dim_size); for (int i = 0; i < shuffle_dim_size; ++i) indices_.push_back(i); // randomize the indices auto rng = std::default_random_engine{}; std::shuffle(std::begin(indices_), std::end(indices_), rng); } std::vector<int> getIndices() const { return indices_; } void setShuffleMatrix(const bool& shuffle_cols) { // initialize the shuffle matrix assert(indices_.size() > 0); shuffle_matrix_.resize(int(indices_.size()), int(indices_.size())); shuffle_matrix_.setZero(); // specify the ones in the shuffle matrix for (int dim_iter = 0; dim_iter < indices_.size(); ++dim_iter) { if (shuffle_cols) shuffle_matrix_(indices_.at(dim_iter), dim_iter) = T(1); else shuffle_matrix_(dim_iter, indices_.at(dim_iter)) = T(1); } }; Eigen::Tensor<T, 2> getShuffleMatrix() const { return shuffle_matrix_; }; template<typename TT = T, std::enable_if_t<!std::is_same<TT, double>::value && std::is_same<TT, T>::value, int> = 0> void operator()(Eigen::Tensor<TT, 2>& data, const bool& shuffle_cols) { //if (shuffle_cols) data = data.contract(shuffle_matrix_, Eigen::array<Eigen::IndexPair<Eigen::Index>, 1>({ Eigen::IndexPair<Eigen::Index>(1, 0) })).eval(); //else data = shuffle_matrix_.contract(data, Eigen::array<Eigen::IndexPair<Eigen::Index>, 1>({ Eigen::IndexPair<Eigen::Index>(1, 0) })).eval(); if (shuffle_cols) { Eigen::TensorMap<Eigen::Tensor<TT, 3>> data_values(data.data(), data.dimension(0), data.dimension(1), 1); Eigen::TensorMap<Eigen::Tensor<TT, 3>> shuffle_matrix_values(shuffle_matrix_.data(), 1, shuffle_matrix_.dimension(0), shuffle_matrix_.dimension(1)); auto data_values_bcast = data_values.broadcast(Eigen::array<Eigen::Index, 3>({ 1, 1, data.dimension(1) })); auto shuffle_matrix_values_bcast = shuffle_matrix_values.broadcast(Eigen::array<Eigen::Index, 3>({ data.dimension(0), 1, 1 })); data = (data_values_bcast * shuffle_matrix_values_bcast).sum(Eigen::array<Eigen::Index, 1>({ 1 })).eval(); } else { Eigen::TensorMap<Eigen::Tensor<TT, 3>> data_values(data.data(), 1, data.dimension(0), data.dimension(1)); Eigen::TensorMap<Eigen::Tensor<TT, 3>> shuffle_matrix_values(shuffle_matrix_.data(), shuffle_matrix_.dimension(0), shuffle_matrix_.dimension(1), 1); auto data_values_bcast = data_values.broadcast(Eigen::array<Eigen::Index, 3>({ data.dimension(0), 1, 1 })); auto shuffle_matrix_values_bcast = shuffle_matrix_values.broadcast(Eigen::array<Eigen::Index, 3>({ 1, 1, data.dimension(1) })); data = (data_values_bcast * shuffle_matrix_values_bcast).sum(Eigen::array<Eigen::Index, 1>({ 1 })).eval(); } }; template<typename TT = T, std::enable_if_t<std::is_same<TT, double>::value && std::is_same<TT, T>::value, int> = 0> void operator()(Eigen::Tensor<TT, 2>& data, const bool& shuffle_cols) { if (shuffle_cols) { Eigen::TensorMap<Eigen::Tensor<TT, 3>> data_values(data.data(), data.dimension(0), data.dimension(1), 1); Eigen::TensorMap<Eigen::Tensor<TT, 3>> shuffle_matrix_values(shuffle_matrix_.data(), 1, shuffle_matrix_.dimension(0), shuffle_matrix_.dimension(1)); auto data_values_bcast = data_values.broadcast(Eigen::array<Eigen::Index, 3>({ 1, 1, data.dimension(1) })); auto shuffle_matrix_values_bcast = shuffle_matrix_values.broadcast(Eigen::array<Eigen::Index, 3>({ data.dimension(0), 1, 1 })); data = (data_values_bcast * shuffle_matrix_values_bcast).sum(Eigen::array<Eigen::Index, 1>({ 1 })).eval(); } else { Eigen::TensorMap<Eigen::Tensor<TT, 3>> data_values(data.data(), 1, data.dimension(0), data.dimension(1)); Eigen::TensorMap<Eigen::Tensor<TT, 3>> shuffle_matrix_values(shuffle_matrix_.data(), shuffle_matrix_.dimension(0), shuffle_matrix_.dimension(1), 1); auto data_values_bcast = data_values.broadcast(Eigen::array<Eigen::Index, 3>({ data.dimension(0), 1, 1 })); auto shuffle_matrix_values_bcast = shuffle_matrix_values.broadcast(Eigen::array<Eigen::Index, 3>({ 1, 1, data.dimension(1) })); data = (data_values_bcast * shuffle_matrix_values_bcast).sum(Eigen::array<Eigen::Index, 1>({ 1 })).eval(); } }; private: std::vector<int> indices_; ///< indices used to create the shuffle matrix Eigen::Tensor<T, 2> shuffle_matrix_; }; /* @brief "Smooth" binary labels 0 and 1 by a certain offset */ template<typename T> class LabelSmoother { public: LabelSmoother() = default; LabelSmoother(const T& zero_offset, const T& one_offset) : zero_offset_(zero_offset), one_offset_(one_offset) {}; ~LabelSmoother() = default; T operator()(const T& x_I) const { const T eps = 1e-3; if (x_I < eps) return x_I + zero_offset_; else if (x_I > 1 - eps) return x_I - one_offset_; else return x_I; }; private: T zero_offset_; T one_offset_; }; /* @brief One hot encoder @param[in] data Tensor of input labels in a single column vector @param{in] all_possible_values @returns an integer tensor where all rows have been expanded across the columns with the one hot encoding */ template<typename Ta, typename Tb> Eigen::Tensor<Tb, 2> OneHotEncoder(const Eigen::Tensor<Ta, 2>& data, const std::vector<Ta>& all_possible_values) { // integer encode input data std::map<Ta, int> T_to_int; for (int i = 0; i<all_possible_values.size(); ++i) T_to_int.emplace(all_possible_values[i], i); // convert to 1 hot vector Eigen::Tensor<Tb, 2> onehot_encoded(data.dimension(0), (int)T_to_int.size()); onehot_encoded.setZero(); for (int i = 0; i<data.dimension(0); ++i) onehot_encoded(i, T_to_int.at(data(i, 0))) = 1; return onehot_encoded; } /* @brief One hot encoder @param[in] data input label @param{in] all_possible_values @returns an integer tensor with the one hot encoding */ template<typename Ta, typename Tb> Eigen::Tensor<Tb, 1> OneHotEncoder(const Ta& data, const std::vector<Ta>& all_possible_values) { // integer encode input data std::map<Ta, int> T_to_int; for (int i = 0; i<all_possible_values.size(); ++i) T_to_int.emplace(all_possible_values[i], i); // convert to 1 hot vector Eigen::Tensor<Tb, 1> onehot_encoded(T_to_int.size()); onehot_encoded.setConstant(0); onehot_encoded(T_to_int.at(data)) = 1; return onehot_encoded; } /* @brief One hot categorical sampler @param[in] n_labels the number of categorical labels @returns an integer tensor with the one hot encoding */ template<typename Ta> Eigen::Tensor<Ta, 1> OneHotCategorical(const int& n_labels) { std::random_device seed; std::mt19937 engine(seed()); std::uniform_int_distribution<int> choose(0, n_labels - 1); Eigen::Tensor<Ta, 1> onehot_encoded(n_labels); onehot_encoded.setZero(); onehot_encoded(choose(engine)) = 1; return onehot_encoded; } template<typename Ta> std::pair<Ta, Ta> GaussianMixtureSampler(const Ta& x, const Ta& y, const int& label, const int& n_labels) { const Ta shift = 1.4; const Ta r = 2.0 * M_PI / Ta(n_labels) * Ta(label); Ta new_x = x * std::cos(r) - y * std::sin(r); Ta new_y = x * std::sin(r) + y * std::cos(r); new_x += shift * std::cos(r); new_y += shift * std::sin(r); return std::make_pair(new_x, new_y); }; /* @brief 2D Gaussian mixture sampler @param[in] n_dims the number of categorical labels @param[in] n_labels the number of categorical labels @returns a Tensor of gaussian mixture samples */ template<typename Ta> Eigen::Tensor<Ta, 1> GaussianMixture(const int& n_dims, const int& n_labels, int label = -1) { assert(n_dims % 2 == 0); std::random_device rd{}; std::mt19937 gen{ rd() }; // make the gaussian mixture tensor Eigen::Tensor<Ta, 1> gaussian_mixture(n_dims); gaussian_mixture.setZero(); const Ta x_var = 0.5; const Ta y_var = 0.05; int i = 0; while (i < n_dims) { // random integer if (label == -1) { std::random_device seed; std::mt19937 engine(seed()); std::uniform_int_distribution<int> choose(0, n_labels - 1); label = choose(engine); } // sample from the mixture std::normal_distribution<> dx{ 0.0f, x_var }; std::normal_distribution<> dy{ 0.0f, y_var }; std::pair<Ta, Ta> samples = GaussianMixtureSampler<Ta>(dx(gen), dy(gen), label, n_labels); gaussian_mixture(i) = samples.first; gaussian_mixture(i + 1) = samples.second; i += 2; } return gaussian_mixture; } template<typename Ta> std::pair<Ta, Ta> SwissRollSampler(const int& label, const int& n_labels) { std::random_device rd{}; std::mt19937 gen{ rd() }; std::uniform_real_distribution<> dist{ 0, 1 }; const Ta uni = Ta(dist(gen)) / Ta(n_labels) + Ta(label) / Ta(n_labels); const Ta r = std::sqrt(uni) * 3.0; const Ta rad = M_PI * 4.0 * sqrt(uni); Ta new_x = r * std::cos(rad); Ta new_y = r * std::sin(rad); return std::make_pair(new_x, new_y); }; /* @brief 2D Swiss roll sampler @param[in] n_dims the number of categorical labels @param[in] n_labels the number of categorical labels @returns a Tensor of gaussian mixture samples */ template<typename Ta> Eigen::Tensor<Ta, 1> SwissRoll(const int& n_dims, const int& n_labels, int label = -1) { assert(n_dims % 2 == 0); // make the gaussian mixture tensor Eigen::Tensor<Ta, 1> swiss_roll(n_dims); swiss_roll.setZero(); int i = 0; while (i < n_dims) { // random integer if (label == -1) { std::random_device seed; std::mt19937 engine(seed()); std::uniform_int_distribution<int> choose(0, n_labels - 1); label = choose(engine); } // sample from the mixture std::pair<Ta, Ta> samples = SwissRollSampler<Ta>(label, n_labels); swiss_roll(i) = samples.first; swiss_roll(i + 1) = samples.second; i += 2; } return swiss_roll; } /* @brief 1D Gumbel sampler where the Gumbel(0; 1) distribution can be sampled using inverse transform sampling by drawing u Uniform(0; 1) and computing g = -log(-log(u + EPS) + EPS). @param[in] n_dims the number of categorical labels @param[in] n_labels the number of categorical labels @returns a Tensor of Gumbel samples */ template<typename Ta> Eigen::Tensor<Ta, 2> GumbelSampler(const int& n_dims, const int& n_labels) { std::random_device rd{}; std::mt19937 gen{ rd() }; std::uniform_real_distribution<> dist{ 0, 1 }; Eigen::Tensor<Ta, 2> gumbel_dist(n_dims, n_labels); gumbel_dist = -(-(gumbel_dist.unaryExpr([&gen, &dist](const Ta& elem) { return Ta(dist(gen)); }) + gumbel_dist.constant(Ta(1e-12))).log() + gumbel_dist.constant(Ta(1e-12))).log(); return gumbel_dist; }; /* @brief 1D Gumbel sampler where the Gumbel(0; 1) distribution can be sampled using inverse transform sampling by drawing u  Uniform(0; 1) and computing g = -log(-log(u)). @param[in] batch_size @param[in] memory_size @param[in] encoding_size @param[in] n_epochs @returns a Tensor of Gumbel samples */ template<typename Ta> Eigen::Tensor<Ta, 4> GumbelSampler(const int& batch_size, const int& memory_size, const int& encoding_size, const int& n_epochs) { std::random_device rd{}; std::mt19937 gen{ rd() }; std::uniform_real_distribution<> dist{ 0, 1 }; Eigen::Tensor<Ta, 4> gumbel_dist(batch_size, memory_size, encoding_size, n_epochs); gumbel_dist = -(-(gumbel_dist.unaryExpr([&gen, &dist](const Ta& elem) { return Ta(dist(gen)); })).log()).log(); return gumbel_dist; }; /* @brief 1D Gaussian sampler @param[in] n_dims the number of gaussian labels @param[in] n_labels the number of gaussian labels @returns a Tensor of Gaussian samples */ template<typename Ta> Eigen::Tensor<Ta, 2> GaussianSampler(const int& n_dims, const int& n_labels) { std::random_device rd{}; std::mt19937 gen{ rd() }; std::normal_distribution<> dist{ 0.0f, 1.0f }; Eigen::Tensor<Ta, 2> gaussian_dist(n_dims, n_labels); gaussian_dist = gaussian_dist.unaryExpr([&gen, &dist](const Ta& elem) { return Ta(dist(gen)); }); return gaussian_dist; }; /* @brief 1D Gaussian sampler @param[in] batch_size @param[in] memory_size @param[in] encoding_size @param[in] n_epochs @returns a Tensor of Gaussian samples */ template<typename Ta> Eigen::Tensor<Ta, 4> GaussianSampler(const int& batch_size, const int& memory_size, const int& encoding_size, const int& n_epochs) { std::random_device rd{}; std::mt19937 gen{ rd() }; std::normal_distribution<> dist{ 0.0f, 1.0f }; Eigen::Tensor<Ta, 4> gaussian_dist(batch_size, memory_size, encoding_size, n_epochs); gaussian_dist = gaussian_dist.unaryExpr([&gen, &dist](const Ta& elem) { return Ta(dist(gen)); }); return gaussian_dist; }; /** @brief Replaces NaN and Inf with 0 */ template<typename T> T checkNan( const T& x) { if (std::isnan(x)) return T(0); else return x; } /** @brief Replaces NaN and Inf with 0 or 1e9 respectively */ template<typename T> T substituteNanInf(const T& x) { if (x == std::numeric_limits<T>::infinity()) return T(1e9); else if (x == -std::numeric_limits<T>::infinity()) return T(-1e9); else if (std::isnan(x)) return T(0); else return x; } /** @brief Clip */ template<typename T> class ClipOp { public: ClipOp() = default; ClipOp(const T& eps, const T& min, const T& max) : eps_(eps), min_(min), max_(max) {}; ~ClipOp() = default; T operator()(const T& x) const { if (x < min_ + eps_) return min_ + eps_; else if (x > max_ - eps_) return max_ - eps_; else return x; } private: T eps_ = 1e-12; ///< threshold to clip between min and max T min_ = 0; T max_ = 1; }; /** @brief return x or 0 with a specified probability */ template<typename T> class RandBinaryOp { public: RandBinaryOp() = default; RandBinaryOp(const T& p) : p_(p) {}; ~RandBinaryOp() = default; T operator()(const T& x) const { std::random_device rd; std::mt19937 gen(rd()); std::discrete_distribution<> distrib({ p_, 1 - p_ }); return x * (T)distrib(gen); } private: T p_ = (T)1; ///< probablity of 0 }; /** @brief Scale a tensor by a specified value */ template<typename TensorT> class ScaleOp { public: ScaleOp() = default; ScaleOp(const TensorT& scale) : scale_(scale) {}; ~ScaleOp() = default; TensorT operator()(const TensorT& x_I) const { return x_I * scale_; } private: TensorT scale_ = 1; }; /** @brief Offset a tensor by a specified value implemented as new_value = value + offset */ template<typename TensorT> class OffsetOp { public: OffsetOp() = default; OffsetOp(const TensorT& offset) : offset_(offset) {}; ~OffsetOp() = default; TensorT operator()(const TensorT& x_I) const { return x_I + offset_; } private: TensorT offset_ = 0; }; /* @brief Test absolute and relative closeness of values References: http://realtimecollisiondetection.net/blog/?p=89 @param: lhs Left Hand Side to compare @param: rhs Right Hand Side to Compare @param: rel_tol Relative Tolerance threshold @param: abs_tol Absolute Tolerance threshold @returns True or False */ template<typename T> bool assert_close(const T& lhs, const T& rhs, T rel_tol = 1e-4, T abs_tol = 1e-4) { return (std::fabs(lhs - rhs) <= maxFunc(abs_tol, rel_tol * maxFunc(fabs(lhs), fabs(rhs)) )); } ///** //@brief Functor for use with base classes. //*/ //template<typename TensorT, typename FunctorT> //class BaseClassFunctor //{ //public: // BaseClassFunctor() {}; // BaseClassFunctor(FunctorT<TensorT>* functor) : functor_(functor) {}; // ~BaseClassFunctor() {}; // TensorT operator()(const TensorT& x_I) const { // return (*functor_)(x_I); // } //private: // FunctorT<TensorT>* functor_; //}; } #endif //EVONET_PREPROCESSING_H<file_sep>### CMake EvoNet config file for external code ### configured by the EvoNet build system from <EvoNet>/cmake/EvoNetConfig.cmake.in # we need this to reference the target file get_filename_component(EVONET_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) # include directories for targets @_EXPORT_INCLUDE_BLOCK@ set(EVONET_ADDCXX_FLAGS "@CF_EVONET_ADDCXX_FLAGS@") ## The targets file include("${EVONET_CMAKE_DIR}/@_EVONET_EXPORT_FILE@") <file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ModelInterpreterCpu test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/ModelInterpreterDefaultDevice.h> #include <EvoNet/simulator/HarmonicOscillatorSimulator.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(modelInterpreterCpu) /** * Part 2 test suit for the ModelInterpreter class * * The following methods test cpu-based methods */ BOOST_AUTO_TEST_CASE(allocateModelErrorTensor) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 2; const int n_metrics = 3; model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); BOOST_CHECK_EQUAL(model_interpreter.getModelError()->getBatchSize(), 4); BOOST_CHECK_EQUAL(model_interpreter.getModelError()->getMemorySize(), 2); BOOST_CHECK_EQUAL(model_interpreter.getModelError()->getNMetrics(), 3); } BOOST_AUTO_TEST_CASE(reInitNodes) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 2; // TODO: test for differences between SumOp and ProdOp/ProdSCOp integration types } BOOST_AUTO_TEST_CASE(reInitModelError) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 2; const int n_metrics = 1; model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); Eigen::Tensor<float, 2> ones(batch_size, memory_size); ones.setConstant(1); model_interpreter.getModelError()->getError() = ones; Eigen::Tensor<float, 2> twos(n_metrics, memory_size); twos.setConstant(2); model_interpreter.getModelError()->getMetric() = twos; BOOST_CHECK_EQUAL(model_interpreter.getModelError()->getError()(0, 0), 1); BOOST_CHECK_EQUAL(model_interpreter.getModelError()->getMetric()(0, 0), 2); model_interpreter.reInitModelError(); BOOST_CHECK_EQUAL(model_interpreter.getModelError()->getError()(0, 0), 0); BOOST_CHECK_EQUAL(model_interpreter.getModelError()->getMetric()(0, 0), 0); } // BUG in addWeightTensor BOOST_AUTO_TEST_CASE(updateSolverParams) { ModelInterpreterDefaultDevice<float> model_interpreter; // Make a dummy weight tensor data and add it to the model interpreter WeightTensorDataCpu<float> weight_data; std::vector<float> solver_params = { 2, 3, 0.8 }; std::vector<std::pair<int, int>> weight_indices = { std::make_pair(0,0), std::make_pair(0,1),std::make_pair(1,0),std::make_pair(1,1) }; std::map<std::string, std::vector<std::pair<int, int>>> shared_weight_indices = {}; std::vector<float> weight_values = { 0, 0, 0, 0 }; weight_data.initWeightTensorData(2, 2, weight_indices, shared_weight_indices, weight_values, true, solver_params, "SumOp"); std::shared_ptr<WeightTensorData<float, Eigen::DefaultDevice>> weight_data_ptr = std::make_shared<WeightTensorDataCpu<float>>(weight_data); // Test that the learning rate was updated model_interpreter.addWeightTensor(weight_data_ptr); model_interpreter.updateSolverParams(0, 2); assert(model_interpreter.getWeightTensor(0)->getSolverParams()(0, 0, 0) == 4); assert(model_interpreter.getWeightTensor(0)->getSolverParams()(0, 0, 1) == 3); assert(model_interpreter.getWeightTensor(0)->getSolverParams()(1, 0, 0) == 4); assert(model_interpreter.getWeightTensor(0)->getSolverParams()(0, 1, 0) == 4); assert(model_interpreter.getWeightTensor(0)->getSolverParams()(1, 1, 0) == 4); } Model<float> makeModelToy1() { /** * Directed Acyclic Graph Toy Network Model */ Node<float> i1, i2, h1, h2, o1, o2, b1, b2; Link l1, l2, l3, l4, lb1, lb2, l5, l6, l7, l8, lb3, lb4; Weight<float> w1, w2, w3, w4, wb1, wb2, w5, w6, w7, w8, wb3, wb4; Model<float> model_FC_Sum; // Toy network: 1 hidden layer, fully connected, DAG i1 = Node<float>("0", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); i2 = Node<float>("1", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h1 = Node<float>("2", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h2 = Node<float>("3", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o1 = Node<float>("4", NodeType::output, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o2 = Node<float>("5", NodeType::output, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b1 = Node<float>("6", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b2 = Node<float>("7", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); // weights std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; // weight_init.reset(new RandWeightInitOp(1.0)); // No random init for testing weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w1 = Weight<float>("0", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w2 = Weight<float>("1", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w3 = Weight<float>("2", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w4 = Weight<float>("3", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb1 = Weight<float>("4", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb2 = Weight<float>("5", weight_init, solver); // input layer + bias l1 = Link("0", "0", "2", "0"); l2 = Link("1", "0", "3", "1"); l3 = Link("2", "1", "2", "2"); l4 = Link("3", "1", "3", "3"); lb1 = Link("4", "6", "2", "4"); lb2 = Link("5", "6", "3", "5"); // weights weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w5 = Weight<float>("6", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w6 = Weight<float>("7", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w7 = Weight<float>("8", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w8 = Weight<float>("9", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb3 = Weight<float>("10", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb4 = Weight<float>("11", weight_init, solver); // hidden layer + bias l5 = Link("6", "2", "4", "6"); l6 = Link("7", "2", "5", "7"); l7 = Link("8", "3", "4", "8"); l8 = Link("9", "3", "5", "9"); lb3 = Link("10", "7", "4", "10"); lb4 = Link("11", "7", "5", "11"); model_FC_Sum.setId(1); model_FC_Sum.addNodes({ i1, i2, h1, h2, o1, o2, b1, b2 }); model_FC_Sum.addWeights({ w1, w2, w3, w4, wb1, wb2, w5, w6, w7, w8, wb3, wb4 }); model_FC_Sum.addLinks({ l1, l2, l3, l4, lb1, lb2, l5, l6, l7, l8, lb3, lb4 }); return model_FC_Sum; } Model<float> model_allocateForwardPropogationLayerTensors = makeModelToy1(); BOOST_AUTO_TEST_CASE(allocateForwardPropogationLayerTensors) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 2; const bool train = true; // initialize nodes // NOTE: input and biases have been activated when the model was created // change the bias weights to shared model_allocateForwardPropogationLayerTensors.links_.at("5")->setWeightName("4"); // Check iteration one with no source/sink/weight tensors already allocated std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_allocateForwardPropogationLayerTensors, FP_operations_map, FP_operations_list); std::vector<std::string> sink_nodes_with_biases2; model_interpreter.getNextInactiveLayerBiases(model_allocateForwardPropogationLayerTensors, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.expandAllForwardPropogationOperations(FP_operations_list, FP_operations_expanded); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); std::map<int, int> max_layer_sizes; std::map<std::string, int> layer_name_pos; std::vector<int> source_layer_sizes, sink_layer_sizes; std::vector<std::vector<std::pair<int, int>>> weight_indices; std::vector<std::map<std::string, std::vector<std::pair<int, int>>>> shared_weight_indices; std::vector<std::vector<float>> weight_values; std::vector<bool> make_source_tensors, make_sink_tensors, make_weight_tensors; std::vector<int> source_layer_pos, sink_layer_pos; model_interpreter.getForwardPropogationLayerTensorDimensions(FP_operations_expanded, tensor_ops, source_layer_sizes, sink_layer_sizes, weight_indices, shared_weight_indices, weight_values, make_source_tensors, make_sink_tensors, make_weight_tensors, source_layer_pos, sink_layer_pos, max_layer_sizes, layer_name_pos, 0, 0); model_interpreter.allocateForwardPropogationLayerTensors(FP_operations_expanded, tensor_ops, source_layer_sizes, sink_layer_sizes, weight_indices, shared_weight_indices, weight_values, make_source_tensors, make_sink_tensors, make_weight_tensors, batch_size, memory_size, train); // asserts are needed because boost deallocates the pointer memory after being called... assert(model_interpreter.getLayerTensor(0)->getBatchSize()==batch_size); // sinks assert(model_interpreter.getLayerTensor(0)->getMemorySize() == memory_size); // sinks assert(model_interpreter.getLayerTensor(0)->getLayerSize() == 2); // sinks assert(model_interpreter.getLayerTensor(1)->getBatchSize() == batch_size); // sources assert(model_interpreter.getLayerTensor(1)->getMemorySize() == memory_size); // sources assert(model_interpreter.getLayerTensor(1)->getLayerSize() == 3); // sources assert(model_interpreter.getWeightTensor(0)->getLayer1Size() == 3); assert(model_interpreter.getWeightTensor(0)->getLayer2Size() == 2); assert(model_interpreter.getWeightTensor(0)->getNSolverParams() == 3); assert(model_interpreter.getWeightTensor(0)->getNSharedWeights() == 1); assert(model_interpreter.getOperationSteps(0)[0].source_layer.time_step == 0); assert(model_interpreter.getOperationSteps(0)[0].source_layer.activation->getName() == "LinearTensorOp"); assert(model_interpreter.getOperationSteps(0)[0].source_layer.activation_grad->getName() == "LinearGradTensorOp"); assert(model_interpreter.getOperationSteps(0)[0].source_layer.integration->getName() == "SumTensorOp"); assert(model_interpreter.getOperationSteps(0)[0].source_layer.integration_error->getName() == "SumErrorTensorOp"); assert(model_interpreter.getOperationSteps(0)[0].source_layer.integration_weight_grad->getName() == "SumWeightGradTensorOp"); assert(model_interpreter.getOperationSteps(0)[0].sink_layer.time_step == 0); assert(model_interpreter.getOperationSteps(0)[0].sink_layer.activation->getName() == "ReLUTensorOp"); assert(model_interpreter.getOperationSteps(0)[0].sink_layer.activation_grad->getName() == "ReLUGradTensorOp"); assert(model_interpreter.getOperationSteps(0)[0].sink_layer.integration->getName() == "SumTensorOp"); assert(model_interpreter.getOperationSteps(0)[0].sink_layer.integration_error->getName() == "SumErrorTensorOp"); assert(model_interpreter.getOperationSteps(0)[0].sink_layer.integration_weight_grad->getName() == "SumWeightGradTensorOp"); assert(model_interpreter.getOperationSteps(0)[0].weight.solver->getName() == "SGDTensorOp"); } Model<float> model_printTensorOpsSteps = makeModelToy1(); BOOST_AUTO_TEST_CASE(printTensorOpsSteps) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 1; const bool train = true; // initialize nodes // NOTE: input and biases have been activated when the model was created model_interpreter.getForwardPropogationOperations(model_printTensorOpsSteps, batch_size, memory_size, train, false, true, true); model_interpreter.printTensorOpsSteps(); } Model<float> model_getForwardPropogationOperations = makeModelToy1(); BOOST_AUTO_TEST_CASE(getForwardPropogationOperations) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 1; const bool train = true; // initialize nodes // NOTE: input and biases have been activated when the model was created // change the bias weights to shared model_getForwardPropogationOperations.links_.at("5")->setWeightName("4"); model_interpreter.getForwardPropogationOperations(model_getForwardPropogationOperations, batch_size, memory_size, train, false, true, true); // asserts are needed because boost deallocates the pointer memory after being called... int expected_layer_tensors = 4; for (int i = 0; i < expected_layer_tensors; ++i) { //std::cout << "Layer batch size (" << i << "): " << model_interpreter.getLayerTensor(i)->getBatchSize() << std::endl; //std::cout << "Layer memory size (" << i << "): " << model_interpreter.getLayerTensor(i)->getMemorySize() << std::endl; //std::cout << "Layer memory size (" << i << "): " << model_interpreter.getLayerTensor(i)->getLayerSize() << std::endl; assert(model_interpreter.getLayerTensor(i)->getBatchSize() == batch_size); // sinks assert(model_interpreter.getLayerTensor(i)->getMemorySize() == memory_size + 1); // sinks if (i == 0) { assert(model_interpreter.getLayerTensor(i)->getLayerSize() == 2); // sinks } else if (i == 1) { assert(model_interpreter.getLayerTensor(i)->getLayerSize() == 3); // sources } else if (i == 2) { assert(model_interpreter.getLayerTensor(i)->getLayerSize() == 2); // sink } else if (i == 3) { assert(model_interpreter.getLayerTensor(i)->getLayerSize() == 1); // sources } } int expected_weight_tensors = 3; for (int i = 0; i < expected_weight_tensors; ++i) { //std::cout << "Weight Layer1 size (" << i << "): " << model_interpreter.getWeightTensor(i)->getLayer1Size() << std::endl; //std::cout << "Weight Layer1 size (" << i << "): " << model_interpreter.getWeightTensor(i)->getLayer2Size() << std::endl; //std::cout << "Weight NParams size (" << i << "): " << model_interpreter.getWeightTensor(i)->getNSolverParams() << std::endl; assert(model_interpreter.getWeightTensor(i)->getNSolverParams() == 3); if (i == 0) { assert(model_interpreter.getWeightTensor(i)->getLayer1Size() == 3); assert(model_interpreter.getWeightTensor(i)->getLayer2Size() == 2); assert(model_interpreter.getWeightTensor(i)->getNSharedWeights() == 1); } else if (i == 1) { assert(model_interpreter.getWeightTensor(i)->getLayer1Size() == 1); assert(model_interpreter.getWeightTensor(i)->getLayer2Size() == 2); assert(model_interpreter.getWeightTensor(i)->getNSharedWeights() == 0); } else if (i == 2) { assert(model_interpreter.getWeightTensor(i)->getLayer1Size() == 2); assert(model_interpreter.getWeightTensor(i)->getLayer2Size() == 2); assert(model_interpreter.getWeightTensor(i)->getNSharedWeights() == 0); } } std::vector<int> expected_operation_steps = { 1, 2 }; for (int i = 0; i < expected_operation_steps.size(); ++i) { for (int j = 0; j < expected_operation_steps[i]; ++j) { //std::cout << "Source Layer Time Step (" << i << "): " << model_interpreter.getOperationSteps(i)[j].source_layer.time_step << std::endl; //std::cout << "Sink Layer Time Step (" << i << "): " << model_interpreter.getOperationSteps(i)[j].sink_layer.time_step << std::endl; assert(model_interpreter.getOperationSteps(i)[j].source_layer.time_step == 0); assert(model_interpreter.getOperationSteps(i)[j].sink_layer.time_step == 0); assert(model_interpreter.getOperationSteps(i)[j].sink_layer.integration->getName() == "SumTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].sink_layer.integration_error->getName() == "SumErrorTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].sink_layer.integration_weight_grad->getName() == "SumWeightGradTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].sink_layer.activation->getName() == "ReLUTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].sink_layer.activation_grad->getName() == "ReLUGradTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].weight.solver->getName() == "SGDTensorOp"); if (j == 0) { assert(model_interpreter.getOperationSteps(i)[j].source_layer.integration->getName() == "SumTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].source_layer.integration_error->getName() == "SumErrorTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].source_layer.integration_weight_grad->getName() == "SumWeightGradTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].source_layer.activation->getName() == "LinearTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].source_layer.activation_grad->getName() == "LinearGradTensorOp"); } else if (i == 1 && j == 1) { assert(model_interpreter.getOperationSteps(i)[j].source_layer.integration->getName() == "SumTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].source_layer.integration_error->getName() == "SumErrorTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].source_layer.integration_weight_grad->getName() == "SumWeightGradTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].source_layer.activation->getName() == "ReLUTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].source_layer.activation_grad->getName() == "ReLUGradTensorOp"); } else { assert(model_interpreter.getOperationSteps(i)[j].source_layer.integration->getName() == "SumTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].source_layer.integration_error->getName() == "SumErrorTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].source_layer.integration_weight_grad->getName() == "SumWeightGradTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].source_layer.activation->getName() == "LinearTensorOp"); assert(model_interpreter.getOperationSteps(i)[j].source_layer.activation_grad->getName() == "LinearGradTensorOp"); } } } } Model<float> model_mapValuesToLayers = makeModelToy1(); BOOST_AUTO_TEST_CASE(mapValuesToLayers) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 1; const bool train = true; // initialize nodes // NOTE: input and biases have been activated when the model was created model_interpreter.getForwardPropogationOperations(model_mapValuesToLayers, batch_size, memory_size, train, false, true, true); // create the input const std::vector<std::string> node_ids = { "0", "1" }; Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)node_ids.size()); input.setValues({ {{1, 5}}, {{2, 6}}, {{3, 7}}, {{4, 8}} }); auto node0 = model_mapValuesToLayers.getNode("0"); auto node1 = model_mapValuesToLayers.getNode("1"); model_interpreter.mapValuesToLayers(model_mapValuesToLayers, input, node_ids, "output"); for (int i = 0; i < batch_size; ++i){ for (int j = 0; j < memory_size; ++j){ BOOST_CHECK_EQUAL(model_interpreter.getLayerTensor(node0.getTensorIndex().first)->getOutput()(i, j, node0.getTensorIndex().second), input(i, j, 0)); BOOST_CHECK_EQUAL(model_interpreter.getLayerTensor(node1.getTensorIndex().first)->getOutput()(i, j, node1.getTensorIndex().second), input(i, j, 1)); } } model_interpreter.mapValuesToLayers(model_mapValuesToLayers, input, node_ids, "derivative"); for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { BOOST_CHECK_EQUAL(model_interpreter.getLayerTensor(node0.getTensorIndex().first)->getDerivative()(i, j, node0.getTensorIndex().second), input(i, j, 0)); BOOST_CHECK_EQUAL(model_interpreter.getLayerTensor(node1.getTensorIndex().first)->getDerivative()(i, j, node1.getTensorIndex().second), input(i, j, 1)); } } model_interpreter.mapValuesToLayers(model_mapValuesToLayers, input, node_ids, "error"); for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { BOOST_CHECK_EQUAL(model_interpreter.getLayerTensor(node0.getTensorIndex().first)->getError()(i, j, node0.getTensorIndex().second), input(i, j, 0)); BOOST_CHECK_EQUAL(model_interpreter.getLayerTensor(node1.getTensorIndex().first)->getError()(i, j, node1.getTensorIndex().second), input(i, j, 1)); } } model_interpreter.mapValuesToLayers(model_mapValuesToLayers, input, node_ids, "dt"); for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { BOOST_CHECK_EQUAL(model_interpreter.getLayerTensor(node0.getTensorIndex().first)->getDt()(i, j, node0.getTensorIndex().second), input(i, j, 0)); BOOST_CHECK_EQUAL(model_interpreter.getLayerTensor(node1.getTensorIndex().first)->getDt()(i, j, node1.getTensorIndex().second), input(i, j, 1)); } } } Model<float> model_executeForwardPropogationOperations = makeModelToy1(); BOOST_AUTO_TEST_CASE(executeForwardPropogationOperations) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 1; const bool train = true; // compile the graph into a set of operations model_interpreter.getForwardPropogationOperations(model_executeForwardPropogationOperations, batch_size, memory_size, train, false, true, true); // create the input const std::vector<std::string> node_ids = { "0", "1" }; Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)node_ids.size()); input.setValues({ {{1, 5}}, {{2, 6}}, {{3, 7}}, {{4, 8}} }); model_interpreter.mapValuesToLayers(model_executeForwardPropogationOperations, input, node_ids, "output"); // create the bias model_interpreter.initBiases(model_executeForwardPropogationOperations); model_interpreter.executeForwardPropogationOperations(0); // test values of output nodes Eigen::Tensor<float, 2> output(batch_size, 2); output.setValues({ {15, 15}, {19, 19}, {23, 23}, {27, 27} }); Eigen::Tensor<float, 2> net_input(batch_size, 2); net_input.setValues({ { 15, 15 },{ 19, 19 },{ 23, 23 },{ 27, 27 } }); // TODO: include full memory size const std::vector<std::string> output_nodes = { "4", "5" }; auto nodes_map = model_executeForwardPropogationOperations.getNodesMap(); for (int i = 0; i < (int)output_nodes.size(); ++i) { const std::string node_name = output_nodes[i]; for (int j = 0; j < batch_size; ++j) { for (int k = 0; k < memory_size; ++k) { //std::cout << "Node: " << node_name << "; Batch: " << j << "; Memory: " << k << std::endl; //std::cout << "Calc Output: " << model_interpreter.getLayerTensor(nodes_map.at(node_name)->getTensorIndex().first)->getOutput()(j, k, nodes_map.at(node_name)->getTensorIndex().second) << ", Expected Output: " << output(j, i) << std::endl; //std::cout << "Calc Net Input: " << model_interpreter.getLayerTensor(nodes_map.at(node_name)->getTensorIndex().first)->getInput()(j, k, nodes_map.at(node_name)->getTensorIndex().second) << ", Expected Net Input: " << net_input(j, i) << std::endl; BOOST_CHECK_CLOSE(model_interpreter.getLayerTensor(nodes_map.at(node_name)->getTensorIndex().first)->getInput()(j, k, nodes_map.at(node_name)->getTensorIndex().second), net_input(j, i), 1e-3); BOOST_CHECK_CLOSE(model_interpreter.getLayerTensor(nodes_map.at(node_name)->getTensorIndex().first)->getOutput()(j, k, nodes_map.at(node_name)->getTensorIndex().second), output(j, i), 1e-3); } } } } Model<float> model_executeModelErrorOperations = makeModelToy1(); BOOST_AUTO_TEST_CASE(executeModelErrorOperations) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 1; const int n_metrics = 1; const bool train = true; // compile the graph into a set of operations model_interpreter.getForwardPropogationOperations(model_executeModelErrorOperations, batch_size, memory_size, train, false, true, true); // create the input const std::vector<std::string> node_ids = { "0", "1" }; Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)node_ids.size()); input.setValues({ {{1, 5}}, {{2, 6}}, {{3, 7}}, {{4, 8}} }); model_interpreter.mapValuesToLayers(model_executeModelErrorOperations, input, node_ids, "output"); model_interpreter.initBiases(model_executeModelErrorOperations); // create the bias model_interpreter.executeForwardPropogationOperations(0); // FP model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); // allocate the memory // calculate the model error std::vector<std::string> output_nodes = { "4", "5" }; Eigen::Tensor<float, 2> expected(batch_size, (int)output_nodes.size()); expected.setValues({ {0, 1}, {0, 1}, {0, 1}, {0, 1} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_grad_function = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); const int layer_id = model_executeModelErrorOperations.getNode("4").getTensorIndex().first; model_interpreter.executeModelErrorOperations(expected, layer_id, loss_function, loss_grad_function, 0); Eigen::Tensor<float, 2> error(batch_size, memory_size); error.setValues({ {105.25}, {171.25}, {253.25}, {351.25} }); for (int j = 0; j < batch_size; ++j){ for (int k = 0; k < memory_size; ++k) { BOOST_CHECK_CLOSE(model_interpreter.getModelError()->getError()(j, k), error(j, k), 1e-6); } } // TODO: include full memory size Eigen::Tensor<float, 2> node_error(batch_size, (int)output_nodes.size()); node_error.setValues({ {-7.5, -7}, {-9.5, -9}, {-11.5, -11}, {-13.5, -13} }); auto nodes_map = model_executeModelErrorOperations.getNodesMap(); for (int i = 0; i < (int)output_nodes.size(); ++i){ const std::string node_name = output_nodes[i]; for (int j = 0; j < batch_size; ++j){ for (int k = 0; k < memory_size; ++k) { BOOST_CHECK_CLOSE(model_interpreter.getLayerTensor(nodes_map.at(node_name)->getTensorIndex().first)->getError()(j, k, nodes_map.at(node_name)->getTensorIndex().second), node_error(j, i), 1e-3); } } } } Model<float> model_executeModelMetricOperations = makeModelToy1(); BOOST_AUTO_TEST_CASE(executeModelMetricOperations) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 1; const int n_metrics = 1; const bool train = true; // compile the graph into a set of operations model_interpreter.getForwardPropogationOperations(model_executeModelMetricOperations, batch_size, memory_size, train, false, true, true); // create the input const std::vector<std::string> node_ids = { "0", "1" }; Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)node_ids.size()); input.setValues({ {{1, 5}}, {{2, 6}}, {{3, 7}}, {{4, 8}} }); model_interpreter.mapValuesToLayers(model_executeModelMetricOperations, input, node_ids, "output"); model_interpreter.initBiases(model_executeModelMetricOperations); // create the bias model_interpreter.executeForwardPropogationOperations(0); // FP model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); // allocate the memory // calculate the model error std::vector<std::string> output_nodes = { "4", "5" }; Eigen::Tensor<float, 2> expected(batch_size, (int)output_nodes.size()); expected.setValues({ {0, 1}, {0, 1}, {0, 1}, {0, 1} }); std::shared_ptr<MetricFunctionTensorOp<float, Eigen::DefaultDevice>> solver = std::make_shared<MAETensorOp<float, Eigen::DefaultDevice>>(MAETensorOp<float, Eigen::DefaultDevice>()); const int layer_id = model_executeModelMetricOperations.getNode("4").getTensorIndex().first; model_interpreter.executeModelMetricOperations(expected, layer_id, solver, 0, 0); Eigen::Tensor<float, 2> metric(n_metrics, memory_size); metric.setValues({ {20.5} }); for (int j = 0; j < n_metrics; ++j) { for (int k = 0; k < memory_size; ++k) { BOOST_CHECK_CLOSE(model_interpreter.getModelError()->getMetric()(j, k), metric(j, k), 1e-6); } } } Model<float> model_executeBackwardPropogationOperations = makeModelToy1(); BOOST_AUTO_TEST_CASE(executeBackwardPropogationOperations) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 1; const int n_metrics = 1; const bool train = true; // compile the graph into a set of operations model_interpreter.getForwardPropogationOperations(model_executeBackwardPropogationOperations, batch_size, memory_size, train, false, true, true); // create the input const std::vector<std::string> node_ids = { "0", "1" }; Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)node_ids.size()); input.setValues({ {{1, 5}}, {{2, 6}}, {{3, 7}}, {{4, 8}} }); model_interpreter.mapValuesToLayers(model_executeBackwardPropogationOperations, input, node_ids, "output"); model_interpreter.initBiases(model_executeBackwardPropogationOperations); // create the bias model_interpreter.executeForwardPropogationOperations(0); // FP model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); // allocate the memory // calculate the model error std::vector<std::string> output_nodes = { "4", "5" }; Eigen::Tensor<float, 2> expected(batch_size, (int)output_nodes.size()); expected.setValues({ {0, 1}, {0, 1}, {0, 1}, {0, 1} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_grad_function = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); const int layer_id = model_executeBackwardPropogationOperations.getNode("4").getTensorIndex().first; model_interpreter.executeModelErrorOperations(expected, layer_id, loss_function, loss_grad_function, 0); model_interpreter.executeBackwardPropogationOperations(0); // BP std::vector<std::string> error_nodes = { "6", "2", "3" }; Eigen::Tensor<float, 2> error(batch_size, (int)error_nodes.size()); error.setValues({ {-29, -14.5, -14.5}, {-37, -18.5, -18.5}, {-45, -22.5, -22.5}, {-53, -26.5, -26.5} }); Eigen::Tensor<float, 2> derivative(batch_size, (int)error_nodes.size()); derivative.setValues({ {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1} }); auto nodes_map = model_executeBackwardPropogationOperations.getNodesMap(); for (int i = 0; i < (int)error_nodes.size(); ++i) { const std::string node_name = error_nodes[i]; for (int j = 0; j < batch_size; ++j) { for (int k = 0; k < memory_size; ++k) { BOOST_CHECK_CLOSE(model_interpreter.getLayerTensor(nodes_map.at(node_name)->getTensorIndex().first)->getError()(j, k, nodes_map.at(node_name)->getTensorIndex().second), error(j, i), 1e-3); BOOST_CHECK_CLOSE(model_interpreter.getLayerTensor(nodes_map.at(node_name)->getTensorIndex().first)->getDerivative()(j, k, nodes_map.at(node_name)->getTensorIndex().second), derivative(j, i), 1e-3); } } } } Model<float> model_executeWeightErrorOperations = makeModelToy1(); BOOST_AUTO_TEST_CASE(executeWeightErrorOperations) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 1; const int n_metrics = 1; const bool train = true; // compile the graph into a set of operations model_interpreter.getForwardPropogationOperations(model_executeWeightErrorOperations, batch_size, memory_size, train, false, true, true); // create the input const std::vector<std::string> node_ids = { "0", "1" }; Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)node_ids.size()); input.setValues({ {{1, 5}}, {{2, 6}}, {{3, 7}}, {{4, 8}} }); model_interpreter.mapValuesToLayers(model_executeWeightErrorOperations, input, node_ids, "output"); model_interpreter.initBiases(model_executeWeightErrorOperations); // create the bias model_interpreter.executeForwardPropogationOperations(0); // FP model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); // allocate the memory // calculate the model error std::vector<std::string> output_nodes = { "4", "5" }; Eigen::Tensor<float, 2> expected(batch_size, (int)output_nodes.size()); expected.setValues({ {0, 1}, {0, 1}, {0, 1}, {0, 1} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_grad_function = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); const int layer_id = model_executeWeightErrorOperations.getNode("4").getTensorIndex().first; model_interpreter.executeModelErrorOperations(expected, layer_id, loss_function, loss_grad_function, 0); model_interpreter.executeBackwardPropogationOperations(0); // BP model_interpreter.executeWeightErrorOperations(); // Weight error // test values of input and hidden layers const std::vector<std::string> weight_ids = { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11" }; Eigen::Tensor<float, 1> weights((int)weight_ids.size()); weights.setValues({56.25f, 56.25f, 138.25f, 138.25f, 20.5f, 20.5f, 110.0f, 105.0f, 110.0f, 105.0f, 10.5f, 10.0f }); auto weights_map = model_executeBackwardPropogationOperations.getWeightsMap(); for (int i = 0; i < weight_ids.size(); ++i) { //std::cout << "Weight Error: " << weight_ids[i] << "; Calculated: " << model_interpreter.getWeightTensor( // std::get<0>(weights_map.at(weight_ids[i])->getTensorIndex()[0]))->getError()( // std::get<1>(weights_map.at(weight_ids[i])->getTensorIndex()[0]), std::get<2>(weights_map.at(weight_ids[i])->getTensorIndex()[0])) << ", Expected: " << weights(i) << std::endl; BOOST_CHECK_CLOSE(model_interpreter.getWeightTensor( std::get<0>(weights_map.at(weight_ids[i])->getTensorIndex()[0]))->getError()( std::get<1>(weights_map.at(weight_ids[i])->getTensorIndex()[0]), std::get<2>(weights_map.at(weight_ids[i])->getTensorIndex()[0])), weights(i), 1e-3); } } Model<float> model_executeWeightUpdateOperations = makeModelToy1(); BOOST_AUTO_TEST_CASE(executeWeightUpdateOperations) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 1; const int n_metrics = 1; const bool train = true; // compile the graph into a set of operations model_interpreter.getForwardPropogationOperations(model_executeWeightUpdateOperations, batch_size, memory_size, train, false, true, true); // create the input const std::vector<std::string> node_ids = { "0", "1" }; Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)node_ids.size()); input.setValues({ {{1, 5}}, {{2, 6}}, {{3, 7}}, {{4, 8}} }); model_interpreter.mapValuesToLayers(model_executeWeightUpdateOperations, input, node_ids, "output"); model_interpreter.initBiases(model_executeWeightUpdateOperations); // create the bias model_interpreter.executeForwardPropogationOperations(0); // FP model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); // allocate the memory // calculate the model error std::vector<std::string> output_nodes = { "4", "5" }; Eigen::Tensor<float, 2> expected(batch_size, (int)output_nodes.size()); expected.setValues({ {0, 1}, {0, 1}, {0, 1}, {0, 1} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_grad_function = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); const int layer_id = model_executeWeightUpdateOperations.getNode("4").getTensorIndex().first; model_interpreter.executeModelErrorOperations(expected, layer_id, loss_function, loss_grad_function, 0); model_interpreter.executeBackwardPropogationOperations(0); // BP model_interpreter.executeWeightErrorOperations(); // Weight error model_interpreter.executeWeightUpdateOperations(0); // Weight update // test values of input and hidden layers const std::vector<std::string> weight_ids = { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11" }; Eigen::Tensor<float, 1> weights((int)weight_ids.size()); weights.setValues({ 0.4375f, 0.4375f, -0.382499933f, -0.382499933f, 0.795000017f, 0.795000017f, -0.100000024f, -0.0499999523f, -0.100000024, -0.0499999523f, 0.894999981f, 0.899999976f }); auto weights_map = model_executeBackwardPropogationOperations.getWeightsMap(); for (int i = 0; i < weight_ids.size(); ++i) { //std::cout<<"Weight: "<< weight_ids[i] <<"; Calculated: "<<model_interpreter.getWeightTensor( // std::get<0>(weights_map.at(weight_ids[i])->getTensorIndex()[0]))->getWeight()( // std::get<1>(weights_map.at(weight_ids[i])->getTensorIndex()[0]), std::get<2>(weights_map.at(weight_ids[i])->getTensorIndex()[0])) <<", Expected: "<<weights(i)<<std::endl; BOOST_CHECK_CLOSE(model_interpreter.getWeightTensor( std::get<0>(weights_map.at(weight_ids[i])->getTensorIndex()[0]))->getWeight()( std::get<1>(weights_map.at(weight_ids[i])->getTensorIndex()[0]), std::get<2>(weights_map.at(weight_ids[i])->getTensorIndex()[0])), weights(i), 1e-3); } } Model<float> model_modelTrainer1 = makeModelToy1(); BOOST_AUTO_TEST_CASE(modelTrainer1) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 1; const int n_metrics = 1; const bool train = true; // update the model solver std::shared_ptr<SolverOp<float>> solver(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); for (auto& weight_map : model_modelTrainer1.getWeightsMap()) { if (weight_map.second->getSolverOp()->getName() == "SGDOp") weight_map.second->setSolverOp(solver); } // compile the graph into a set of operations and allocate all tensors model_interpreter.getForwardPropogationOperations(model_modelTrainer1, batch_size, memory_size, train, false, true, true); model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); // create the input const std::vector<std::string> node_ids = { "0", "1" }; Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)node_ids.size()); input.setValues({ {{1, 5}}, {{2, 6}}, {{3, 7}}, {{4, 8}} }); // create the expected output std::vector<std::string> output_nodes = { "4", "5" }; Eigen::Tensor<float, 2> expected(batch_size, (int)output_nodes.size()); expected.setValues({ {0, 1}, {0, 1}, {0, 1}, {0, 1} }); std::shared_ptr<LossFunctionTensorOp<float, Eigen::DefaultDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::DefaultDevice>>(MSELossTensorOp<float, Eigen::DefaultDevice>()); std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::DefaultDevice>> loss_grad_function = std::make_shared<MSELossGradTensorOp<float, Eigen::DefaultDevice>>(MSELossGradTensorOp<float, Eigen::DefaultDevice>()); const int layer_id = model_modelTrainer1.getNode("4").getTensorIndex().first; // iterate until we find the optimal values const int max_iter = 20; for (int iter = 0; iter < max_iter; ++iter) { // assign the input data model_interpreter.mapValuesToLayers(model_modelTrainer1, input, node_ids, "output"); model_interpreter.initBiases(model_modelTrainer1); // create the bias model_interpreter.executeForwardPropogationOperations(0); //FP // calculate the model error and node output error model_interpreter.executeModelErrorOperations(expected, layer_id, loss_function, loss_grad_function, 0); std::cout << "Error at iteration: " << iter << " is " << model_interpreter.getModelError()->getError().sum() << std::endl; model_interpreter.executeBackwardPropogationOperations(0); // BP model_interpreter.executeWeightErrorOperations(); // Weight error model_interpreter.executeWeightUpdateOperations(iter); // Weight update // reinitialize the model if (iter != max_iter - 1) { model_interpreter.reInitNodes(); model_interpreter.reInitModelError(); } } const Eigen::Tensor<float, 0> total_error = model_interpreter.getModelError()->getError().sum(); BOOST_CHECK(total_error(0) <= 757.0); } Model<float> makeModelToy2() { /** * Directed Cyclic Graph Toy Network Model */ Node<float> i1, h1, o1, b1, b2; Link l1, l2, l3, lb1, lb2; Weight<float> w1, w2, w3, wb1, wb2; Model<float> model2; // Toy network: 1 hidden layer, fully connected, DCG i1 = Node<float>("0", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h1 = Node<float>("1", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o1 = Node<float>("2", NodeType::output, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b1 = Node<float>("3", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b2 = Node<float>("4", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); // weights std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; // weight_init.reset(new RandWeightInitOp(1.0)); // No random init for testing weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w1 = Weight<float>("0", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w2 = Weight<float>("1", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w3 = Weight<float>("2", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb1 = Weight<float>("3", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb2 = Weight<float>("4", weight_init, solver); weight_init.reset(); solver.reset(); // links l1 = Link("0", "0", "1", "0"); l2 = Link("1", "1", "2", "1"); l3 = Link("2", "1", "1", "2"); // cycle lb1 = Link("3", "3", "1", "3"); lb2 = Link("4", "4", "2", "4"); model2.setId(2); model2.addNodes({ i1, h1, o1, b1, b2 }); model2.addWeights({ w1, w2, w3, wb1, wb2 }); model2.addLinks({ l1, l2, l3, lb1, lb2 }); model2.findCycles(); return model2; } Model<float> model_FPTT = makeModelToy2(); BOOST_AUTO_TEST_CASE(FPTT) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 5; const int memory_size = 8; const int n_metrics = 1; const bool train = true; // compile the graph into a set of operations and allocate all tensors model_interpreter.getForwardPropogationOperations(model_FPTT, batch_size, memory_size, train, false, true, true); model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); // create the input const std::vector<std::string> input_ids = { "0", "3", "4" }; // biases are set to zero Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)input_ids.size()); input.setValues( { {{8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}, {1, 0, 0}}, {{9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}}, {{10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}}, {{11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}}, {{12, 0, 0}, {11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}} } ); model_interpreter.mapValuesToLayers(model_FPTT, input, input_ids, "output"); model_interpreter.FPTT(4); // test values of output nodes Eigen::Tensor<float, 3> output(batch_size, memory_size, 5); // dim2: # of model nodes output.setValues({ {{8, 26, 26, 0, 0}, {7, 18, 18, 0, 0}, {6, 11, 11, 0, 0}, {5, 5, 5, 0, 0}, {4, 0, 0, 0, 0}, {3, 0, 0, 0, 0}, {2, 0, 0, 0, 0}, {1, 0, 0, 0, 0}}, {{9, 30, 30, 0, 0}, {8, 21, 21, 0, 0}, {7, 13, 13, 0, 0}, {6, 6, 6, 0, 0}, {5, 0, 0, 0, 0}, {4, 0, 0, 0, 0}, {3, 0, 0, 0, 0}, {2, 0, 0, 0, 0}}, {{10, 34, 34, 0, 0}, {9, 24, 24, 0, 0}, {8, 15, 15, 0, 0}, {7, 7, 7, 0, 0}, {6, 0, 0, 0, 0}, {5, 0, 0, 0, 0}, {4, 0, 0, 0, 0}, {3, 0, 0, 0, 0}}, {{11, 38, 38, 0, 0}, {10, 27, 27, 0, 0}, {9, 17, 17, 0, 0}, {8, 8, 8, 0, 0}, {7, 0, 0, 0, 0}, {6, 0, 0, 0, 0}, {5, 0, 0, 0, 0}, {4, 0, 0, 0, 0}}, {{12, 42, 42, 0, 0}, {11, 30, 30, 0, 0}, {10, 19, 19, 0, 0}, {9, 9, 9, 0, 0}, {8, 0, 0, 0, 0}, {7, 0, 0, 0, 0}, {6, 0, 0, 0, 0}, {5, 0, 0, 0, 0}} } ); Eigen::Tensor<float, 3> net_input(batch_size, memory_size, 5); // dim2: # of model nodes net_input.setValues({ {{0, 26, 26, 0, 0}, {0, 18, 18, 0, 0}, {0, 11, 11, 0, 0}, {0, 5, 5, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}}, {{0, 30, 30, 0, 0}, {0, 21, 21, 0, 0}, {0, 13, 13, 0, 0}, {0, 6, 6, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}}, {{0, 34, 34, 0, 0}, {0, 24, 24, 0, 0}, {0, 15, 15, 0, 0}, {0, 7, 7, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}}, {{0, 38, 38, 0, 0}, {0, 27, 27, 0, 0}, {0, 17, 17, 0, 0}, {0, 8, 8, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}}, {{0, 42, 42, 0, 0}, {0, 30, 30, 0, 0}, {0, 19, 19, 0, 0}, {0, 9, 9, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}} } ); const std::vector<std::string> output_nodes = { "0", "1", "2", "3", "4" }; auto nodes_map = model_FPTT.getNodesMap(); for (int j = 0; j < batch_size; ++j){ for (int k = 0; k < memory_size; ++k) { for (int i = 0; i < output_nodes.size(); ++i) { const std::string node_name = output_nodes[i]; //std::cout << "Node: " << node_name << "; Batch: " << j << "; Memory: " << k << std::endl; //std::cout << "Calc Output: " << model_interpreter.getLayerTensor(nodes_map.at(node_name)->getTensorIndex().first)->getOutput()(j, k, nodes_map.at(node_name)->getTensorIndex().second) << ", Expected Output: " << output(j, k, i) << std::endl; //std::cout << "Calc Net Input: " << model_interpreter.getLayerTensor(nodes_map.at(node_name)->getTensorIndex().first)->getInput()(j, k, nodes_map.at(node_name)->getTensorIndex().second) << ", Expected Net Input: " << net_input(j, k, i) << std::endl; BOOST_CHECK_CLOSE(model_interpreter.getLayerTensor(nodes_map.at(node_name)->getTensorIndex().first)->getOutput()(j, k, nodes_map.at(node_name)->getTensorIndex().second), output(j, k, i), 1e-3); BOOST_CHECK_CLOSE(model_interpreter.getLayerTensor(nodes_map.at(node_name)->getTensorIndex().first)->getInput()(j, k, nodes_map.at(node_name)->getTensorIndex().second), net_input(j, k, i), 1e-3); } } } } Model<float> model_CETT = makeModelToy2(); BOOST_AUTO_TEST_CASE(CETT) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 5; const int memory_size = 8; const int n_metrics = 1; const bool train = true; // compile the graph into a set of operations and allocate all tensors model_interpreter.getForwardPropogationOperations(model_CETT, batch_size, memory_size, train, false, true, true); model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); // create the input const std::vector<std::string> input_ids = { "0", "3", "4" }; // biases are set to zero Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)input_ids.size()); input.setValues( { {{8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}, {1, 0, 0}}, {{9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}}, {{10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}}, {{11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}}, {{12, 0, 0}, {11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}} } ); model_interpreter.mapValuesToLayers(model_CETT, input, input_ids, "output"); model_interpreter.FPTT(4); // calculate the error // expected output (from t=n to t=0) const std::vector<std::string> output_nodes = { "2" }; // y = m1*(m2*x + b*yprev) where m1 = 1, m2 = 1 and b = -1 Eigen::Tensor<float, 3> expected(batch_size, memory_size, (int)output_nodes.size()); expected.setValues( { { { 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 },{ 1 } }, { { 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 } }, { { 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 } }, { { 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 } }, { { 6 },{ 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 } } } ); std::shared_ptr<LossFunctionOp<float>> loss_function = std::make_shared<MSELossOp<float>>(MSELossOp<float>()); std::shared_ptr<LossFunctionGradOp<float>> loss_function_grad = std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>()); model_interpreter.CETT(model_CETT, expected, output_nodes, loss_function, loss_function_grad, 4); // test values of errors of the output nodes Eigen::Tensor<float, 2> model_error(batch_size, memory_size); model_error.setValues({ {242,98,32,2,0,0,0,0}, {312.5f,144.5f,40.5f,4.5f,0,0,0,0}, {420.5f,180.5f,60.5f,4.5f,0,0,0,0}, {512,242,72,8,0,0,0,0}, {648,288,98,8,0,0,0,0} }); Eigen::Tensor<float, 3> node_error(batch_size, memory_size, (int)output_nodes.size()); node_error.setValues( { { { -22 }, { -14 }, { -8 }, { -2 }, { 0.0f }, { 0.0f }, { 0.0f }, { 0.0f }}, { { -25 },{ -17 },{ -9 },{ -3 },{ 0.0f },{ 0.0f },{ 0.0f },{ 0.0f } }, { { -29 },{ -19 },{ -11 },{ -3 },{ 0.0f },{ 0.0f },{ 0.0f },{ 0.0f } }, { { -32 },{ -22 },{ -12 },{ -4 },{ 0.0f },{ 0.0f },{ 0.0f },{ 0.0f } }, { { -36 },{ -24 },{ -14 },{ -4 },{ 0.0f },{ 0.0f },{ 0.0f },{ 0.0f } } } ); auto nodes_map = model_CETT.getNodesMap(); for (int j = 0; j < batch_size; ++j) { for (int k = 0; k < memory_size; ++k) { //std::cout << "Batch: " << j << "; Memory: " << k << std::endl; //std::cout << "Calc Model Error: " << model_interpreter.getModelError()->getError()(j, k) << ", Expected Error: " << model_error(j, k) << std::endl; BOOST_CHECK_CLOSE(model_interpreter.getModelError()->getError()(j, k), model_error(j, k), 1e-6); for (int i = 0; i < output_nodes.size(); ++i) { const std::string node_name = output_nodes[i]; //std::cout << "Node: " << node_name << "; Batch: " << j << "; Memory: " << k << std::endl; //std::cout << "Calc Node Error: " << model_interpreter.getLayerTensor(nodes_map.at(node_name)->getTensorIndex().first)->getError()(j, k, nodes_map.at(node_name)->getTensorIndex().second) << ", Expected Error: " << node_error(j, k, i) << std::endl; BOOST_CHECK_CLOSE(model_interpreter.getLayerTensor(nodes_map.at(node_name)->getTensorIndex().first)->getError()(j, k, nodes_map.at(node_name)->getTensorIndex().second), node_error(j, k, i), 1e-3); } } } } Model<float> model_CMTT = makeModelToy2(); BOOST_AUTO_TEST_CASE(CMTT) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 5; const int memory_size = 8; const int n_metrics = 1; const bool train = true; // compile the graph into a set of operations and allocate all tensors model_interpreter.getForwardPropogationOperations(model_CMTT, batch_size, memory_size, train, false, true, true); model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); // create the input const std::vector<std::string> input_ids = { "0", "3", "4" }; // biases are set to zero Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)input_ids.size()); input.setValues( { {{8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}, {1, 0, 0}}, {{9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}}, {{10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}}, {{11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}}, {{12, 0, 0}, {11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}} } ); model_interpreter.mapValuesToLayers(model_CMTT, input, input_ids, "output"); model_interpreter.FPTT(4); // calculate the metric // expected output (from t=n to t=0) const std::vector<std::string> output_nodes = { "2" }; // y = m1*(m2*x + b*yprev) where m1 = 1, m2 = 1 and b = -1 Eigen::Tensor<float, 3> expected(batch_size, memory_size, (int)output_nodes.size()); expected.setValues( { { { 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 },{ 1 } }, { { 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 } }, { { 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 } }, { { 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 } }, { { 6 },{ 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 } } } ); std::shared_ptr<MetricFunctionOp<float>> metric_function = std::make_shared<MAEOp<float>>(MAEOp<float>()); model_interpreter.CMTT(model_CMTT, expected, output_nodes, metric_function, 4, 0); // test values of metrics of the output nodes Eigen::Tensor<float, 2> model_metric(n_metrics, memory_size); model_metric.setValues({ {28.7999,19.2,10.8,3.2,0,0,0,0}}); auto nodes_map = model_CMTT.getNodesMap(); for (int j = 0; j < n_metrics; ++j) { for (int k = 0; k < memory_size; ++k) { //std::cout << "Metric: " << j << "; Memory: " << k << std::endl; //std::cout << "Calc Model Error: " << model_interpreter.getModelError()->getMetric()(j, k) << ", Expected Error: " << model_metric(j, k) << std::endl; BOOST_CHECK_CLOSE(model_interpreter.getModelError()->getMetric()(j, k), model_metric(j, k), 1e-3); } } } Model<float> model_TBPTT = makeModelToy2(); BOOST_AUTO_TEST_CASE(TBPTT) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 5; const int memory_size = 8; const int n_metrics = 1; const bool train = true; // compile the graph into a set of operations and allocate all tensors model_interpreter.getForwardPropogationOperations(model_TBPTT, batch_size, memory_size, train, false, true, true); model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); // create the input const std::vector<std::string> input_ids = { "0", "3", "4" }; // biases are set to zero Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)input_ids.size()); input.setValues( { {{8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}, {1, 0, 0}}, {{9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}}, {{10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}}, {{11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}}, {{12, 0, 0}, {11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}} } ); model_interpreter.mapValuesToLayers(model_TBPTT, input, input_ids, "output"); model_interpreter.FPTT(4); // calculate the error // expected output (from t=n to t=0) const std::vector<std::string> output_nodes = { "2" }; // y = m1*(m2*x + b*yprev) where m1 = 1, m2 = 1 and b = -1 Eigen::Tensor<float, 3> expected(batch_size, memory_size, (int)output_nodes.size()); expected.setValues( { { { 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 },{ 1 } }, { { 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 } }, { { 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 } }, { { 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 } }, { { 6 },{ 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 } } } ); std::shared_ptr<LossFunctionOp<float>> loss_function = std::make_shared<MSELossOp<float>>(MSELossOp<float>()); std::shared_ptr<LossFunctionGradOp<float>> loss_function_grad = std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>()); model_interpreter.CETT(model_TBPTT, expected, output_nodes, loss_function, loss_function_grad, 4); model_interpreter.TBPTT(4); // test values of output nodes Eigen::Tensor<float, 3> node_error(batch_size, memory_size, 5); // dim2: # of model nodes node_error.setValues({ { { -22, -22, -22, -22, -22 },{-36, -36, -14, -36, -14 },{ -44, -44, -8, -44, -8 },{ -46, -46, -2, -46, -2 },{ 0, -46, 0, 0, 0 },{ 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 0 } }, { { -25, -25, -25, -25, -25 },{ -42, -42, -17, -42, -17 },{ -51, -51, -9, -51, -9 },{ -54, -54, -3, -54, -3 },{ 0, -54, 0, 0, 0 },{ 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 0 } }, { { -29, -29, -29, -29, -29 },{ -48, -48, -19, -48, -19 },{ -59, -59, -11, -59, -11 },{ -62, -62, -3, -62, -3 },{ 0, -62, 0, 0, 0 },{ 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 0 } }, { { -32, -32, -32, -32, -32 },{ -54, -54, -22, -54, -22 },{ -66, -66, -12, -66, -12 },{ -70, -70, -4, -70, -4 },{ 0, -70, 0, 0, 0 },{ 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 0 } }, { {-36, -36, -36, -36, -36 },{-60, -60, -24, -60, -24 },{-74, -74, -14, -74, -14 },{ -78, -78, -4, -78, -4 },{ 0, -78, 0, 0, 0 },{ 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 0 } } } ); Eigen::Tensor<float, 3> derivative(batch_size, memory_size, 5); derivative.setValues({ {{1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {0, 1, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}}, {{1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {0, 1, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}}, {{1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {0, 1, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}}, {{1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {0, 1, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}}, {{1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {1, 1, 0, 1, 1}, {0, 1, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}} } ); const std::vector<std::string> error_nodes = { "0", "1", "2", "3", "4" }; for (int j = 0; j < batch_size; ++j) { for (int k = 0; k < memory_size; ++k) { for (int i = 0; i < error_nodes.size(); ++i) { const std::string node_name = error_nodes[i]; //std::cout << "Node: " << node_name << "; Batch: " << j << "; Memory: " << k << std::endl; //std::cout << "Calc Error: " << model_interpreter.getLayerTensor(model_TBPTT.nodes_.at(node_name)->getTensorIndex().first)->getError()(j, k, model_TBPTT.nodes_.at(node_name)->getTensorIndex().second) << ", Expected Error: " << node_error(j, k, i) << std::endl; //std::cout << "Calc Derivative: " << model_interpreter.getLayerTensor(model_TBPTT.nodes_.at(node_name)->getTensorIndex().first)->getDerivative()(j, k, model_TBPTT.nodes_.at(node_name)->getTensorIndex().second) << ", Expected Derivative: " << derivative(j, k, i) << std::endl; BOOST_CHECK_CLOSE(model_interpreter.getLayerTensor(model_TBPTT.nodes_.at(node_name)->getTensorIndex().first)->getError()(j, k, model_TBPTT.nodes_.at(node_name)->getTensorIndex().second), node_error(j, k, i), 1e-3); BOOST_CHECK_CLOSE(model_interpreter.getLayerTensor(model_TBPTT.nodes_.at(node_name)->getTensorIndex().first)->getDerivative()(j, k, model_TBPTT.nodes_.at(node_name)->getTensorIndex().second), derivative(j, k, i), 1e-3); } } } } Model<float> model_updateWeights = makeModelToy2(); BOOST_AUTO_TEST_CASE(updateWeights) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 5; const int memory_size = 8; const int n_metrics = 1; const bool train = true; // compile the graph into a set of operations and allocate all tensors model_interpreter.getForwardPropogationOperations(model_updateWeights, batch_size, memory_size, train, false, true, true); model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); // create the input const std::vector<std::string> input_ids = { "0", "3", "4" }; // biases are set to zero Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)input_ids.size()); input.setValues( { {{8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}, {1, 0, 0}}, {{9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}}, {{10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}}, {{11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}}, {{12, 0, 0}, {11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}} } ); model_interpreter.mapValuesToLayers(model_updateWeights, input, input_ids, "output"); model_interpreter.FPTT(4); // calculate the error // expected output (from t=n to t=0) const std::vector<std::string> output_nodes = { "2" }; // y = m1*(m2*x + b*yprev) where m1 = 1, m2 = 1 and b = -1 Eigen::Tensor<float, 3> expected(batch_size, memory_size, (int)output_nodes.size()); expected.setValues( { { { 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 },{ 1 } }, { { 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 } }, { { 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 } }, { { 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 } }, { { 6 },{ 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 } } }); std::shared_ptr<LossFunctionOp<float>> loss_function = std::make_shared<MSELossOp<float>>(MSELossOp<float>()); std::shared_ptr<LossFunctionGradOp<float>> loss_function_grad = std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>()); model_interpreter.CETT(model_updateWeights, expected, output_nodes, loss_function, loss_function_grad, 4); model_interpreter.TBPTT(4); model_interpreter.updateWeights(0); auto weights_map = model_TBPTT.getWeightsMap(); // test values of output nodes std::vector<std::string> weight_ids = { "0", "1", "2", "3", "4" }; Eigen::Tensor<float, 1> weights(weight_ids.size()); weights.setValues({ -19.6240005f, -15.744f, -34.572f, 1.0f, 1.0f }); for (int i = 0; i < weight_ids.size(); ++i) { BOOST_CHECK_CLOSE(model_interpreter.getWeightTensor( std::get<0>(weights_map.at(weight_ids[i])->getTensorIndex()[0]))->getWeight()( std::get<1>(weights_map.at(weight_ids[i])->getTensorIndex()[0]), std::get<2>(weights_map.at(weight_ids[i])->getTensorIndex()[0])), weights(i), 1e-3); } } Model<float> model_modelTrainer2 = makeModelToy2(); BOOST_AUTO_TEST_CASE(modelTrainer2) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 5; const int memory_size = 8; const int n_metrics = 1; const bool train = true; // update the model solver std::shared_ptr<SolverOp<float>> solver(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); for (auto& weight_map : model_modelTrainer2.getWeightsMap()) { if (weight_map.second->getSolverOp()->getName() == "SGDOp") weight_map.second->setSolverOp(solver); } // compile the graph into a set of operations and allocate all tensors model_interpreter.getForwardPropogationOperations(model_modelTrainer2, batch_size, memory_size, train, false, true, true); model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); // create the input const std::vector<std::string> input_nodes = { "0", "3", "4" }; // biases are set to zero Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)input_nodes.size()); input.setValues( { {{8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}, {1, 0, 0}}, {{9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}}, {{10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}}, {{11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}}, {{12, 0, 0}, {11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}} } ); // expected output (from t=n to t=0) for y = m1*(m2*x + b*yprev) where m1 = 1, m2 = 1 and b = -1 const std::vector<std::string> output_nodes = { "2" }; Eigen::Tensor<float, 3> expected(batch_size, memory_size, (int)output_nodes.size()); expected.setValues( { { { 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 },{ 1 } }, { { 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 } }, { { 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 } }, { { 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 } }, { { 6 },{ 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 } } }); std::shared_ptr<LossFunctionOp<float>> loss_function = std::make_shared<MSELossOp<float>>(MSELossOp<float>()); std::shared_ptr<LossFunctionGradOp<float>> loss_function_grad = std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>()); // iterate until we find the optimal values const int max_iter = 50; for (int iter = 0; iter < max_iter; ++iter) { // assign the input data model_interpreter.initBiases(model_modelTrainer2); // create the bias model_interpreter.mapValuesToLayers(model_modelTrainer2, input, input_nodes, "output"); model_interpreter.FPTT(4); //FP // calculate the model error and node output error model_interpreter.CETT(model_modelTrainer2, expected, output_nodes, loss_function, loss_function_grad, 4); std::cout << "Error at iteration: " << iter << " is " << model_interpreter.getModelError()->getError().sum() << std::endl; model_interpreter.TBPTT(4); // BP model_interpreter.updateWeights(iter); // Weight update // reinitialize the model if (iter != max_iter - 1) { model_interpreter.reInitNodes(); model_interpreter.reInitModelError(); } } const Eigen::Tensor<float, 0> total_error = model_interpreter.getModelError()->getError().sum(); BOOST_CHECK(total_error(0) <= 1492.6); } Model<float> model_getModelResults = makeModelToy2(); BOOST_AUTO_TEST_CASE(getModelResults) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 5; const int memory_size = 8; const int n_metrics = 1; const bool train = true; // compile the graph into a set of operations and allocate all tensors model_interpreter.getForwardPropogationOperations(model_getModelResults, batch_size, memory_size, train, false, true, true); model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); // create the input const std::vector<std::string> input_ids = { "0", "3", "4" }; // biases are set to zero Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)input_ids.size()); input.setValues( { {{8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}, {1, 0, 0}}, {{9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}}, {{10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}}, {{11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}}, {{12, 0, 0}, {11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}} } ); model_interpreter.mapValuesToLayers(model_getModelResults, input, input_ids, "output"); model_interpreter.mapValuesToLayers(model_getModelResults, input, input_ids, "input"); model_interpreter.FPTT(4); // calculate the error // expected output (from t=n to t=0) const std::vector<std::string> output_nodes = { "2" }; // y = m1*(m2*x + b*yprev) where m1 = 1, m2 = 1 and b = -1 Eigen::Tensor<float, 3> expected(batch_size, memory_size, (int)output_nodes.size()); expected.setValues( { { { 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 },{ 1 } }, { { 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 } }, { { 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 } }, { { 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 } }, { { 6 },{ 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 } } }); std::shared_ptr<LossFunctionOp<float>> loss_function = std::make_shared<MSELossOp<float>>(MSELossOp<float>()); std::shared_ptr<LossFunctionGradOp<float>> loss_function_grad = std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>()); std::shared_ptr<MetricFunctionOp<float>> metric_function = std::make_shared<MAEOp<float>>(MAEOp<float>()); model_interpreter.CETT(model_getModelResults, expected, output_nodes, loss_function, loss_function_grad, 4); model_interpreter.CMTT(model_getModelResults, expected, output_nodes, metric_function, 4, 0); model_interpreter.TBPTT(4); model_interpreter.updateWeights(0); model_interpreter.getModelResults(model_getModelResults, true, true, true, true); // test values of output nodes Eigen::Tensor<float, 3> output(batch_size, memory_size, (int)output_nodes.size()); // dim2: # of model nodes output.setValues({ {{26}, {18}, {11}, {5}, {0}, {0}, {0}, {0}}, {{30}, {21}, {13}, {6}, {0}, {0}, {0}, {0}}, {{34}, {24}, {15}, {7}, {0}, {0}, {0}, {0}}, {{38}, {27}, {17}, {8}, {0}, {0}, {0}, {0}}, {{42}, {30}, {19}, {9}, {0}, {0}, {0}, {0}} } ); for (int j = 0; j < batch_size; ++j) { for (int k = 0; k < memory_size; ++k) { for (int i = 0; i < output_nodes.size(); ++i) { const std::string node_name = output_nodes[i]; //std::cout << "Node: " << node_name << "; Batch: " << j << "; Memory: " << k << std::endl; //std::cout << "Calc Output: " << model_getModelResults.getNodesMap().at(node_name)->getOutput()(j, k) << ", Expected Output: " << output(j, k, i) << std::endl; BOOST_CHECK_CLOSE(model_getModelResults.getNodesMap().at(node_name)->getOutput()(j, k), output(j, k, i), 1e-3); } } } // test values of model error Eigen::Tensor<float, 2> model_error(batch_size, memory_size); model_error.setValues({ {242,98,32,2,0,0,0,0}, {312.5f,144.5f,40.5f,4.5f,0,0,0,0}, {420.5f,180.5f,60.5f,4.5f,0,0,0,0}, {512,242,72,8,0,0,0,0}, {648,288,98,8,0,0,0,0} }); for (int j = 0; j < batch_size; ++j) { for (int k = 0; k < memory_size; ++k) { //std::cout << "Batch: " << j << "; Memory: " << k << std::endl; //std::cout << "Calc Model Error: " << model_getModelResults.getError()(j, k) << ", Expected Error: " << model_error(j, k) << std::endl; BOOST_CHECK_CLOSE(model_getModelResults.getError()(j, k), model_error(j, k), 1e-6); } } // test values of metrics of the output nodes Eigen::Tensor<float, 2> model_metric(n_metrics, memory_size); model_metric.setValues({ {28.7999,19.2,10.8,3.2,0,0,0,0} }); for (int j = 0; j < n_metrics; ++j) { for (int k = 0; k < memory_size; ++k) { //std::cout << "Metric: " << j << "; Memory: " << k << std::endl; //std::cout << "Calc Model Error: " << model_getModelResults.getMetric()(j, k) << ", Expected Error: " << model_metric(j, k) << std::endl; BOOST_CHECK_CLOSE(model_getModelResults.getMetric()(j, k), model_metric(j, k), 1e-3); } } // test values of weights std::vector<std::string> weight_ids = { "0", "1", "2", "3", "4" }; Eigen::Tensor<float, 1> weights(weight_ids.size()); weights.setValues({ -19.6240005f, -15.744f, -34.572f, 1.0f, 1.0f }); for (int i = 0; i < weight_ids.size(); ++i) { BOOST_CHECK_CLOSE(model_getModelResults.getWeightsMap().at(weight_ids[i])->getWeight(), weights(i), 1e-3); } // test values of input nodes std::vector<std::string> input_nodes = { "0" }; Eigen::Tensor<float, 3> input_test(batch_size, memory_size, (int)input_nodes.size()); // dim2: # of model nodes input_test.setValues({ {{8}, {7}, {6}, {5}, {4}, {3}, {2}, {1}}, {{9}, {8}, {7}, {6}, {5}, {4}, {3}, {2}}, {{10}, {9}, {8}, {7}, {6}, {5}, {4}, {3}}, {{11}, {10}, {9}, {8}, {7}, {6}, {5}, {4}}, {{12}, {11}, {10}, {9}, {8}, {7}, {6}, {5}} } ); for (int j = 0; j < batch_size; ++j) { for (int k = 0; k < memory_size; ++k) { for (int i = 0; i < input_nodes.size(); ++i) { const std::string node_name = input_nodes[i]; //std::cout << "Node: " << node_name << "; Batch: " << j << "; Memory: " << k << std::endl; //std::cout << "Calc Input: " << model_getModelResults.getNodesMap().at(node_name)->getInput()(j, k) << ", Expected Input: " << input_test(j, k, i) << std::endl; BOOST_CHECK_CLOSE(model_getModelResults.getNodesMap().at(node_name)->getInput()(j, k), input(j, k, i), 1e-3); } } } } Model<float> makeModelToy3() { /** * Interaction Graph Toy Network Model * Linear Harmonic Oscillator with three masses and two springs */ Node<float> m1, m2, m3; Link l1_to_l2, l2_to_l1, l2_to_l3, l3_to_l2; Weight<float> w1_to_w2, w2_to_w1, w2_to_w3, w3_to_w2; Model<float> model3; // Toy network: 1 hidden layer, fully connected, DCG m1 = Node<float>("m1", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); m2 = Node<float>("m2", NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); m3 = Node<float>("m3", NodeType::output, NodeStatus::initialized, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); // weights std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; // weight_init.reset(new RandWeightInitOp(1.0)); // No random init for testing weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(0.1)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w1_to_w2 = Weight<float>("m1_to_m2", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(0.1)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w2_to_w1 = Weight<float>("m2_to_m1", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(0.1)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w2_to_w3 = Weight<float>("m2_to_m3", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(0.1)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w3_to_w2 = Weight<float>("m3_to_m2", weight_init, solver); weight_init.reset(); solver.reset(); // links l1_to_l2 = Link("l1_to_l2", "m1", "m2", "m1_to_m2"); l2_to_l1 = Link("l2_to_l1", "m2", "m1", "m2_to_m1"); l2_to_l3 = Link("l2_to_l3", "m2", "m3", "m2_to_m3"); l3_to_l2 = Link("l3_to_l2", "m3", "m2", "m3_to_m2"); model3.setId(3); model3.addNodes({ m1, m2, m3 }); model3.addWeights({ w1_to_w2, w2_to_w1, w2_to_w3, w3_to_w2 }); model3.addLinks({ l1_to_l2, l2_to_l1, l2_to_l3, l3_to_l2 }); return model3; } Model<float> model_modelTrainer3 = makeModelToy3(); BOOST_AUTO_TEST_CASE(modelTrainer3) { ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 1; const int memory_size = 32; const int n_metrics = 1; const bool train = true; // update the model solver std::shared_ptr<SolverOp<float>> solver(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); for (auto& weight_map : model_modelTrainer3.getWeightsMap()) { if (weight_map.second->getSolverOp()->getName() == "SGDOp") weight_map.second->setSolverOp(solver); } // compile the graph into a set of operations and allocate all tensors model_interpreter.getForwardPropogationOperations(model_modelTrainer3, batch_size, memory_size, train, false, true, false); model_interpreter.allocateModelErrorTensor(batch_size, memory_size, n_metrics); // create the input and output (from t=n to t=0) HarmonicOscillatorSimulator<float> WeightSpring; const std::vector<std::string> input_nodes = { "m2"}; Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)input_nodes.size()); const std::vector<std::string> output_nodes = { "m1", "m3" }; Eigen::Tensor<float, 3> expected(batch_size, memory_size, (int)output_nodes.size()); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { Eigen::Tensor<float, 1> time_steps(memory_size); Eigen::Tensor<float, 2> displacements(memory_size, 3); WeightSpring.WeightSpring3W2S1D(time_steps, displacements, memory_size, 0.1, 1, 1, 1, //A 1, 1, 1, //m 0, batch_iter, 0, //xo 1); //std::cout << "time_steps: " << time_steps << std::endl; //std::cout << "displacements: " << displacements << std::endl; for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { input(batch_iter, memory_iter, 0) = displacements(memory_size - 1 - memory_iter, 1); expected(batch_iter, memory_iter, 0) = displacements(memory_size - 1 - memory_iter, 0); expected(batch_iter, memory_iter, 1) = displacements(memory_size - 1 - memory_iter, 2); } } std::shared_ptr<LossFunctionOp<float>> loss_function = std::make_shared<MSELossOp<float>>(MSELossOp<float>()); std::shared_ptr<LossFunctionGradOp<float>> loss_function_grad = std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>()); // iterate until we find the optimal values const int max_iter = 50; for (int iter = 0; iter < max_iter; ++iter) { // assign the input data model_interpreter.initBiases(model_modelTrainer3); // create the bias model_interpreter.mapValuesToLayers(model_modelTrainer3, input, input_nodes, "input"); model_interpreter.mapValuesToLayers(model_modelTrainer3, input, input_nodes, "output"); model_interpreter.FPTT(memory_size); //FP // calculate the model error and node output error model_interpreter.CETT(model_modelTrainer3, expected, output_nodes, loss_function, loss_function_grad, memory_size); std::cout << "Error at iteration: " << iter << " is " << model_interpreter.getModelError()->getError().sum() << std::endl; model_interpreter.TBPTT(memory_size); // BP model_interpreter.updateWeights(iter); // Weight update // reinitialize the model if (iter != max_iter - 1) { model_interpreter.reInitNodes(); model_interpreter.reInitModelError(); } } const Eigen::Tensor<float, 0> total_error = model_interpreter.getModelError()->getError().sum(); BOOST_CHECK(total_error(0) <= 10.6); } BOOST_AUTO_TEST_SUITE_END()<file_sep> This package contains EvoNet - Fast and intelligent processing of LC-MS, GC-MS, and HPLC data. For more information visit our website at www.EvoNet.com. <file_sep>/**TODO: Add copyright*/ #ifndef EVONET_CVAEFULLYCONN_H #define EVONET_CVAEFULLYCONN_H // .h #include <EvoNet/ml/ModelTrainer.h> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/io/ModelFile.h> #include <EvoNet/io/Parameters.h> // .cpp namespace EvoNet { /// Helper methods static void makeInputNodes(std::vector<std::string>& input_nodes, const int& n_features) { for (int i = 0; i < n_features; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } } template<class ...ParameterTypes> static void makeGaussianEncodingSamplerNodes(std::vector<std::string>& input_nodes, const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); for (int i = 0; i < std::get<EvoNetParameters::ModelTrainer::NEncodingsContinuous>(parameters).get(); ++i) { char name_char[512]; sprintf(name_char, "Gaussian_encoding_%012d-Sampler", i); std::string name(name_char); input_nodes.push_back(name); } } template<class ...ParameterTypes> static void makeCategoricalEncodingSamplerNodes(std::vector<std::string>& input_nodes, const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); for (int i = 0; i < std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(); ++i) { char name_char[512]; sprintf(name_char, "Categorical_encoding_%012d-GumbelSampler", i); std::string name(name_char); input_nodes.push_back(name); } } template<class ...ParameterTypes> static void makeCategoricalEncodingTauNodes(std::vector<std::string>& input_nodes, const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); for (int i = 0; i < std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(); ++i) { char name_char[512]; sprintf(name_char, "Categorical_encoding_%012d-InverseTau", i); std::string name(name_char); input_nodes.push_back(name); } } template<class ...ParameterTypes> static void makeAlphaEncodingNodes(std::vector<std::string>& input_nodes, const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); for (int i = 0; i < std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(); ++i) { char name_char[512]; //sprintf(name_char, "Alpha_%012d", i); sprintf(name_char, "Categorical_encoding-SoftMax-Out_%012d", i); std::string name(name_char); input_nodes.push_back(name); } } template<class ...ParameterTypes> static void makeMuEncodingNodes(std::vector<std::string>& input_nodes, const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); for (int i = 0; i < std::get<EvoNetParameters::ModelTrainer::NEncodingsContinuous>(parameters).get(); ++i) { char name_char[512]; sprintf(name_char, "Gaussian_encoding_%012d", i); std::string name(name_char); input_nodes.push_back(name); } } static std::vector<std::string> makeOutputNodes(const int& n_features) { std::vector<std::string> output_nodes; for (int i = 0; i < n_features; ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes.push_back(name); } return output_nodes; } template<class ...ParameterTypes> static std::vector<std::string> makeMuEncodingNodes(const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); std::vector<std::string> output_nodes; for (int i = 0; i < std::get<EvoNetParameters::ModelTrainer::NEncodingsContinuous>(parameters).get(); ++i) { char name_char[512]; sprintf(name_char, "Mu_%012d", i); std::string name(name_char); output_nodes.push_back(name); } return output_nodes; } template<class ...ParameterTypes> static std::vector<std::string> makeLogVarEncodingNodes(const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); std::vector<std::string> output_nodes; for (int i = 0; i < std::get<EvoNetParameters::ModelTrainer::NEncodingsContinuous>(parameters).get(); ++i) { char name_char[512]; sprintf(name_char, "LogVar_%012d", i); std::string name(name_char); output_nodes.push_back(name); } return output_nodes; } template<class ...ParameterTypes> static std::vector<std::string> makeAlphaEncodingNodes(const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); std::vector<std::string> output_nodes; for (int i = 0; i < std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(); ++i) { char name_char[512]; sprintf(name_char, "Alpha_%012d", i); std::string name(name_char); output_nodes.push_back(name); } return output_nodes; } template<class ...ParameterTypes> static std::vector<std::string> makeCategoricalSoftmaxNodes(const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); std::vector<std::string> output_nodes; for (int i = 0; i < std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(); ++i) { char name_char[512]; sprintf(name_char, "Categorical_encoding-SoftMax-Out_%012d", i); std::string name(name_char); output_nodes.push_back(name); } return output_nodes; } template<typename TensorT, typename TrainerT, typename InterpreterT, typename InterpreterFileT, class ...ParameterTypes> static void makeModelAndInterpreters(Model<TensorT>& model, TrainerT& model_trainer, std::vector<InterpreterT>& model_interpreters, InterpreterFileT& model_interpreter_file, const int& n_features, const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); // define the model interpreters setModelInterpreterParameters(model_interpreters, args...); // define the model if (std::get<EvoNetParameters::Main::MakeModel>(parameters).get()) { std::cout << "Making the model..." << std::endl; if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "EncDec") { model_trainer.makeCVAE(model, n_features, std::get<EvoNetParameters::ModelTrainer::NEncodingsContinuous>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden2>(parameters).get(), false, true); } else if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "EncDecContinued") { model_trainer.makeCVAE(model, n_features, std::get<EvoNetParameters::ModelTrainer::NEncodingsContinuous>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden2>(parameters).get(), false, true); // read in the weights ModelFile<TensorT> model_file; model_file.loadWeightValuesBinary(std::get<EvoNetParameters::General::OutputDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_model.binary", model.weights_); //WeightFile<TensorT> weight_file; //weight_file.loadWeightValuesCsv(std::get<EvoNetParameters::General::OutputDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_weights.csv", // model.weights_); // check that all weights were read in correctly for (auto& weight_map : model.getWeightsMap()) { if (weight_map.second->getInitWeight()) { std::cout << "Model " << model.getName() << " Weight " << weight_map.first << " has not be initialized." << std::endl;; } } } else if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "Class") { model_trainer.makeCVAEClassifier(model, n_features, std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden2>(parameters).get(), false, true); } else if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "ClassContinued") { model_trainer.makeCVAEClassifier(model, n_features, std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden2>(parameters).get(), false, true); // read in the weights ModelFile<TensorT> model_file; model_file.loadWeightValuesBinary(std::get<EvoNetParameters::General::OutputDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_model.binary", model.weights_); // check that all weights were read in correctly for (auto& weight_map : model.getWeightsMap()) { if (weight_map.second->getInitWeight()) { std::cout << "Model " << model.getName() << " Weight " << weight_map.first << " has not be initialized." << std::endl;; } } } else if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "Enc") { // make the encoder only model_trainer.makeCVAEEncoder(model, n_features, std::get<EvoNetParameters::ModelTrainer::NEncodingsContinuous>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden2>(parameters).get(), false, true); // read in the weights ModelFile<TensorT> model_file; model_file.loadWeightValuesBinary(std::get<EvoNetParameters::General::OutputDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_model.binary", model.weights_); // check that all weights were read in correctly for (auto& weight_map : model.getWeightsMap()) { if (weight_map.second->getInitWeight()) { std::cout << "Model " << model.getName() << " Weight " << weight_map.first << " has not be initialized." << std::endl;; } } } else if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "Dec") { // make the decoder only model_trainer.makeCVAEDecoder(model, n_features, std::get<EvoNetParameters::ModelTrainer::NEncodingsContinuous>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden2>(parameters).get(), false, true); // read in the weights ModelFile<TensorT> model_file; model_file.loadWeightValuesBinary(std::get<EvoNetParameters::General::OutputDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_model.binary", model.weights_); // check that all weights were read in correctly for (auto& weight_map : model.getWeightsMap()) { if (weight_map.second->getInitWeight()) { std::cout << "Model " << model.getName() << " Weight " << weight_map.first << " has not be initialized." << std::endl;; } } } } else { ModelFile<TensorT> model_file; loadModelFromParameters(model, model_interpreters.at(0), model_file, model_interpreter_file, args...); } model.setName(std::get<EvoNetParameters::General::OutputDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get()); //So that all output will be written to a specific directory } template<typename TensorT, typename InterpreterT> class CVAEFullyConn : public ModelTrainer<TensorT, InterpreterT> { public: /* @brief Variational autoencoder that encodes the labels using a concrete distribution and style using a gaussian distribution References: CVAE: arXiv:1804.00104 https://github.com/Schlumberger/joint-vae VAE: Based on Kingma et al, 2014: https://arxiv.org/pdf/1312.6114 https://github.com/pytorch/examples/blob/master/vae/main.py @param[in, out] model The network model @param[in] n_pixels The number of input/output pixels @param[in] n_categorical The length of the categorical layer @param[in] n_encodings The length of the encodings layer @param[in] n_hidden The length of the hidden layers @param[in] specify_layers Whether to give the `ModelInterpreter` "hints" as to the correct network structure during graph to tensor compilation */ void makeCVAE(Model<TensorT>& model, const int& n_inputs = 784, const int& n_encodings = 64, const int& n_categorical = 10, const int& n_hidden_0 = 512, const int& n_hidden_1 = 256, const int& n_hidden_2 = 64, const bool& add_bias = true, const bool& specify_layers = false); /* @brief Decoder that generates pixels from a concrete distribution and a gaussian distribution References: arXiv:1804.00104 https://github.com/Schlumberger/joint-vae @param[in, out] model The network model @param[in] n_input The number of output nodes @param[in] n_categorical The length of the categorical layer @param[in] n_encodings The length of the encodings layer @param[in] n_hidden The length of the hidden layers @param[in] specify_layers Whether to give the `ModelInterpreter` "hints" as to the correct network structure during graph to tensor compilation */ void makeCVAEDecoder(Model<TensorT>& model, const int& n_inputs = 784, const int& n_encodings = 64, const int& n_categorical = 10, const int& n_hidden_0 = 512, const int& n_hidden_1 = 256, const int& n_hidden_2 = 64, const bool& add_bias = true, const bool& specify_layers = false); /* @brief Encoder that encodes pixels to a concrete distribution and a gaussian distribution References: arXiv:1804.00104 https://github.com/Schlumberger/joint-vae @param[in, out] model The network model @param[in] n_input The number of input nodes @param[in] n_categorical The length of the categorical layer @param[in] n_encodings The length of the encodings layer @param[in] n_hidden The length of the hidden layers @param[in] specify_layers Whether to give the `ModelInterpreter` "hints" as to the correct network structure during graph to tensor compilation */ void makeCVAEEncoder(Model<TensorT>& model, const int& n_inputs = 784, const int& n_encodings = 64, const int& n_categorical = 10, const int& n_hidden_0 = 512, const int& n_hidden_1 = 256, const int& n_hidden_2 = 64, const bool& add_bias = true, const bool& specify_layers = false); /* @brief Classifier that encodes pixels to labels @param[in, out] model The network model @param[in] n_input The number of input nodes @param[in] n_categorical The number of output labels @param[in] n_hidden The length of the hidden layers @param[in] specify_layers Whether to give the `ModelInterpreter` "hints" as to the correct network structure during graph to tensor compilation */ void makeCVAEClassifier(Model<TensorT>& model, const int& n_inputs = 784, const int& n_categorical = 10, const int& n_hidden_0 = 512, const int& n_hidden_1 = 256, const int& n_hidden_2 = 64, const bool& add_bias = true, const bool& specify_layers = false); /* @brief Variational autoencoder that encodes the labels using a concrete distribution and style using a gaussian distribution that allows for addition or subtraction of two different encodings References: arXiv:1804.00104 https://github.com/Schlumberger/joint-vae @param[in, out] model The network model @param[in] n_pixels The number of input/output pixels @param[in] n_categorical The length of the categorical layer @param[in] n_encodings The length of the encodings layer @param[in] arithmetic_type "+" for addition or "-" for subtraction @param[in] n_hidden The length of the hidden layers @param[in] specify_layers Whether to give the `ModelInterpreter` "hints" as to the correct network structure during graph to tensor compilation */ void makeCVAELatentArithmetic(Model<TensorT>& model, const int& n_inputs = 784, const int& n_encodings = 64, const int& n_categorical = 10, const char& arithmetic_type = '+', const int& n_hidden_0 = 512, const int& n_hidden_1 = 256, const int& n_hidden_2 = 64, const bool& add_bias = true, const bool& specify_layers = false); /// Members bool KL_divergence_warmup_ = true; bool supervision_warmup_ = true; int supervision_percent_ = 100; TensorT classification_loss_weight_ = 1.0; TensorT beta_c_ = 30; TensorT beta_d_ = 30; TensorT capacity_c_ = 5; TensorT capacity_d_ = 5; TensorT learning_rate_ = 1e-5; TensorT gradient_clipping_ = 10; }; template <typename TensorT, typename InterpreterT> inline void CVAEFullyConn<TensorT, InterpreterT>::makeCVAE(Model<TensorT>& model, const int& n_inputs, const int& n_encodings, const int& n_categorical, const int& n_hidden_0, const int& n_hidden_1, const int& n_hidden_2, const bool& add_bias, const bool& specify_layers) { model.setId(0); model.setName("VAE"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layers); // Define the activation based on `add_norm` auto activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); auto activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(this->learning_rate_, 0.9, 0.999, 1e-8, this->gradient_clipping_)); // Add the Endocer FC layers std::vector<std::string> node_names_mu, node_names_logvar, node_names_alpha; std::vector<std::string> node_names_Gencoder, node_names_Cencoder; if (n_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "EN0", "EN0", node_names, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } if (n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "EN1", "EN1", node_names, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } if (n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "EN2", "EN2", node_names, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } // Add the encoding layers if (n_encodings > 0) { node_names_mu = model_builder.addFullyConnected(model, "MuEnc", "MuEnc", node_names, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_encodings) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_logvar = model_builder.addFullyConnected(model, "LogVarEnc", "LogVarEnc", node_names, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_encodings) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_Gencoder = model_builder.addGaussianEncoding(model, "Gaussian_encoding", "Gaussian_encoding", node_names_mu, node_names_logvar, true); } if (n_categorical > 0) { node_names_alpha = model_builder.addFullyConnected(model, "AlphaEncNonProp", "AlphaEncNonProp", node_names, n_categorical, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_categorical) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_alpha = model_builder.addStableSoftMax(model, "AlphaEnc", "AlphaEnc", node_names_alpha, specify_layers); node_names_Cencoder = model_builder.addCategoricalEncoding(model, "Categorical_encoding", "Categorical_encoding", node_names_alpha, true); } // Add the Decoder FC layers if (n_hidden_2 > 0) { if (node_names_Gencoder.size()>0 && node_names_Cencoder.size()>0) { node_names = model_builder.addFullyConnected(model, "DE2", "DE2", node_names_Gencoder, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); model_builder.addFullyConnected(model, "DE2", node_names_Cencoder, node_names, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, specify_layers); } else if (node_names_Gencoder.size()>0) { node_names = model_builder.addFullyConnected(model, "DE2", "DE2", node_names_Gencoder, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (node_names_Cencoder.size()>0) { node_names = model_builder.addFullyConnected(model, "DE2", "DE2", node_names_Cencoder, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } } if (n_hidden_1 > 0 && n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (n_hidden_1 > 0) { if (node_names_Gencoder.size()>0 && node_names_Cencoder.size()>0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names_Gencoder, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); model_builder.addFullyConnected(model, "DE1", node_names_Cencoder, node_names, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, specify_layers); } else if (node_names_Gencoder.size()>0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names_Gencoder, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (node_names_Cencoder.size()>0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names_Cencoder, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } } if (n_hidden_0 > 0 && n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (n_hidden_0 > 0) { if (node_names_Gencoder.size()>0 && node_names_Cencoder.size()>0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names_Gencoder, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); model_builder.addFullyConnected(model, "DE0", node_names_Cencoder, node_names, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, specify_layers); } else if (node_names_Gencoder.size()>0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names_Gencoder, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (node_names_Cencoder.size()>0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names_Cencoder, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } } node_names = model_builder.addFullyConnected(model, "DE-Output", "DE-Output", node_names, n_inputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 1)), solver_op, 0.0f, 0.0f, add_bias, true); // Add the actual output nodes node_names_mu = model_builder.addSinglyConnected(model, "Mu", "Mu", node_names_mu, node_names_mu.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); node_names_logvar = model_builder.addSinglyConnected(model, "LogVar", "LogVar", node_names_logvar, node_names_logvar.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); node_names_alpha = model_builder.addSinglyConnected(model, "Alpha", "Alpha", node_names_alpha, node_names_alpha.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); node_names = model_builder.addSinglyConnected(model, "Output", "Output", node_names, n_inputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names_mu) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_logvar) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_alpha) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_Cencoder) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names) model.nodes_.at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } template <typename TensorT, typename InterpreterT> inline void CVAEFullyConn<TensorT, InterpreterT>::makeCVAEDecoder(Model<TensorT>& model, const int& n_inputs, const int& n_encodings, const int& n_categorical, const int& n_hidden_0, const int& n_hidden_1, const int& n_hidden_2, const bool& add_bias, const bool& specify_layers) { model.setId(0); model.setName("CVAEDecoder"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_Gencoder, node_names_Cencoder; if (n_encodings > 0) node_names_Gencoder = model_builder.addInputNodes(model, "Gaussian_encoding", "Gaussian_encoding", n_encodings, specify_layers); // just Mu if (n_categorical > 0) node_names_Cencoder = model_builder.addInputNodes(model, "Categorical_encoding-SoftMax-Out", "Categorical_encoding-SoftMax-Out", n_categorical, specify_layers); // Define the activation based on `add_feature_norm` std::shared_ptr<ActivationOp<TensorT>> activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(this->learning_rate_, 0.9, 0.999, 1e-8, this->gradient_clipping_)); // Add the Decoder FC layers std::vector<std::string> node_names; if (n_hidden_2 > 0) { if (node_names_Gencoder.size() > 0 && node_names_Cencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE2", "DE2", node_names_Gencoder, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); model_builder.addFullyConnected(model, "DE2", node_names_Cencoder, node_names, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, specify_layers); } else if (node_names_Gencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE2", "DE2", node_names_Gencoder, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (node_names_Cencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE2", "DE2", node_names_Cencoder, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } } if (n_hidden_1 > 0 && n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (n_hidden_1 > 0) { if (node_names_Gencoder.size() > 0 && node_names_Cencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names_Gencoder, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); model_builder.addFullyConnected(model, "DE1", node_names_Cencoder, node_names, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, specify_layers); } else if (node_names_Gencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names_Gencoder, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (node_names_Cencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names_Cencoder, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } } if (n_hidden_0 > 0 && n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (n_hidden_0 > 0) { if (node_names_Gencoder.size() > 0 && node_names_Cencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names_Gencoder, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); model_builder.addFullyConnected(model, "DE0", node_names_Cencoder, node_names, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, specify_layers); } else if (node_names_Gencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names_Gencoder, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (node_names_Cencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names_Cencoder, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } } node_names = model_builder.addFullyConnected(model, "DE-Output", "DE-Output", node_names, n_inputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 1)), solver_op, 0.0f, 0.0f, add_bias, true); // Add the actual output nodes node_names = model_builder.addSinglyConnected(model, "Output", "Output", node_names, n_inputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } template <typename TensorT, typename InterpreterT> inline void CVAEFullyConn<TensorT, InterpreterT>::makeCVAEEncoder(Model<TensorT>& model, const int& n_inputs, const int& n_encodings, const int& n_categorical, const int& n_hidden_0, const int& n_hidden_1, const int& n_hidden_2, const bool& add_bias, const bool& specify_layers) { model.setId(0); model.setName("CVAEEncoder"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layers); // Define the activation based on `add_feature_norm` auto activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); auto activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(this->learning_rate_, 0.9, 0.999, 1e-8, this->gradient_clipping_)); // Add the Encoder FC layers std::vector<std::string> node_names_mu, node_names_logvar, node_names_alpha, node_names_Cencoder; if (n_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "EN0", "EN0", node_names, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } if (n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "EN1", "EN1", node_names, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } if (n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "EN2", "EN2", node_names, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } // Add the encoding layers if (n_encodings > 0) { node_names_mu = model_builder.addFullyConnected(model, "MuEnc", "MuEnc", node_names, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_encodings) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_logvar = model_builder.addFullyConnected(model, "LogVarEnc", "LogVarEnc", node_names, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_encodings) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); } if (n_categorical > 0) { node_names_alpha = model_builder.addFullyConnected(model, "AlphaEncNonProp", "AlphaEncNonProp", node_names, n_categorical, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_categorical) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); //node_names_alpha = model_builder.addStableSoftMax(model, "AlphaEnc", "AlphaEnc", node_names_alpha, specify_layers); node_names_Cencoder = model_builder.addStableSoftMax(model, "Categorical_encoding", "Categorical_encoding", node_names_alpha, true); } // Add the actual output nodes node_names_mu = model_builder.addSinglyConnected(model, "Mu", "Mu", node_names_mu, node_names_mu.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); node_names_logvar = model_builder.addSinglyConnected(model, "LogVar", "LogVar", node_names_logvar, node_names_logvar.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); node_names_alpha = model_builder.addSinglyConnected(model, "Alpha", "Alpha", node_names_alpha, node_names_alpha.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names_mu) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_logvar) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_alpha) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_Cencoder) model.nodes_.at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } template <typename TensorT, typename InterpreterT> inline void CVAEFullyConn<TensorT, InterpreterT>::makeCVAEClassifier(Model<TensorT>& model, const int& n_inputs, const const int& n_categorical, const int& n_hidden_0, const int& n_hidden_1, const int& n_hidden_2, const bool& add_bias, const bool& specify_layers) { model.setId(0); model.setName("CVAEEncoder"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layers); // Define the activation based on `add_feature_norm` auto activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); auto activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(this->learning_rate_, 0.9, 0.999, 1e-8, this->gradient_clipping_)); // Add the Encoder FC layers std::vector<std::string> node_names_mu, node_names_logvar, node_names_alpha, node_names_Cencoder; if (n_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "EN0", "EN0", node_names, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } if (n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "EN1", "EN1", node_names, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } if (n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "EN2", "EN2", node_names, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } // Add the encoding layers if (n_categorical > 0) { node_names_alpha = model_builder.addFullyConnected(model, "AlphaEncNonProp", "AlphaEncNonProp", node_names, n_categorical, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_categorical) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_Cencoder = model_builder.addStableSoftMax(model, "Categorical_encoding", "Categorical_encoding", node_names_alpha, true); } // Add the actual output nodes node_names_alpha = model_builder.addSinglyConnected(model, "Alpha", "Alpha", node_names_alpha, node_names_alpha.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names_alpha) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_Cencoder) model.nodes_.at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } template<typename TensorT, typename InterpreterT> inline void CVAEFullyConn<TensorT, InterpreterT>::makeCVAELatentArithmetic(Model<TensorT>& model, const int& n_inputs, const int& n_encodings, const int& n_categorical, const char& arithmetic_type, const int& n_hidden_0, const int& n_hidden_1, const int& n_hidden_2, const bool& add_bias, const bool& specify_layers) { model.setId(0); model.setName("VAELatentArithmetic"); ModelBuilder<TensorT> model_builder; // Define the activation based on `add_norm` auto activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); auto activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(this->learning_rate_, 0.9, 0.999, 1e-8, this->gradient_clipping_)); // Add the inputs (Left hand side) std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input@L", "Input@L", n_inputs, specify_layers); // Add the Endocer FC layers std::vector<std::string> node_names_mu, node_names_logvar, node_names_alpha; if (n_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "EN0", "EN0@L", node_names, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } if (n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "EN1", "EN1@L", node_names, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } if (n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "EN2", "EN2@L", node_names, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } // Add the latent layer std::vector<std::string> node_names_Gencoder_L, node_names_Cencoder_L; if (n_encodings > 0) { node_names_mu = model_builder.addFullyConnected(model, "MuEnc", "MuEnc@L", node_names, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_encodings) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_logvar = model_builder.addFullyConnected(model, "LogVarEnc", "LogVarEnc@L", node_names, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_encodings) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_Gencoder_L = model_builder.addSinglyConnected(model, "Gaussian_encoding_LR", "Gaussian_encoding_L", node_names_mu, node_names_mu.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, specify_layers); } if (n_categorical > 0) { node_names_alpha = model_builder.addFullyConnected(model, "AlphaEnc", "AlphaEnc@L", node_names, n_categorical, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_categorical) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_Cencoder_L = model_builder.addStableSoftMax(model, "Categorical_encoding-SoftMax_LR", "Categorical_encoding-SoftMax_L", node_names_alpha, specify_layers); } // Add the inputs (Right hand side) node_names = model_builder.addInputNodes(model, "Input@R", "Input@R", n_inputs, specify_layers); // Add the Encoder FC layers if (n_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "EN0", "EN0@R", node_names, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } if (n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "EN1", "EN1@R", node_names, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } if (n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "EN2", "EN2@R", node_names, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } // Add the latent space std::vector<std::string> node_names_Gencoder_R, node_names_Cencoder_R; if (n_encodings > 0) { node_names_mu = model_builder.addFullyConnected(model, "MuEnc", "MuEnc@R", node_names, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_encodings) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_logvar = model_builder.addFullyConnected(model, "LogVarEnc", "LogVarEnc@R", node_names, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_encodings) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_Gencoder_R = model_builder.addSinglyConnected(model, "Gaussian_encoding_LR", "Gaussian_encoding_R", node_names_mu, node_names_mu.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, specify_layers); } if (n_categorical > 0) { node_names_alpha = model_builder.addFullyConnected(model, "AlphaEnc", "AlphaEnc@R", node_names, n_categorical, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_categorical) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_Cencoder_R = model_builder.addStableSoftMax(model, "Categorical_encoding-SoftMax_LR", "Categorical_encoding-SoftMax_R", node_names_alpha, specify_layers); } // Rename the input nodes std::vector<std::vector<std::string>> tokens_vec = { {"Input@L"}, {"Input@R"} }; std::vector<std::string> replacement_vec = { "Input_L", "Input_R" }; for (auto& node_map : model.nodes_) { for (int i = 0; i < replacement_vec.size(); ++i) { if (node_map.first == tokens_vec.at(i).front()) { std::string new_node_name = ReplaceTokens(node_map.first, tokens_vec.at(i), replacement_vec.at(i)); node_map.first = new_node_name; node_map.second->setName(new_node_name); } } } for (auto& link_map : model.links_) { for (int i = 0; i < replacement_vec.size(); ++i) { if (link_map.second->getSourceNodeName() == tokens_vec.at(i).front()) { std::string new_source_name = ReplaceTokens(link_map.second->getSourceNodeName(), tokens_vec.at(i), replacement_vec.at(i)); link_map.second->setSourceNodeName(new_source_name); } } } // Rename all of the weights std::vector<std::string> tokens = { "@L", "@R" }; std::string replacement = ""; for (auto& weight_map : model.weights_) { std::string new_weight_name = ReplaceTokens(weight_map.first,tokens, replacement); weight_map.first = new_weight_name; weight_map.second->setName(new_weight_name); } for (auto& link_map : model.links_) { std::string new_weight_name = ReplaceTokens(link_map.first, tokens, replacement); link_map.second->setWeightName(new_weight_name); } // Add or subtract the left hand side latent space from the right auto weight_average = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0.5)); auto weight_sign = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)); if (arithmetic_type == '-') weight_sign = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)); std::vector<std::string> node_names_Gencoder = model_builder.addSinglyConnected(model, "Gaussian_encoding", "Gaussian_encoding", node_names_Gencoder_L, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, specify_layers); model_builder.addSinglyConnected(model, "Gaussian_encoding", node_names_Gencoder_R, node_names_Gencoder, weight_sign, std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, specify_layers); std::vector<std::string> node_names_Cencoder = model_builder.addSinglyConnected(model, "Categorical_encoding-SoftMax", "Categorical_encoding-SoftMax", node_names_Cencoder_L, n_categorical, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, weight_average, std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, specify_layers); model_builder.addSinglyConnected(model, "Categorical_encoding-SoftMax", node_names_Cencoder_R, node_names_Cencoder, weight_average, std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, specify_layers); // Add the Decoder FC layers if (n_hidden_2 > 0) { if (node_names_Gencoder.size() > 0 && node_names_Cencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE2", "DE2", node_names_Gencoder, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); model_builder.addFullyConnected(model, "DE2", node_names_Cencoder, node_names, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, specify_layers); } else if (node_names_Gencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE2", "DE2", node_names_Gencoder, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (node_names_Cencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE2", "DE2", node_names_Cencoder, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } } if (n_hidden_1 > 0 && n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (n_hidden_1 > 0) { if (node_names_Gencoder.size() > 0 && node_names_Cencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names_Gencoder, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); model_builder.addFullyConnected(model, "DE1", node_names_Cencoder, node_names, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, specify_layers); } else if (node_names_Gencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names_Gencoder, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (node_names_Cencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names_Cencoder, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } } if (n_hidden_0 > 0 && n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (n_hidden_0 > 0) { if (node_names_Gencoder.size() > 0 && node_names_Cencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names_Gencoder, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); model_builder.addFullyConnected(model, "DE0", node_names_Cencoder, node_names, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, specify_layers); } else if (node_names_Gencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names_Gencoder, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Gencoder.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } else if (node_names_Cencoder.size() > 0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names_Cencoder, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_Cencoder.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); } } node_names = model_builder.addFullyConnected(model, "DE-Output", "DE-Output", node_names, n_inputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 1)), solver_op, 0.0f, 0.0f, add_bias, true); // Add the actual output nodes node_names_mu = model_builder.addSinglyConnected(model, "Mu", "Mu", node_names_mu, node_names_mu.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); node_names_logvar = model_builder.addSinglyConnected(model, "LogVar", "LogVar", node_names_logvar, node_names_logvar.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); node_names = model_builder.addSinglyConnected(model, "Output", "Output", node_names, n_inputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names_Gencoder) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_Cencoder) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names) model.nodes_.at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } } #endif //EVONET_CVAEFULLYCONN_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ModelBuilderExperimental test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/ModelBuilderExperimental.h> #include <EvoNet/core/StringParsing.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(ModelBuilderExperimental1) BOOST_AUTO_TEST_CASE(constructor) { ModelBuilderExperimental<float>* ptr = nullptr; ModelBuilderExperimental<float>* nullPointer = nullptr; ptr = new ModelBuilderExperimental<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { ModelBuilderExperimental<float>* ptr = nullptr; ptr = new ModelBuilderExperimental<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(addBiochemicalReaction1) { ModelBuilderExperimental<float> model_builder; Model<float> model; // make the toy model BiochemicalReaction reaction1; reaction1.reaction_id = "HK1"; reaction1.reaction_name = "Hexokinase1"; reaction1.products_ids = std::vector<std::string>({"g6p","h","adp"}); reaction1.products_stoichiometry = std::vector<float>({ 1, 1, 1 }); reaction1.reactants_ids = std::vector<std::string>({ "glc","atp" }); reaction1.reactants_stoichiometry = std::vector<float>({ 1, 1 }); reaction1.used = true; reaction1.reversibility = false; BiochemicalReactions reactions; reactions.emplace("HK", reaction1); // make the fully connected model_builder.addBiochemicalReactionsSequencialMin( model, reactions, "Mod", "Mod1", std::make_shared<RandWeightInitOp<float>>(RandWeightInitOp<float>(4.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 1); std::vector<std::string> node_names_minReLU = { "HK1:glc","HK1&glc:atp", "HK1&adp&h::g6p", "HK1&adp::h", "HK1::adp" }; std::vector<std::string> node_names_sumReLU = { "HK1", "adp","atp","g6p","glc","h", "HK1:adp","HK1&adp","HK1&adp&h","HK1&adp&h:g6p","HK1&adp:h", "HK1::glc","HK1&glc","HK1&glc&atp","HK1&glc::atp"}; std::vector<std::string> link_names_dummyPos = { "HK1_to_HK1:glc", "glc_to_HK1:glc", "HK1::glc_to_HK1&glc", "HK1&glc_to_HK1&glc:atp", "atp_to_HK1&glc:atp", "HK1&glc::atp_to_HK1&glc&atp", "HK1&glc&atp_to_HK1&adp&h::g6p", "HK1&adp&h:g6p_to_g6p", "HK1&adp&h:g6p_to_HK1&adp&h", "HK1&adp&h_to_HK1&adp::h", "HK1&adp:h_to_HK1&adp", "HK1&adp:h_to_h", "HK1&adp_to_HK1::adp", "HK1:adp_to_HK1", "HK1:adp_to_adp"}; std::vector<std::string> link_names_dummyNeg = { "HK1::glc_to_HK1", "HK1::glc_to_glc", "HK1&glc::atp_to_HK1&glc", "HK1&glc::atp_to_atp", "HK1&adp&h:g6p_to_HK1&glc&atp", "HK1&adp:h_to_HK1&adp&h", "HK1:adp_to_HK1&adp"}; std::vector<std::string> link_names_learnable = { "HK1:glc_to_HK1::glc", "HK1&glc:atp_to_HK1&glc::atp", "HK1&adp&h::g6p_to_HK1&adp&h:g6p", "HK1&adp::h_to_HK1&adp:h", "HK1::adp_to_HK1:adp" }; //for (auto& e : model.nodes_) // std::cout << "Node: " << e.second->getName() << std::endl; //for (auto& e : model.links_) // std::cout << "Link: " << e.second->getName() << std::endl; //for (auto& e : model.weights_) // std::cout << "Weight: " << e.second->getName() << std::endl; // check the nodes for (const std::string& node_name : node_names_sumReLU) { BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getName(), node_name); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getModuleName(), "Mod1"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getActivation()->getName(), "ReLUOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getActivationGrad()->getName(), "ReLUGradOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getIntegration()->getName(), "SumOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getIntegrationError()->getName(), "SumErrorOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getIntegrationWeightGrad()->getName(), "SumWeightGradOp"); } for (const std::string& node_name : node_names_minReLU) { BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getName(), node_name); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getModuleName(), "Mod1"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getActivation()->getName(), "ReLUOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getActivationGrad()->getName(), "ReLUGradOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getIntegration()->getName(), "MinOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getIntegrationError()->getName(), "MinErrorOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getIntegrationWeightGrad()->getName(), "MinWeightGradOp"); } // check the links for (const std::string& name : link_names_dummyPos) { BOOST_CHECK_EQUAL(model.getLink(name).getName(), name); std::vector<std::string> test = SplitString(name, "_to_"); BOOST_CHECK_EQUAL(model.getLink(name).getSourceNodeName(), test[0]); BOOST_CHECK_EQUAL(model.getLink(name).getSinkNodeName(), test[1]); BOOST_CHECK_EQUAL(model.getLink(name).getModuleName(), "Mod1"); BOOST_CHECK_EQUAL(name, model.getLink(name).getWeightName()); } for (const std::string& name : link_names_dummyNeg) { BOOST_CHECK_EQUAL(model.getLink(name).getName(), name); std::vector<std::string> test = SplitString(name, "_to_"); BOOST_CHECK_EQUAL(model.getLink(name).getSourceNodeName(), test[0]); BOOST_CHECK_EQUAL(model.getLink(name).getSinkNodeName(), test[1]); BOOST_CHECK_EQUAL(model.getLink(name).getModuleName(), "Mod1"); BOOST_CHECK_EQUAL(name, model.getLink(name).getWeightName()); } for (const std::string& name : link_names_learnable) { BOOST_CHECK_EQUAL(model.getLink(name).getName(), name); std::vector<std::string> test = SplitString(name, "_to_"); BOOST_CHECK_EQUAL(model.getLink(name).getSourceNodeName(), test[0]); BOOST_CHECK_EQUAL(model.getLink(name).getSinkNodeName(), test[1]); BOOST_CHECK_EQUAL(model.getLink(name).getModuleName(), "Mod1"); BOOST_CHECK_EQUAL(name, model.getLink(name).getWeightName()); } // check the weights for (const std::string& name : link_names_dummyPos) { BOOST_CHECK_EQUAL(model.getWeight(name).getName(), name); BOOST_CHECK_EQUAL(model.getWeight(name).getWeightInitOp()->getName(), "ConstWeightInitOp"); BOOST_CHECK_EQUAL(model.getWeight(name).getWeightInitOp()->getParamsAsStr(), "n:1.000000"); BOOST_CHECK_EQUAL(model.getWeight(name).getSolverOp()->getName(), "DummySolverOp"); BOOST_CHECK_EQUAL(model.getWeight(name).getModuleName(), "Mod1"); } for (const std::string& name : link_names_dummyNeg) { BOOST_CHECK_EQUAL(model.getWeight(name).getName(), name); BOOST_CHECK_EQUAL(model.getWeight(name).getWeightInitOp()->getName(), "ConstWeightInitOp"); BOOST_CHECK_EQUAL(model.getWeight(name).getWeightInitOp()->getParamsAsStr(), "n:-1.000000"); BOOST_CHECK_EQUAL(model.getWeight(name).getSolverOp()->getName(), "DummySolverOp"); BOOST_CHECK_EQUAL(model.getWeight(name).getModuleName(), "Mod1"); } for (const std::string& name : link_names_learnable) { BOOST_CHECK_EQUAL(model.getWeight(name).getName(), name); BOOST_CHECK_EQUAL(model.getWeight(name).getWeightInitOp()->getName(), "RandWeightInitOp"); BOOST_CHECK_EQUAL(model.getWeight(name).getSolverOp()->getName(), "SGDOp"); BOOST_CHECK_EQUAL(model.getWeight(name).getModuleName(), "Mod1"); } } BOOST_AUTO_TEST_CASE(addBiochemicalReaction2) { ModelBuilderExperimental<float> model_builder; Model<float> model; // make the toy model BiochemicalReaction reaction1; reaction1.reaction_id = "HK1"; reaction1.reaction_name = "Hexokinase1"; reaction1.products_ids = std::vector<std::string>({ "g6p","h","adp" }); reaction1.products_stoichiometry = std::vector<float>({ 1, 1, 1 }); reaction1.reactants_ids = std::vector<std::string>({ "glc","atp" }); reaction1.reactants_stoichiometry = std::vector<float>({ 1, 1 }); reaction1.used = true; reaction1.reversibility = false; BiochemicalReactions reactions; reactions.emplace("HK", reaction1); // make the fully connected model_builder.addBiochemicalReactionsSequencialMin( model, reactions, "Mod", "Mod1", std::make_shared<RandWeightInitOp<float>>(RandWeightInitOp<float>(4.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9)), 2); std::vector<std::string> node_names_minReLU = { "HK1:glc:atp","HK1::adp::h::g6p" }; std::vector<std::string> node_names_sumReLU = { "HK1", "adp","atp","g6p","glc","h", "HK1::glc::atp", "HK1:adp:h:g6p","HK1&glc&atp" }; std::vector<std::string> link_names_dummyPos = { "HK1&glc&atp_to_HK1::adp::h::g6p", "HK1::glc::atp_to_HK1&glc&atp", "HK1:adp:h:g6p_to_HK1", "HK1:adp:h:g6p_to_adp", "HK1:adp:h:g6p_to_g6p", "HK1:adp:h:g6p_to_h", "HK1_to_HK1:glc:atp", "atp_to_HK1:glc:atp", "glc_to_HK1:glc:atp" }; std::vector<std::string> link_names_dummyNeg = { "HK1::glc::atp_to_HK1", "HK1::glc::atp_to_atp", "HK1::glc::atp_to_glc", "HK1:adp:h:g6p_to_HK1&glc&atp" }; std::vector<std::string> link_names_learnable = { "HK1::adp::h::g6p_to_HK1:adp:h:g6p", "HK1:glc:atp_to_HK1::glc::atp" }; //for (auto& e : model.nodes_) // std::cout << "Node: " << e.second->getName() << std::endl; //for (auto& e : model.links_) // std::cout << "Link: " << e.second->getName() << std::endl; //for (auto& e : model.weights_) // std::cout << "Weight: " << e.second->getName() << std::endl; // check the nodes for (const std::string& node_name : node_names_sumReLU) { BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getName(), node_name); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getModuleName(), "Mod1"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getActivation()->getName(), "ReLUOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getActivationGrad()->getName(), "ReLUGradOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getIntegration()->getName(), "SumOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getIntegrationError()->getName(), "SumErrorOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getIntegrationWeightGrad()->getName(), "SumWeightGradOp"); } for (const std::string& node_name : node_names_minReLU) { BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getName(), node_name); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getModuleName(), "Mod1"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getActivation()->getName(), "ReLUOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getActivationGrad()->getName(), "ReLUGradOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getIntegration()->getName(), "MinOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getIntegrationError()->getName(), "MinErrorOp"); BOOST_CHECK_EQUAL(model.nodes_.at(node_name)->getIntegrationWeightGrad()->getName(), "MinWeightGradOp"); } // check the links for (const std::string& name : link_names_dummyPos) { BOOST_CHECK_EQUAL(model.getLink(name).getName(), name); std::vector<std::string> test = SplitString(name, "_to_"); BOOST_CHECK_EQUAL(model.getLink(name).getSourceNodeName(), test[0]); BOOST_CHECK_EQUAL(model.getLink(name).getSinkNodeName(), test[1]); BOOST_CHECK_EQUAL(model.getLink(name).getModuleName(), "Mod1"); BOOST_CHECK_EQUAL(name, model.getLink(name).getWeightName()); } for (const std::string& name : link_names_dummyNeg) { BOOST_CHECK_EQUAL(model.getLink(name).getName(), name); std::vector<std::string> test = SplitString(name, "_to_"); BOOST_CHECK_EQUAL(model.getLink(name).getSourceNodeName(), test[0]); BOOST_CHECK_EQUAL(model.getLink(name).getSinkNodeName(), test[1]); BOOST_CHECK_EQUAL(model.getLink(name).getModuleName(), "Mod1"); BOOST_CHECK_EQUAL(name, model.getLink(name).getWeightName()); } for (const std::string& name : link_names_learnable) { BOOST_CHECK_EQUAL(model.getLink(name).getName(), name); std::vector<std::string> test = SplitString(name, "_to_"); BOOST_CHECK_EQUAL(model.getLink(name).getSourceNodeName(), test[0]); BOOST_CHECK_EQUAL(model.getLink(name).getSinkNodeName(), test[1]); BOOST_CHECK_EQUAL(model.getLink(name).getModuleName(), "Mod1"); BOOST_CHECK_EQUAL(name, model.getLink(name).getWeightName()); } // check the weights // TODO: test for the correct stoich by changing the stoich of the reactants/products to != 1 for (const std::string& name : link_names_dummyPos) { BOOST_CHECK_EQUAL(model.getWeight(name).getName(), name); BOOST_CHECK_EQUAL(model.getWeight(name).getWeightInitOp()->getName(), "ConstWeightInitOp"); BOOST_CHECK_EQUAL(model.getWeight(name).getWeightInitOp()->getParamsAsStr(), "n:1.000000"); BOOST_CHECK_EQUAL(model.getWeight(name).getSolverOp()->getName(), "DummySolverOp"); BOOST_CHECK_EQUAL(model.getWeight(name).getModuleName(), "Mod1"); } for (const std::string& name : link_names_dummyNeg) { BOOST_CHECK_EQUAL(model.getWeight(name).getName(), name); BOOST_CHECK_EQUAL(model.getWeight(name).getWeightInitOp()->getName(), "ConstWeightInitOp"); BOOST_CHECK_EQUAL(model.getWeight(name).getWeightInitOp()->getParamsAsStr(), "n:-1.000000"); BOOST_CHECK_EQUAL(model.getWeight(name).getSolverOp()->getName(), "DummySolverOp"); BOOST_CHECK_EQUAL(model.getWeight(name).getModuleName(), "Mod1"); } for (const std::string& name : link_names_learnable) { BOOST_CHECK_EQUAL(model.getWeight(name).getName(), name); BOOST_CHECK_EQUAL(model.getWeight(name).getWeightInitOp()->getName(), "RandWeightInitOp"); BOOST_CHECK_EQUAL(model.getWeight(name).getSolverOp()->getName(), "SGDOp"); BOOST_CHECK_EQUAL(model.getWeight(name).getModuleName(), "Mod1"); } } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_NODETENSORDATA_H #define EVONET_NODETENSORDATA_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> #endif #include <unsupported/Eigen/CXX11/Tensor> #include <EvoNet/ml/Node.h> #include <memory> //#include <cereal/access.hpp> // serialiation of private members //#include <cereal/types/memory.hpp> //#undef min // clashes with std::limit on windows in polymorphic.hpp //#undef max // clashes with std::limit on windows in polymorphic.hpp //#include <cereal/types/polymorphic.hpp> namespace EvoNet { /** @brief Network NodeMatrixData */ template<typename TensorT, typename DeviceT> class NodeTensorData { public: NodeTensorData() = default; ///< Default constructor NodeTensorData(const NodeTensorData& other) { h_input_ = other.h_input_; h_output_ = other.h_output_; h_error_ = other.h_error_; h_derivative_ = other.h_derivative_; h_dt_ = other.h_dt_; d_input_ = other.d_input_; d_output_ = other.d_output_; d_error_ = other.d_error_; d_derivative_ = other.d_derivative_; d_dt_ = other.d_dt_; batch_size_ = other.batch_size_; memory_size_ = other.memory_size_; layer_size_ = other.layer_size_; h_input_updated_ = other.h_input_updated_; h_output_updated_ = other.h_output_updated_; h_error_updated_ = other.h_error_updated_; h_derivative_updated_ = other.h_derivative_updated_; h_dt_updated_ = other.h_dt_updated_; d_input_updated_ = other.d_input_updated_; d_output_updated_ = other.d_output_updated_; d_error_updated_ = other.d_error_updated_; d_derivative_updated_ = other.d_derivative_updated_; d_dt_updated_ = other.d_dt_updated_; }; ~NodeTensorData() = default; ///< Default destructor inline bool operator==(const NodeTensorData& other) const { return std::tie( ) == std::tie( ) ; } inline bool operator!=(const NodeTensorData& other) const { return !(*this == other); } inline NodeTensorData& operator=(const NodeTensorData& other) { h_input_ = other.h_input_; h_output_ = other.h_output_; h_error_ = other.h_error_; h_derivative_ = other.h_derivative_; h_dt_ = other.h_dt_; d_input_ = other.d_input_; d_output_ = other.d_output_; d_error_ = other.d_error_; d_derivative_ = other.d_derivative_; d_dt_ = other.d_dt_; batch_size_ = other.batch_size_; memory_size_ = other.memory_size_; layer_size_ = other.layer_size_; h_input_updated_ = other.h_input_updated_; h_output_updated_ = other.h_output_updated_; h_error_updated_ = other.h_error_updated_; h_derivative_updated_ = other.h_derivative_updated_; h_dt_updated_ = other.h_dt_updated_; d_input_updated_ = other.d_input_updated_; d_output_updated_ = other.d_output_updated_; d_error_updated_ = other.d_error_updated_; d_derivative_updated_ = other.d_derivative_updated_; d_dt_updated_ = other.d_dt_updated_; return *this; } void setBatchSize(const int& batch_size) { batch_size_ = batch_size; } void setMemorySize(const int& memory_size) { memory_size_ = memory_size; } void setLayerSize(const int& layer_size) { layer_size_ = layer_size; } void setLayerIntegration(const std::string& layer_integration) { layer_integration_ = layer_integration; } int getBatchSize() const { return batch_size_; } int getMemorySize() const { return memory_size_; } int getLayerSize() const { return layer_size_; } std::string getLayerIntegration() const { return layer_integration_; } virtual void setInput(const Eigen::Tensor<TensorT, 3>& input) = 0; ///< input setter Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> getInput() { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> input(h_input_.get(), batch_size_, memory_size_, layer_size_); return input; }; ///< input copy getter std::shared_ptr<TensorT[]> getHInputPointer() { return h_input_; }; ///< input pointer getter std::shared_ptr<TensorT[]> getDInputPointer() { return d_input_; }; ///< input pointer getter virtual void setOutput(const Eigen::Tensor<TensorT, 3>& output) = 0; ///< output setter Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> getOutput() { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> output(h_output_.get(), batch_size_, memory_size_, layer_size_); return output; }; ///< output copy getter std::shared_ptr<TensorT[]> getHOutputPointer() { return h_output_; }; ///< output pointer getter std::shared_ptr<TensorT[]> getDOutputPointer() { return d_output_; }; ///< output pointer getter virtual void setError(const Eigen::Tensor<TensorT, 3>& error) = 0; ///< error setter Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> getError() { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error(h_error_.get(), batch_size_, memory_size_, layer_size_); return error; }; ///< error copy getter std::shared_ptr<TensorT[]> getHErrorPointer() { return h_error_; }; ///< error pointer getter std::shared_ptr<TensorT[]> getDErrorPointer() { return d_error_; }; ///< error pointer getter virtual void setDerivative(const Eigen::Tensor<TensorT, 3>& derivative) = 0; ///< derivative setter Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> getDerivative() { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> derivative(h_derivative_.get(), batch_size_, memory_size_, layer_size_); return derivative; }; ///< derivative copy getter std::shared_ptr<TensorT[]> getHDerivativePointer() { return h_derivative_; }; ///< derivative pointer getter std::shared_ptr<TensorT[]> getDDerivativePointer() { return d_derivative_; }; ///< derivative pointer getter virtual void setDt(const Eigen::Tensor<TensorT, 3>& dt) = 0; ///< dt setter Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> getDt() { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> dt(h_dt_.get(), batch_size_, memory_size_, layer_size_); return dt; }; ///< dt copy getter std::shared_ptr<TensorT[]> getHDtPointer() { return h_dt_; }; ///< dt pointer getter std::shared_ptr<TensorT[]> getDDtPointer() { return d_dt_; }; ///< dt pointer getter size_t getTensorSize() { return batch_size_ * memory_size_ * layer_size_ * sizeof(TensorT); }; ///< Get the size of each tensor in bytes void initNodeTensorData(const int& batch_size, const int& memory_size, const int& layer_size, const NodeType& node_type, const std::string& node_integration, const bool& train); ///< initialize the node according to node type virtual bool syncHAndDInput(DeviceT& device) = 0; virtual bool syncHAndDOutput(DeviceT& device) = 0; virtual bool syncHAndDError(DeviceT& device) = 0; virtual bool syncHAndDDerivative(DeviceT& device) = 0; virtual bool syncHAndDDt(DeviceT& device) = 0; std::pair<bool, bool> getInputStatus() { return std::make_pair(h_input_updated_, d_input_updated_); }; std::pair<bool, bool> getOutputStatus() { return std::make_pair(h_output_updated_, d_output_updated_); }; std::pair<bool, bool> getErrorStatus() { return std::make_pair(h_error_updated_, d_error_updated_); }; std::pair<bool, bool> getDerivativeStatus() { return std::make_pair(h_derivative_updated_, d_derivative_updated_); }; std::pair<bool, bool> getDtStatus() { return std::make_pair(h_dt_updated_, d_dt_updated_); }; protected: int batch_size_ = 1; ///< Mini batch size int memory_size_ = 2; ///< Memory size int layer_size_ = 1; ///< Layer size /** @brief output, error and derivative have the following dimensions: rows: # of samples, cols: # of time steps where the number of samples spans 0 to n samples and the number of time steps spans m time points to 0 */ std::shared_ptr<TensorT[]> h_input_ = nullptr; std::shared_ptr<TensorT[]> h_output_ = nullptr; std::shared_ptr<TensorT[]> h_error_ = nullptr; std::shared_ptr<TensorT[]> h_derivative_ = nullptr; std::shared_ptr<TensorT[]> h_dt_ = nullptr; // [TODO: change to drop probability] std::shared_ptr<TensorT[]> d_input_ = nullptr; std::shared_ptr<TensorT[]> d_output_ = nullptr; std::shared_ptr<TensorT[]> d_error_ = nullptr; std::shared_ptr<TensorT[]> d_derivative_ = nullptr; std::shared_ptr<TensorT[]> d_dt_ = nullptr; bool h_input_updated_ = false; bool h_output_updated_ = false; bool h_error_updated_ = false; bool h_derivative_updated_ = false; bool h_dt_updated_ = false; bool d_input_updated_ = false; bool d_output_updated_ = false; bool d_error_updated_ = false; bool d_derivative_updated_ = false; bool d_dt_updated_ = false; std::string layer_integration_; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(batch_size_, memory_size_, layer_size_, // h_input_, h_output_, h_error_, h_derivative_, h_dt_, // d_input_, d_output_, d_error_, d_derivative_, d_dt_, // h_input_updated_, h_output_updated_, h_error_updated_, h_derivative_updated_, h_dt_updated_, // d_input_updated_, d_output_updated_, d_error_updated_, d_derivative_updated_, d_dt_updated_); // } }; template<typename TensorT, typename DeviceT> inline void NodeTensorData<TensorT, DeviceT>::initNodeTensorData(const int& batch_size, const int& memory_size, const int& layer_size, const NodeType& node_type, const std::string& node_integration, const bool& train) { setBatchSize(batch_size); setMemorySize(memory_size); setLayerSize(layer_size); setLayerIntegration(node_integration); // Template zero and one tensor Eigen::Tensor<TensorT, 3> zero(batch_size, memory_size, layer_size); zero.setConstant((TensorT)0); Eigen::Tensor<TensorT, 3> one(batch_size, memory_size, layer_size); one.setConstant((TensorT)1); // set the input, error, and derivatives setError(zero); setDerivative(zero); setDt(one); //// set Drop probabilities [TODO: broke when adding NodeData...] //if (train) { // setDt(one.unaryExpr(RandBinaryOp<TensorT>(getDropProbability()))); //} else { // setDt(one); //} // corrections for specific node types if (node_type == NodeType::bias) { setOutput(one); setInput(zero); } else if (node_type == NodeType::input) { setOutput(zero); // Check for `node_integration` == "ProdOp" and set to 1 setInput(zero); } else if (node_type == NodeType::zero) { setOutput(zero); setInput(zero); } else if (node_integration == "ProdOp" || node_integration == "ProdSCOp") { setOutput(zero); setInput(one); } else { setOutput(zero); setInput(zero); } } template<typename TensorT> class NodeTensorDataCpu : public NodeTensorData<TensorT, Eigen::DefaultDevice> { public: void setInput(const Eigen::Tensor<TensorT, 3>& input) { TensorT* h_input = new TensorT[this->batch_size_*this->memory_size_*this->layer_size_]; // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> input_copy(h_input, this->batch_size_, this->memory_size_, this->layer_size_); input_copy = input; //auto h_deleter = [&](TensorT* ptr) { delete[] ptr; }; //this->h_input_.reset(h_input, h_deleter); this->h_input_.reset(h_input); this->h_input_updated_ = true; this->d_input_updated_ = true; }; ///< input setter void setOutput(const Eigen::Tensor<TensorT, 3>& output) { TensorT* h_output = new TensorT[this->batch_size_*this->memory_size_*this->layer_size_]; // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> output_copy(h_output, this->batch_size_, this->memory_size_, this->layer_size_); output_copy = output; //auto h_deleter = [&](TensorT* ptr) { delete[] ptr; }; //this->h_output_.reset(h_output, h_deleter); this->h_output_.reset(h_output); this->h_output_updated_ = true; this->d_output_updated_ = true; }; ///< output setter void setError(const Eigen::Tensor<TensorT, 3>& error) { TensorT* h_error = new TensorT[this->batch_size_*this->memory_size_*this->layer_size_]; // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_copy(h_error, this->batch_size_, this->memory_size_, this->layer_size_); error_copy = error; //auto h_deleter = [&](TensorT* ptr) { delete[] ptr; }; //this->h_error_.reset(h_error, h_deleter); this->h_error_.reset(h_error); this->h_error_updated_ = true; this->d_error_updated_ = true; }; ///< error setter void setDerivative(const Eigen::Tensor<TensorT, 3>& derivative) { TensorT* h_derivative = new TensorT[this->batch_size_*this->memory_size_*this->layer_size_]; // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> derivative_copy(h_derivative, this->batch_size_, this->memory_size_, this->layer_size_); derivative_copy = derivative; //auto h_deleter = [&](TensorT* ptr) { delete[] ptr; }; //this->h_derivative_.reset(h_derivative, h_deleter); this->h_derivative_.reset(h_derivative); this->h_derivative_updated_ = true; this->d_derivative_updated_ = true; }; ///< derivative setter void setDt(const Eigen::Tensor<TensorT, 3>& dt) { TensorT* h_dt = new TensorT[this->batch_size_*this->memory_size_*this->layer_size_]; // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> dt_copy(h_dt, this->batch_size_, this->memory_size_, this->layer_size_); dt_copy = dt; //auto h_deleter = [&](TensorT* ptr) { delete[] ptr; }; //this->h_dt_.reset(h_dt, h_deleter); this->h_dt_.reset(h_dt); this->h_dt_updated_ = true; this->d_dt_updated_ = true; }; ///< dt setter bool syncHAndDInput(Eigen::DefaultDevice& device) { return true; } bool syncHAndDOutput(Eigen::DefaultDevice& device) { return true; } bool syncHAndDError(Eigen::DefaultDevice& device) { return true; } bool syncHAndDDerivative(Eigen::DefaultDevice& device) { return true; } bool syncHAndDDt(Eigen::DefaultDevice& device) { return true; } //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<NodeTensorData<TensorT, Eigen::DefaultDevice>>(this)); // } }; #if COMPILE_WITH_CUDA template<typename TensorT> class NodeTensorDataGpu : public NodeTensorData<TensorT, Eigen::GpuDevice> { public: void setInput(const Eigen::Tensor<TensorT, 3>& input) { // allocate cuda and pinned host memory TensorT* d_input; TensorT* h_input; assert(cudaMalloc((void**)(&d_input), getTensorSize()) == cudaSuccess); assert(cudaHostAlloc((void**)(&h_input), getTensorSize(), cudaHostAllocDefault ) == cudaSuccess); // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> input_copy(h_input, this->batch_size_, this->memory_size_, this->layer_size_); input_copy = input; // define the deleters auto h_deleter = [&](TensorT* ptr) { assert(cudaFreeHost(ptr) == cudaSuccess); }; auto d_deleter = [&](TensorT* ptr) { assert(cudaFree(ptr) == cudaSuccess); }; this->h_input_.reset(h_input, h_deleter); this->d_input_.reset(d_input, d_deleter); this->h_input_updated_ = true; this->d_input_updated_ = false; }; ///< input setter void setOutput(const Eigen::Tensor<TensorT, 3>& output) { // allocate cuda and pinned host memory TensorT* d_output; TensorT* h_output; assert(cudaMalloc((void**)(&d_output), getTensorSize()) == cudaSuccess); assert(cudaHostAlloc((void**)(&h_output), getTensorSize(), cudaHostAllocDefault ) == cudaSuccess); // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> output_copy(h_output, this->batch_size_, this->memory_size_, this->layer_size_); output_copy = output; // define the deleters auto h_deleter = [&](TensorT* ptr) { assert(cudaFreeHost(ptr) == cudaSuccess); }; auto d_deleter = [&](TensorT* ptr) { assert(cudaFree(ptr) == cudaSuccess); }; this->h_output_.reset(h_output, h_deleter); this->d_output_.reset(d_output, d_deleter); this->h_output_updated_ = true; this->d_output_updated_ = false; }; ///< output setter void setError(const Eigen::Tensor<TensorT, 3>& error) { // allocate cuda and pinned host memory TensorT* d_error; TensorT* h_error; assert(cudaMalloc((void**)(&d_error), getTensorSize()) == cudaSuccess); assert(cudaHostAlloc((void**)(&h_error), getTensorSize(), cudaHostAllocDefault ) == cudaSuccess); // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_copy(h_error, this->batch_size_, this->memory_size_, this->layer_size_); error_copy = error; // define the deleters auto h_deleter = [&](TensorT* ptr) { assert(cudaFreeHost(ptr) == cudaSuccess); }; auto d_deleter = [&](TensorT* ptr) { assert(cudaFree(ptr) == cudaSuccess); }; this->h_error_.reset(h_error, h_deleter); this->d_error_.reset(d_error, d_deleter); this->h_error_updated_ = true; this->d_error_updated_ = false; }; ///< error setter void setDerivative(const Eigen::Tensor<TensorT, 3>& derivative) { // allocate cuda and pinned host memory TensorT* d_derivative; TensorT* h_derivative; assert(cudaMalloc((void**)(&d_derivative), getTensorSize()) == cudaSuccess); assert(cudaHostAlloc((void**)(&h_derivative), getTensorSize(), cudaHostAllocDefault ) == cudaSuccess); // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> derivative_copy(h_derivative, this->batch_size_, this->memory_size_, this->layer_size_); derivative_copy = derivative; // define the deleters auto h_deleter = [&](TensorT* ptr) { assert(cudaFreeHost(ptr) == cudaSuccess); }; auto d_deleter = [&](TensorT* ptr) { assert(cudaFree(ptr) == cudaSuccess); }; this->h_derivative_.reset(h_derivative, h_deleter); this->d_derivative_.reset(d_derivative, d_deleter); this->h_derivative_updated_ = true; this->d_derivative_updated_ = false; }; ///< derivative setter void setDt(const Eigen::Tensor<TensorT, 3>& dt) { // allocate cuda and pinned host memory TensorT* d_dt; TensorT* h_dt; assert(cudaMalloc((void**)(&d_dt), getTensorSize()) == cudaSuccess); assert(cudaHostAlloc((void**)(&h_dt), getTensorSize(), cudaHostAllocDefault ) == cudaSuccess); // copy the tensor Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> dt_copy(h_dt, this->batch_size_, this->memory_size_, this->layer_size_); dt_copy = dt; // define the deleters auto h_deleter = [&](TensorT* ptr) { assert(cudaFreeHost(ptr) == cudaSuccess); }; auto d_deleter = [&](TensorT* ptr) { assert(cudaFree(ptr) == cudaSuccess); }; this->h_dt_.reset(h_dt, h_deleter); this->d_dt_.reset(d_dt, d_deleter); this->h_dt_updated_ = true; this->d_dt_updated_ = false; }; ///< dt setter bool syncHAndDInput(Eigen::GpuDevice& device){ if (this->h_input_updated_ && !this->d_input_updated_) { device.memcpyHostToDevice(this->d_input_.get(), this->h_input_.get(), getTensorSize()); this->d_input_updated_ = true; this->h_input_updated_ = false; return true; } else if (!this->h_input_updated_ && this->d_input_updated_) { device.memcpyDeviceToHost(this->h_input_.get(), this->d_input_.get(), getTensorSize()); this->h_input_updated_ = true; this->d_input_updated_ = false; return true; } else { std::cout << "Both host and device are syncHAndDronized." << std::endl; return false; } } bool syncHAndDOutput(Eigen::GpuDevice& device){ if (this->h_output_updated_ && !this->d_output_updated_) { device.memcpyHostToDevice(this->d_output_.get(), this->h_output_.get(), getTensorSize()); this->d_output_updated_ = true; this->h_output_updated_ = false; return true; } else if (!this->h_output_updated_ && this->d_output_updated_) { device.memcpyDeviceToHost(this->h_output_.get(), this->d_output_.get(), getTensorSize()); this->h_output_updated_ = true; this->d_output_updated_ = false; return true; } else { std::cout << "Both host and device are syncHAndDronized." << std::endl; return false; } } bool syncHAndDError(Eigen::GpuDevice& device){ if (this->h_error_updated_ && !this->d_error_updated_) { device.memcpyHostToDevice(this->d_error_.get(), this->h_error_.get(), getTensorSize()); this->d_error_updated_ = true; this->h_error_updated_ = false; return true; } else if (!this->h_error_updated_ && this->d_error_updated_) { device.memcpyDeviceToHost(this->h_error_.get(), this->d_error_.get(), getTensorSize()); this->h_error_updated_ = true; this->d_error_updated_ = false; return true; } else { std::cout << "Both host and device are syncHAndDronized." << std::endl; return false; } } bool syncHAndDDerivative(Eigen::GpuDevice& device){ if (this->h_derivative_updated_ && !this->d_derivative_updated_) { device.memcpyHostToDevice(this->d_derivative_.get(), this->h_derivative_.get(), getTensorSize()); this->d_derivative_updated_ = true; this->h_derivative_updated_ = false; return true; } else if (!this->h_derivative_updated_ && this->d_derivative_updated_) { device.memcpyDeviceToHost(this->h_derivative_.get(), this->d_derivative_.get(), getTensorSize()); this->h_derivative_updated_ = true; this->d_derivative_updated_ = false; return true; } else { std::cout << "Both host and device are syncHAndDronized." << std::endl; return false; } return true; } bool syncHAndDDt(Eigen::GpuDevice& device){ if (this->h_dt_updated_ && !this->d_dt_updated_) { device.memcpyHostToDevice(this->d_dt_.get(), this->h_dt_.get(), getTensorSize()); this->d_dt_updated_ = true; this->h_dt_updated_ = false; return true; } else if (!this->h_dt_updated_ && this->d_dt_updated_) { device.memcpyDeviceToHost(this->h_dt_.get(), this->d_dt_.get(), getTensorSize()); this->h_dt_updated_ = true; this->d_dt_updated_ = false; return true; } else { std::cout << "Both host and device are syncHAndDronized." << std::endl; return false; } return true; } //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<NodeTensorData<TensorT, Eigen::GpuDevice>>(this)); // } }; #endif } //CEREAL_REGISTER_TYPE(EvoNet::NodeTensorDataCpu<float>); //// TODO: add double, int, etc. //#if COMPILE_WITH_CUDA //CEREAL_REGISTER_TYPE(EvoNet::NodeTensorDataGpu<float>); //// TODO: add double, int, etc. //#endif #endif //EVONET_NODETENSORDATA_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_CHROMATOGRAMSIMULATOR_H #define EVONET_CHROMATOGRAMSIMULATOR_H // .h #include <EvoNet/simulator/EMGModel.h> #include <EvoNet/simulator/PeakSimulator.h> #include <EvoNet/simulator/DataSimulator.h> namespace EvoNet { /** @brief A class to generate points that represent an LC-MS, GC-MS, or HPLC chromatogram */ template <typename TensorT> class ChromatogramSimulator: public DataSimulator<TensorT> { public: ChromatogramSimulator() = default; ///< Default constructor ~ChromatogramSimulator() = default; ///< Default destructor /** @brief Simulates a chromatogram. A random number of peaks with random properties are generated and combined into a chromatogram. Based on the parameters chosen fewer peaks may actually be made. This could be caused by neighboring baselines that are higher than the peak. The actual best left and right pairs that define the peaks will be returned. @param[out] x_noise_IO A vector of x values representing time or m/z @param[out] y_noise_IO A vector of y values representing the intensity at time t or m/z m @param[out] x_IO A vector of x values representing time or m/z @param[out] y_IO A vector of y values representing the intensity at time t or m/z m @param[out] peaks_LR A vector of best left and best right pairs @param[out] peaks_apices A vector of peak apices @param[out] emgs_O A vector of final EMG parameters used to make the peaks @param[in] step_size_mu @param[in] step_size_sigma @param[in] chrom_window_size The lower and upper bounds for the maximum size of the chromatogram @param[in] noise_mu @param[in] noise_sigma @param[in] baseline_height The lower and upper bounds of the baseline heights @param[in] n_peaks The number of peaks in the chromatogram @param[in] emg_h @param[in] emg_tau @param[in] emg_mu_offset The lower and upper bounds for the Distance +/- from the peak window center @param[in] emg_sigma */ void simulateChromatogram(std::vector<TensorT>& x_O, std::vector<TensorT>& y_O, std::vector<TensorT>& x_noise_O, std::vector<TensorT>& y_noise_O, std::vector<std::pair<TensorT, TensorT>>& peaks_LR, std::vector<TensorT>& peak_apices, std::vector<EMGModel<TensorT>>& emgs_O, const std::pair<TensorT, TensorT>& step_size_mu, const std::pair<TensorT, TensorT>& step_size_sigma, const std::pair<TensorT, TensorT>& chrom_window_size, const std::pair<TensorT, TensorT>& noise_mu, const std::pair<TensorT, TensorT>& noise_sigma, const std::pair<TensorT, TensorT>& baseline_height, const std::pair<TensorT, TensorT>& n_peaks, const std::pair<TensorT, TensorT>& emg_h, const std::pair<TensorT, TensorT>& emg_tau, const std::pair<TensorT, TensorT>& emg_mu_offset, const std::pair<TensorT, TensorT>& emg_sigma, TensorT saturation_limit = (TensorT)100) const; /** @brief Makes a chromatogram. The left baseline of the first peak window will define the left baseline of the chromatogram, while the right baseline of the last peak window will define the right baseline of the chromatogram. Peaks in the middle can overlap, but only the highest intensity of the overlapped peak will be kept similar to the behavior captured by the total ion chromatogram (TIC) or extract ion chromatogram (XIC). Peak windows can also be disconnected. Gaps in peak windows will be filled by extending the right baseline of the left most peak to the beginning of the left baseline of the right most peak. @example peak 1: noisy baseline that will extend to the actual first peak peak 2: the actual first peak. peak 3: next peak that may or may not be baseline seperated from the first peak ... peak n: noise baseline that will extend from the last peak to the end of the chromatogram window @param[out] x_IO A vector of x values representing time or m/z @param[out] y_IO A vector of y values representing the intensity at time t or m/z m @param[out] peaks_LR Vector of best left/right pairs @param[out] peak_apices Vector of peak apices @param[in] peaks list of PeakSimulator classes that will compose the chromatogram @param[in] emgs list of corresponding EMGModel classes that define each peak */ void makeChromatogram(std::vector<TensorT>& x_O, std::vector<TensorT>& y_O, std::vector<std::pair<TensorT, TensorT>>& peaks_LR, std::vector<TensorT>& peak_apices, const std::vector<PeakSimulator<TensorT>>& peaks, const std::vector<EMGModel<TensorT>>& emgs) const; /** @brief Joins peak windows. Overlapping or disconnected peak windows will be joined by extending the highest connecting baseline. @param[in,out] peak_left Left peak @param[in,out] emg_left Left peak EMGModel @param[in,out] peak_right Right peak @param[in,out] emg_right Right peak EMGModel */ void joinPeakWindows( PeakSimulator<TensorT>& peak_left, EMGModel<TensorT>& emg_left, PeakSimulator<TensorT>& peak_right, EMGModel<TensorT>& emg_right) const; /** @brief Find the overlap between two peak windows. The point of overlap between two peaks will be returned. @param[in,out] peak_left Left peak @param[in,out] emg_left Left peak EMGModel @param[in,out] peak_right Right peak @param[in,out] emg_right Right peak EMGModel @returns overlap The point at which both peaks overlap */ TensorT findPeakOverlap( const PeakSimulator<TensorT>& peak_left, const EMGModel<TensorT>& emg_left, const PeakSimulator<TensorT>& peak_right, const EMGModel<TensorT>& emg_right) const; }; template <typename TensorT> TensorT ChromatogramSimulator<TensorT>::findPeakOverlap( const PeakSimulator<TensorT>& peak_left, const EMGModel<TensorT>& emg_left, const PeakSimulator<TensorT>& peak_right, const EMGModel<TensorT>& emg_right) const { std::vector<TensorT> x_left, y_left, x_right, y_right; PeakSimulator<TensorT> peak_l = peak_left; PeakSimulator<TensorT> peak_r = peak_right; // move windows just to the overlapping region peak_l.setWindowStart(peak_r.getWindowStart()); peak_r.setWindowEnd(peak_l.getWindowEnd()); // simulate the peaks for the overlapping regions peak_l.simulatePeak(x_left, y_left, emg_left); peak_r.simulatePeak(x_right, y_right, emg_right); // find the highest point where the peaks cross TensorT x_overlap = peak_left.getWindowEnd(); TensorT y_overlap = (TensorT)0.0; for (int i = x_right.size() - 1; i >= 0; --i) { // iterate in reverse order to extend the left peak for (int j = x_left.size() - 1; j >= 0; --j) { if (x_right[i] <= x_left[j] && y_right[i] <= y_left[j]) { if (y_overlap < y_right[i]) { y_overlap = y_right[i]; x_overlap = x_right[i]; } } } } return x_overlap; } template <typename TensorT> void ChromatogramSimulator<TensorT>::joinPeakWindows( PeakSimulator<TensorT>& peak_left, EMGModel<TensorT>& emg_left, PeakSimulator<TensorT>& peak_right, EMGModel<TensorT>& emg_right) const { // Check order of left and right peaks if (peak_left.getWindowStart() > peak_right.getWindowStart() && peak_left.getWindowEnd() > peak_right.getWindowEnd()) { // peaks are swapped std::cout << "Left and right peaks are swapped!" << std::endl; std::swap(peak_left, peak_right); std::swap(emg_left, emg_right); } const TensorT x_delta = peak_right.getWindowStart() - peak_left.getWindowEnd(); const TensorT y_delta = peak_right.getBaselineLeft() - peak_left.getBaselineRight(); if (x_delta >= 0.0 && y_delta <= 0.0) { // Non overlapping windows; Left baseline is higher // increase the right peak baseline to match the left peak baseline peak_right.setBaselineLeft(peak_left.getBaselineRight()); // extend left baseline to right baseline using the left peak sample rate peak_left.setWindowEnd(peak_right.getWindowStart()); } else if (x_delta >= 0.0 && y_delta > 0.0) { // Non overlapping windows; Left baseline is lower // increase the left peak baseline to match the right peak baseline peak_left.setBaselineRight(peak_right.getBaselineLeft()); // extend the left baseline using the left peak sample rate peak_left.setWindowEnd(peak_right.getWindowStart()); } else if (x_delta < 0.0 && y_delta <= 0.0) { // Overlapping windows; Left baseline is higher // increase the right peak baseline to match the left peak baseline peak_right.setBaselineLeft(peak_left.getBaselineRight()); // find the overlap const TensorT overlap = findPeakOverlap( peak_left, emg_left, peak_right, emg_right ); peak_right.setWindowStart(overlap); peak_left.setWindowEnd(overlap); } else if (x_delta < 0.0 && y_delta > 0.0) { // Overlapping windows; Right baseline is higher // increase the left peak baseline to match the right peak baseline peak_left.setBaselineRight(peak_right.getBaselineLeft()); // find the overlap const TensorT overlap = findPeakOverlap( peak_left, emg_left, peak_right, emg_right ); peak_right.setWindowStart(overlap); peak_left.setWindowEnd(overlap); } } template<typename TensorT> inline void ChromatogramSimulator<TensorT>::simulateChromatogram(std::vector<TensorT>& x_O, std::vector<TensorT>& y_O, std::vector<TensorT>& x_noise_O, std::vector<TensorT>& y_noise_O, std::vector<std::pair<TensorT, TensorT>>& peaks_LR, std::vector<TensorT>& peak_apices, std::vector<EMGModel<TensorT>>& emgs_O, const std::pair<TensorT, TensorT>& step_size_mu, const std::pair<TensorT, TensorT>& step_size_sigma, const std::pair<TensorT, TensorT>& chrom_window_size, const std::pair<TensorT, TensorT>& noise_mu, const std::pair<TensorT, TensorT>& noise_sigma, const std::pair<TensorT, TensorT>& baseline_height, const std::pair<TensorT, TensorT>& n_peaks, const std::pair<TensorT, TensorT>& emg_h, const std::pair<TensorT, TensorT>& emg_tau, const std::pair<TensorT, TensorT>& emg_mu_offset, const std::pair<TensorT, TensorT>& emg_sigma, TensorT saturation_limit) const { // lampda for choosing a random number within l/u bounds auto random_bounds = [](const TensorT& lb, const TensorT& ub)->TensorT { std::random_device rd; // obtain a random number from hardware std::mt19937 eng(rd()); // seed the generator std::uniform_int_distribution<> distr(lb, ub); // define the range return (TensorT)distr(eng); }; // determine the chrom window size, saturation limits, and number of peaks TensorT chrom_window_size_rand = random_bounds(chrom_window_size.first, chrom_window_size.second); TensorT n_peaks_rand = random_bounds(n_peaks.first, n_peaks.second); TensorT peak_window_length = chrom_window_size_rand / n_peaks_rand; // determine the sampling rate TensorT step_size_mu_rand = random_bounds(step_size_mu.first, step_size_mu.second); TensorT step_size_sigma_rand = random_bounds(step_size_sigma.first, step_size_sigma.second); // generate a random set of peaks std::vector<PeakSimulator<TensorT>> peaks, peaks_noise; emgs_O.clear(); for (int peak_iter = 0; peak_iter < n_peaks_rand; ++peak_iter) { // Define the peak TensorT baseline_left = random_bounds(baseline_height.first, baseline_height.second); TensorT baseline_right = random_bounds(baseline_height.first, baseline_height.second); TensorT noise_mu_rand = random_bounds(noise_mu.first, noise_mu.second); TensorT noise_sigma_rand = random_bounds(noise_sigma.first, noise_sigma.second); TensorT peak_start = (TensorT)peak_iter * peak_window_length; TensorT peak_end = (TensorT)(peak_iter + 1) * peak_window_length; peaks.push_back(PeakSimulator<TensorT>(step_size_mu_rand, (TensorT)0, peak_start, peak_end, (TensorT)0, (TensorT)0, baseline_left, baseline_right, saturation_limit)); peaks_noise.push_back(PeakSimulator<TensorT>(step_size_mu_rand, step_size_sigma_rand, peak_start, peak_end, noise_mu_rand, noise_sigma_rand, baseline_left, baseline_right, saturation_limit)); // Define the EMG generator TensorT h = random_bounds(emg_h.first, emg_h.second); TensorT tau = random_bounds(emg_tau.first, emg_tau.second); TensorT mu = random_bounds(emg_mu_offset.first, emg_mu_offset.second) + ((peak_end - peak_start) / (TensorT)2 + peak_start); TensorT sigma = random_bounds(emg_sigma.first, emg_sigma.second); emgs_O.push_back(EMGModel<TensorT>(h, tau, mu, sigma)); } // make the chromatogram makeChromatogram(x_O, y_O, peaks_LR, peak_apices, peaks, emgs_O); makeChromatogram(x_noise_O, y_noise_O, std::vector<std::pair<TensorT, TensorT>>() = {}, std::vector<TensorT>() = {}, peaks_noise, emgs_O); } template <typename TensorT> void ChromatogramSimulator<TensorT>::makeChromatogram(std::vector<TensorT>& x_O, std::vector<TensorT>& y_O, std::vector<std::pair<TensorT, TensorT>>& peaks_LR, std::vector<TensorT>& peak_apices, const std::vector<PeakSimulator<TensorT>>& peaks, const std::vector<EMGModel<TensorT>>& emgs) const { // check vector lengths if (peaks.size() != emgs.size()) { std::cout << "Length of peaks vectors is not equal to length of EMGs vector!" << std::endl; std::cout << "There are " << peaks.size() << " peaks and " << emgs.size() << " EMGs." << std::endl; return; } // clear any potential input in x and y vectors x_O.clear(); y_O.clear(); peaks_LR.clear(); peak_apices.clear(); // Order the list of peaks from lowest to highest emg_mu std::vector<std::pair<PeakSimulator<TensorT>, EMGModel<TensorT>>> peak_emg_pairs; for (int i = 0; i < emgs.size(); ++i) { const std::pair<PeakSimulator<TensorT>, EMGModel<TensorT>> peak_emg(peaks[i], emgs[i]); peak_emg_pairs.push_back(peak_emg); } std::sort(peak_emg_pairs.begin(), peak_emg_pairs.end(), [](std::pair<PeakSimulator<TensorT>, EMGModel<TensorT>> lhs, std::pair<PeakSimulator<TensorT>, EMGModel<TensorT>> rhs) { return lhs.second.getMu() < rhs.second.getMu(); //ascending order } ); // Join the peaks in order if (peak_emg_pairs.size() > 1) { for (int i = 1; i < peak_emg_pairs.size(); ++i) { joinPeakWindows(peak_emg_pairs[i - 1].first, peak_emg_pairs[i - 1].second, peak_emg_pairs[i].first, peak_emg_pairs[i].second); } } // Add the peaks in order for (int i = 0; i < peak_emg_pairs.size(); ++i) { // make the first peak std::vector<TensorT> x, y; peak_emg_pairs[i].first.simulatePeak(x, y, peak_emg_pairs[i].second); // extract out the peak apex peak_apices.push_back(peak_emg_pairs[i].second.getMu()); // Determine the best left/right std::pair<TensorT, TensorT> best_lr = peak_emg_pairs[i].first.getBestLeftAndRight(x, y, peak_emg_pairs[i].second.getMu()); if (best_lr.first != best_lr.second != 0) peaks_LR.push_back(best_lr); // extend the chromatogram x_O.reserve(x_O.size() + distance(x.begin(), x.end())); x_O.insert(x_O.end(), x.begin(), x.end()); y_O.reserve(y_O.size() + distance(y.begin(), y.end())); y_O.insert(y_O.end(), y.begin(), y.end()); } } } #endif //EVONET_CHROMATOGRAMSIMULATOR_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_WEIGHT_H #define EVONET_WEIGHT_H // .h #include <EvoNet/ml/Solver.h> #include <EvoNet/ml/WeightInit.h> #include <unsupported/Eigen/CXX11/Tensor> #include <memory> #include <tuple> #include <string> #include <cereal/access.hpp> // serialiation of private members #include <cereal/types/memory.hpp> #include <cereal/types/map.hpp> #include <cereal/types/tuple.hpp> #include <cereal/types/utility.hpp> // std::pair #include <cereal/types/vector.hpp> // .cpp #include <vector> #include <cmath> #include <iostream> namespace EvoNet { /** @brief Directed Network Weight */ template<typename TensorT> class Weight { public: Weight() = default; ///< Default constructor Weight(const Weight& other); ///< Copy constructor // [TODO: add test] Weight(const int& id); ///< Explicit constructor Weight(const std::string& name); ///< Explicit constructor Weight(const int& id, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver); ///< Explicit constructor Weight(const std::string& name, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver); ///< Explicit constructor ~Weight() = default; ///< Default destructor inline bool operator==(const Weight& other) const { return std::tie( id_, name_, weight_, init_weight_, //weight_init_->getName(), //solver_->getName(), module_id_, module_name_, tensor_index_ ) == std::tie( other.id_, other.name_, other.weight_, other.init_weight_, //other.weight_init_->getName(), //other.solver_->getName(), other.module_id_, other.module_name_, other.tensor_index_ ) ; } inline bool operator!=(const Weight& other) const { return !(*this == other); } inline Weight& operator=(const Weight& other) { // [TODO: add test] id_ = other.id_; name_ = other.name_; module_id_ = other.module_id_; module_name_ = other.module_name_; layer_name_ = other.layer_name_; tensor_index_ = other.tensor_index_; weight_ = other.weight_; init_weight_ = other.init_weight_; weight_init_ = other.weight_init_; solver_ = other.solver_; weight_min_ = other.weight_min_; weight_max_ = other.weight_max_; drop_probability_ = other.drop_probability_; drop_ = other.drop_; return *this; } void setId(const int& id); ///< id setter int getId() const; ///< id getter void setName(const std::string& name); ///< naem setter std::string getName() const; ///< name getter void setWeight(const TensorT& weight); ///< weight setter TensorT getWeight() const; ///< weight getter void setWeightInitOp(const std::shared_ptr<WeightInitOp<TensorT>>& weight_init); ///< weight initialization operator setter WeightInitOp<TensorT>* getWeightInitOp() const; ///< weight initialization operator getter void setSolverOp(const std::shared_ptr<SolverOp<TensorT>>& solver); ///< weight update operator setter std::shared_ptr<SolverOp<TensorT>> getSolverOpShared() const; ///< weight update operator getter SolverOp<TensorT>* getSolverOp() const; ///< weight update operator getter void setWeightMin(const TensorT& weight_min); ///< min weight setter void setWeightMax(const TensorT& weight_max); ///< max weight setter void setModuleId(const int& module_id); ///< module id setter int getModuleId() const; ///< module id getter void setModuleName(const std::string& module_name); ///< module name setter std::string getModuleName() const; ///< module name getter void setDropProbability(const TensorT& drop_probability); ///< drop_probability setter TensorT getDropProbability() const; ///< drop_probability getter void setDrop(const TensorT& drop); ///< drop setter TensorT getDrop() const; ///< drop getter void setInitWeight(const bool& drop); ///< init_weight setter bool getInitWeight() const; ///< init_weight getter void addTensorIndex(const std::tuple<int, int, int>& layer_id); ///< layer id setter std::vector<std::tuple<int, int, int>> getTensorIndex() const; ///< layer id getter void clearTensorIndex(); void setLayerName(const std::string& layer_name); ///< layer name setter std::string getLayerName() const; ///< layer name getter /** @brief Initializes the weight. */ void initWeight(); private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(id_, name_, module_id_, module_name_, tensor_index_, layer_name_, weight_, init_weight_, weight_init_, solver_, weight_min_, weight_max_); } int id_ = -1; ///< Weight ID std::string name_ = ""; ///< Weight Name int module_id_ = -1; ///< Module ID std::string module_name_ = ""; ///<Module Name std::string layer_name_ = ""; ///< Layer name std::vector<std::tuple<int, int, int>> tensor_index_; ///< Layer ID: tuple consisting of OperationsList index and source/sink Layer index(used internally by Model) std::shared_ptr<WeightInitOp<TensorT>> weight_init_; ///< weight initialization operator std::shared_ptr<SolverOp<TensorT>> solver_; ///< weight update operator TensorT weight_ = TensorT(1); bool init_weight_ = true; ///< whether to initialize the weight or use the provided value of `weight_` TensorT weight_min_ = TensorT(-1.0e6); TensorT weight_max_ = TensorT(1.0e6); TensorT drop_probability_ = TensorT(0.0); TensorT drop_ = TensorT(1); }; template<typename TensorT> inline Weight<TensorT>::Weight(const Weight<TensorT>& other) { id_ = other.id_; name_ = other.name_; weight_ = other.weight_; init_weight_ = other.init_weight_; module_id_ = other.module_id_; module_name_ = other.module_name_; layer_name_ = other.layer_name_; tensor_index_ = other.tensor_index_; setWeightInitOp(std::shared_ptr<WeightInitOp<TensorT>>(other.weight_init_.get()->copy())); setSolverOp(std::shared_ptr<SolverOp<TensorT>>(other.solver_.get()->copy())); weight_min_ = other.weight_min_; weight_max_ = other.weight_max_; drop_probability_ = other.drop_probability_; drop_ = other.drop_; } template<typename TensorT> inline Weight<TensorT>::Weight(const int& id) : id_(id) { if (name_ == "") { name_ = std::to_string(id); } } template<typename TensorT> inline Weight<TensorT>::Weight(const std::string& name) : name_(name) { } template<typename TensorT> inline Weight<TensorT>::Weight(const int& id, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver) : id_(id) { if (name_ == "") { name_ = std::to_string(id); } setWeightInitOp(weight_init); setSolverOp(solver); } template<typename TensorT> inline Weight<TensorT>::Weight(const std::string& name, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver) : name_(name) { setWeightInitOp(weight_init); setSolverOp(solver); } template<typename TensorT> inline void Weight<TensorT>::setId(const int& id) { id_ = id; if (name_ == "") { name_ = std::to_string(id); } } template<typename TensorT> inline int Weight<TensorT>::getId() const { return id_; } template<typename TensorT> inline void Weight<TensorT>::setName(const std::string& name) { name_ = name; } template<typename TensorT> inline std::string Weight<TensorT>::getName() const { return name_; } //template<typename TensorT> //void Weight<TensorT>::setWeight(const TensorT& weight) //{ // weight_data_->setWeight(weight); //} //template<typename TensorT> //TensorT Weight<TensorT>::getWeightView() const //{ // return weight_data_->getWeight()(0); // //return weight_ * getDrop(); //} //template<typename TensorT> //TensorT Weight<TensorT>::getWeight() //{ // return weight_data_->getWeight()(0); // //return weight_ * getDrop(); //} template<typename TensorT> inline void Weight<TensorT>::setWeight(const TensorT& weight) { weight_ = weight; } template<typename TensorT> inline TensorT Weight<TensorT>::getWeight() const { return weight_; } template<typename TensorT> inline void Weight<TensorT>::setWeightInitOp(const std::shared_ptr<WeightInitOp<TensorT>>& weight_init) { weight_init_.reset(); weight_init_ = std::move(weight_init); } template<typename TensorT> inline WeightInitOp<TensorT>* Weight<TensorT>::getWeightInitOp() const { return weight_init_.get(); } template<typename TensorT> inline void Weight<TensorT>::setSolverOp(const std::shared_ptr<SolverOp<TensorT>>& solver) { solver_.reset(); solver_ = std::move(solver); } template<typename TensorT> inline std::shared_ptr<SolverOp<TensorT>> Weight<TensorT>::getSolverOpShared() const { return solver_; } template<typename TensorT> inline SolverOp<TensorT>* Weight<TensorT>::getSolverOp() const { return solver_.get(); } template<typename TensorT> inline void Weight<TensorT>::setWeightMin(const TensorT& weight_min) { weight_min_ = weight_min; } template<typename TensorT> inline void Weight<TensorT>::setWeightMax(const TensorT& weight_max) { weight_max_ = weight_max; } template<typename TensorT> inline void Weight<TensorT>::setModuleId(const int & module_id) { module_id_ = module_id; } template<typename TensorT> inline int Weight<TensorT>::getModuleId() const { return module_id_; } template<typename TensorT> inline void Weight<TensorT>::setModuleName(const std::string & module_name) { module_name_ = module_name; } template<typename TensorT> inline std::string Weight<TensorT>::getModuleName() const { return module_name_; } template<typename TensorT> inline void Weight<TensorT>::setLayerName(const std::string & layer_name) { layer_name_ = layer_name; } template<typename TensorT> inline std::string Weight<TensorT>::getLayerName() const { return layer_name_; } template<typename TensorT> inline void Weight<TensorT>::setDropProbability(const TensorT & drop_probability) { drop_probability_ = drop_probability; //RandBinaryOp<TensorT> rand_bin(drop_probability_); //setDrop(rand_bin((TensorT)1)); } template<typename TensorT> inline TensorT Weight<TensorT>::getDropProbability() const { return drop_probability_; } template<typename TensorT> inline void Weight<TensorT>::setDrop(const TensorT & drop) { drop_ = drop; } template<typename TensorT> inline TensorT Weight<TensorT>::getDrop() const { return drop_; } template<typename TensorT> inline void Weight<TensorT>::setInitWeight(const bool & init_weight) { init_weight_ = init_weight; } template<typename TensorT> inline bool Weight<TensorT>::getInitWeight() const { return init_weight_; } template<typename TensorT> inline void Weight<TensorT>::addTensorIndex(const std::tuple<int, int, int>& layer_id) { tensor_index_.push_back(layer_id); } template<typename TensorT> inline std::vector<std::tuple<int, int, int>> Weight<TensorT>::getTensorIndex() const { return tensor_index_; } template<typename TensorT> inline void Weight<TensorT>::clearTensorIndex() { tensor_index_.clear(); } template<typename TensorT> inline void Weight<TensorT>::initWeight() { weight_ = weight_init_->operator()(); } } #endif //EVONET_WEIGHT_H<file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/PopulationTrainerDefaultDevice.h> #include <EvoNet/ml/ModelTrainerDefaultDevice.h> #include <EvoNet/ml/ModelReplicator.h> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/ml/Model.h> #include <EvoNet/io/PopulationTrainerFile.h> #include <EvoNet/io/ModelInterpreterFileDefaultDevice.h> #include <EvoNet/simulator/MNISTSimulator.h> #include <unsupported/Eigen/CXX11/Tensor> using namespace EvoNet; // Extended classes template<typename TensorT> class ModelTrainerExt : public ModelTrainerDefaultDevice<TensorT> { public: /* @brief Multi-head self-attention dot product classifier @param[in, out] model The network model @param[in] n_inputs The number of pixel input nodes @param[in] n_outputs The number of classification output nodes @param[in] n_heads A vector of the the number of attention heads per attention layer @param[in] key_query_values_lengths A vector of the key/query/values lengths per attention layer @param[in] model_lengths A vector of the attention model lengths per attention layer @param[in] add_FC Optional fully connected layer between attention heads @param[in] add_skip Optional skip connections between attention layers @param[in] add_feature_norm Optional normalization layer between attention layers */ void makeMultiHeadDotProdAttention(Model<TensorT>& model, const int& n_inputs, const int& n_outputs, std::vector<int> n_heads = { 8, 8 }, std::vector<int> key_query_values_lengths = { 48, 24 }, std::vector<int> model_lengths = { 48, 24 }, bool add_FC = true, bool add_skip = true, bool add_feature_norm = false, bool specify_layers = true) { model.setId(0); model.setName("DotProdAttent"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layers); // Define the activation based on `add_feature_norm` std::shared_ptr<ActivationOp<TensorT>> activation, activation_grad; if (add_feature_norm) { activation = std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()); activation_grad = std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()); } else { activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); } std::shared_ptr<ActivationOp<TensorT>> activation_feature_norm = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_feature_norm_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-3, 0.9, 0.999, 1e-8, 10)); // Multi-head attention std::vector<std::string> node_names; for (size_t i = 0; i < n_heads.size(); ++i) { // Add the attention std::string name_head1 = "Attention" + std::to_string(i); node_names = model_builder.addMultiHeadAttention(model, name_head1, name_head1, node_names_input, node_names_input, node_names_input, n_heads[i], "DotProd", model_lengths[i], key_query_values_lengths[i], key_query_values_lengths[i], activation, activation_grad, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names_input.size(), 2)), solver_op, 0.0f, 0.0f, false, specify_layers); if (add_feature_norm) { std::string norm_name = "Norm" + std::to_string(i); node_names = model_builder.addNormalization(model, norm_name, norm_name, node_names, specify_layers); node_names = model_builder.addSinglyConnected(model, norm_name + "-gain", norm_name + "-gain", node_names, node_names.size(), activation_feature_norm, activation_feature_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } node_names_input = node_names; // Add the feedforward net if (add_FC) { std::string norm_name = "FC" + std::to_string(i); node_names = model_builder.addFullyConnected(model, norm_name, norm_name, node_names_input, n_inputs, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names_input.size(), 2)), solver_op, 0.0f, 0.0f, false, specify_layers); } if (add_skip) { std::string skip_name = "Skip_FC" + std::to_string(i); model_builder.addSinglyConnected(model, skip_name, node_names_input, node_names, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(n_inputs, 2)), solver_op, 0.0f); } if (add_feature_norm) { std::string norm_name = "Norm_FC" + std::to_string(i); node_names = model_builder.addNormalization(model, norm_name, norm_name, node_names, specify_layers); node_names = model_builder.addSinglyConnected(model, norm_name + "-gain", norm_name + "-gain", node_names, node_names.size(), activation_feature_norm, activation_feature_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } node_names_input = node_names; } // Add the FC layer node_names = model_builder.addFullyConnected(model, "FC-Out", "FC-Out", node_names, n_outputs, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 2)), solver_op, 0.0f, 0.0f, false, true); // Add the actual output nodes node_names = model_builder.addSinglyConnected(model, "Output", "Output", node_names, n_outputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); for (const std::string& node_name : node_names) model.nodes_.at(node_name)->setType(NodeType::output); } void adaptiveTrainerScheduler( const int& n_generations, const int& n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, const std::vector<float>& model_errors) { if (n_epochs % 1000 == 0 /*&& n_epochs != 0*/) { // save the model every 1000 epochs model_interpreter.getModelResults(model, false, true, false, false); //// Save to .csv //data.storeModelCsv(model.getName() + "_" + std::to_string(n_epochs) + "_nodes.csv", // model.getName() + "_" + std::to_string(n_epochs) + "_links.csv", // model.getName() + "_" + std::to_string(n_epochs) + "_weights.csv", model); // Save to binary ModelFile<TensorT> data; data.storeModelBinary(model.getName() + "_" + std::to_string(n_epochs) + "_model.binary", model); ModelInterpreterFileDefaultDevice<TensorT> interpreter_data; interpreter_data.storeModelInterpreterBinary(model.getName() + "_" + std::to_string(n_epochs) + "_interpreter.binary", model_interpreter); } } void trainingModelLogger(const int& n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error_train, const TensorT& model_error_test, const Eigen::Tensor<TensorT, 1>& model_metrics_train, const Eigen::Tensor<TensorT, 1>& model_metrics_test) override { // Set the defaults model_logger.setLogTimeEpoch(true); model_logger.setLogTrainValMetricEpoch(true); model_logger.setLogExpectedEpoch(false); model_logger.setLogNodeOutputsEpoch(false); model_logger.setLogNodeInputsEpoch(false); // initialize all logs if (n_epochs == 0) { model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_logger.initLogs(model); } // Per n epoch logging if (n_epochs % 1000 == 0) { model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_interpreter.getModelResults(model, true, false, false, false); } // Create the metric headers and data arrays std::vector<std::string> log_train_headers = { "Train_Error" }; std::vector<std::string> log_test_headers = { "Test_Error" }; std::vector<TensorT> log_train_values = { model_error_train }; std::vector<TensorT> log_test_values = { model_error_test }; int metric_iter = 0; for (const std::string& metric_name : this->metric_names_) { log_train_headers.push_back(metric_name); log_test_headers.push_back(metric_name); log_train_values.push_back(model_metrics_train(metric_iter)); log_test_values.push_back(model_metrics_test(metric_iter)); ++metric_iter; } model_logger.writeLogs(model, n_epochs, log_train_headers, log_test_headers, log_train_values, log_test_values, output_nodes, expected_values, {}, output_nodes, {}, input_nodes, {}); } }; template<typename TensorT> class DataSimulatorExt : public MNISTSimulator<TensorT> { public: void simulateTrainingData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps)override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); assert(n_output_nodes == this->validation_labels.dimension(1)); assert(n_input_nodes == this->validation_data.dimension(1)); // make a vector of sample_indices Eigen::Tensor<int, 1> sample_indices = this->getTrainingIndices(batch_size, n_epochs); // Reformat the input data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int epochs_iter = 0; epochs_iter < n_epochs; ++epochs_iter) { for (int nodes_iter = 0; nodes_iter < this->training_data.dimension(1); ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = this->training_data(sample_indices[epochs_iter*batch_size + batch_iter], nodes_iter); //input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = this->training_data(sample_indices[0], nodes_iter); // test on only 1 sample } for (int nodes_iter = 0; nodes_iter < this->training_labels.dimension(1); ++nodes_iter) { output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = (TensorT)this->training_labels(sample_indices[epochs_iter*batch_size + batch_iter], nodes_iter); //output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = (TensorT)this->training_labels(sample_indices[0], nodes_iter); // test on only 1 sample } } } } time_steps.setConstant(1.0f); } void simulateValidationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps)override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); assert(n_output_nodes == this->validation_labels.dimension(1)); assert(n_input_nodes == this->validation_data.dimension(1)); // make the start and end sample indices Eigen::Tensor<int, 1> sample_indices = this->getValidationIndices(batch_size, n_epochs); // Reformat the input data for validation for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int epochs_iter = 0; epochs_iter < n_epochs; ++epochs_iter) { for (int nodes_iter = 0; nodes_iter < this->validation_data.dimension(1); ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = this->validation_data(sample_indices[epochs_iter*batch_size + batch_iter], nodes_iter); } for (int nodes_iter = 0; nodes_iter < this->validation_labels.dimension(1); ++nodes_iter) { output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = (TensorT)this->validation_labels(sample_indices[epochs_iter*batch_size + batch_iter], nodes_iter); } } } } time_steps.setConstant(1.0f); } void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps)override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); assert(n_output_nodes == 2 * this->training_labels.dimension(1)); assert(n_metric_output_nodes == this->training_labels.dimension(1)); assert(n_input_nodes == 784); assert(memory_size == 1); // make the start and end sample indices [BUG FREE] Eigen::Tensor<int, 1> sample_indices = this->getTrainingIndices(batch_size, 1); // Reformat the input data for training [BUG FREE] for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = this->training_data(sample_indices[batch_iter], nodes_iter); } for (int nodes_iter = 0; nodes_iter < this->training_labels.dimension(1); ++nodes_iter) { loss_output_data(batch_iter, memory_iter, nodes_iter) = (TensorT)this->training_labels(sample_indices[batch_iter], nodes_iter); loss_output_data(batch_iter, memory_iter, nodes_iter + this->training_labels.dimension(1)) = (TensorT)this->training_labels(sample_indices[batch_iter], nodes_iter); metric_output_data(batch_iter, memory_iter, nodes_iter) = (TensorT)this->training_labels(sample_indices[batch_iter], nodes_iter); } } } } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps)override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); assert(n_output_nodes == 2 * this->validation_labels.dimension(1)); assert(n_metric_output_nodes == this->validation_labels.dimension(1)); assert(n_input_nodes == 784); assert(memory_size == 1); // make the start and end sample indices Eigen::Tensor<int, 1> sample_indices = this->getValidationIndices(batch_size, 1); // Reformat the input data for validation for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = this->validation_data(sample_indices[batch_iter], nodes_iter); } for (int nodes_iter = 0; nodes_iter < this->validation_labels.dimension(1); ++nodes_iter) { loss_output_data(batch_iter, memory_iter, nodes_iter) = (TensorT)this->validation_labels(sample_indices[batch_iter], nodes_iter); loss_output_data(batch_iter, memory_iter, nodes_iter + this->validation_labels.dimension(1)) = (TensorT)this->validation_labels(sample_indices[batch_iter], nodes_iter); metric_output_data(batch_iter, memory_iter, nodes_iter) = (TensorT)this->validation_labels(sample_indices[batch_iter], nodes_iter); } } } } }; template<typename TensorT> class ModelReplicatorExt : public ModelReplicator<TensorT> {}; template<typename TensorT> class PopulationTrainerExt : public PopulationTrainerDefaultDevice<TensorT> {}; /** @brief Image classification MNIST example whereby all pixels are linearized and read into the model. The model then attempts to classify the image using a Dot product attention architecture Data processing: - whole image pixels (linearized) 28x28 normalized to 0 to 1 - classifier (1 hot vector from 0 to 9) */ void main_MNIST(const std::string& data_dir, const bool& make_model, const bool& train_model) { const int n_hard_threads = std::thread::hardware_concurrency(); const int n_threads = 1; // define the populatin trainer PopulationTrainerExt<float> population_trainer; population_trainer.setNGenerations(1); population_trainer.setLogging(false); // define the population logger PopulationLogger<float> population_logger(true, true); // define the model logger ModelLogger<float> model_logger(true, true, false, false, false, false, false, false); // define the data simulator const std::size_t input_size = 784; const std::size_t training_data_size = 60000; //60000; const std::size_t validation_data_size = 10000; //10000; DataSimulatorExt<float> data_simulator; // read in the training data std::string training_data_filename = data_dir + "train-images.idx3-ubyte"; std::string training_labels_filename = data_dir + "train-labels.idx1-ubyte"; data_simulator.readData(training_data_filename, training_labels_filename, true, training_data_size, input_size); // read in the validation data std::string validation_data_filename = data_dir + "t10k-images.idx3-ubyte"; std::string validation_labels_filename = data_dir + "t10k-labels.idx1-ubyte"; data_simulator.readData(validation_data_filename, validation_labels_filename, false, validation_data_size, input_size); data_simulator.unitScaleData(); // Make the input nodes std::vector<std::string> input_nodes; for (int i = 0; i < input_size; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } // Make the output nodes std::vector<std::string> output_nodes; for (int i = 0; i < data_simulator.mnist_labels.size(); ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes.push_back(name); } // define the model trainers and resources for the trainers std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; for (size_t i = 0; i < n_threads; ++i) { ModelResources model_resources = { ModelDevice(0, 1) }; ModelInterpreterDefaultDevice<float> model_interpreter(model_resources); model_interpreters.push_back(model_interpreter); } ModelTrainerExt<float> model_trainer; model_trainer.setBatchSize(128); model_trainer.setMemorySize(1); model_trainer.setNEpochsTraining(100001); model_trainer.setVerbosityLevel(1); model_trainer.setLogging(true); model_trainer.setFindCycles(false); model_trainer.setFastInterpreter(true); model_trainer.setLossFunctions({ std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-24, 0.0)), std::make_shared<CrossEntropyWithLogitsLossOp<float>>(CrossEntropyWithLogitsLossOp<float>()) }); model_trainer.setLossFunctionGrads({ std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-24, 0.0)), std::make_shared<CrossEntropyWithLogitsLossGradOp<float>>(CrossEntropyWithLogitsLossGradOp<float>()) }); model_trainer.setLossOutputNodes({ output_nodes, output_nodes }); model_trainer.setMetricFunctions({ std::make_shared<PrecisionMCMicroOp<float>>(PrecisionMCMicroOp<float>()) }); model_trainer.setMetricOutputNodes({ output_nodes }); model_trainer.setMetricNames({ "PrecisionMCMicro" }); // define the model replicator for growth mode ModelReplicatorExt<float> model_replicator; // define the initial population std::cout << "Initializing the population..." << std::endl; Model<float> model; if (make_model) { model_trainer.makeMultiHeadDotProdAttention(model, input_nodes.size(), output_nodes.size(), { 2,2 }, { 24,24 }, { 48, 48 }, true, false, true, true); // Test model //model_trainer.makeMultiHeadDotProdAttention(model, input_nodes.size(), output_nodes.size(), { 8, 8 }, { 48, 24 }, { 256, 128 }, false, false, false, true); // Solving model //model_trainer.makeMultiHeadDotProdAttention(model, input_nodes.size(), output_nodes.size(), { 12, 8 }, { 48, 24 }, { 512, 128 }, false, false, false); // Solving model } else { // read in the trained model std::cout << "Reading in the model..." << std::endl; const std::string model_filename = data_dir + "DotProdAtt_model.binary"; const std::string interpreter_filename = data_dir + "DotProdAtt_interpreter.binary"; ModelFile<float> model_file; model_file.loadModelBinary(model_filename, model); model.setId(1); model.setName("DotProdAtt1"); ModelInterpreterFileDefaultDevice<float> model_interpreter_file; model_interpreter_file.loadModelInterpreterBinary(interpreter_filename, model_interpreters[0]); } //std::vector<Model<float>> population = { model }; if (train_model) { // Train the model std::cout << "Training the model..." << std::endl; std::pair<std::vector<float>, std::vector<float>> model_errors = model_trainer.trainModel(model, data_simulator, input_nodes, model_logger, model_interpreters.front()); //// Evolve the population //std::vector<std::vector<std::tuple<int, std::string, float>>> models_validation_errors_per_generation = population_trainer.evolveModels( // population, model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, population_logger, input_nodes); //PopulationTrainerFile<float> population_trainer_file; //population_trainer_file.storeModels(population, "MNIST"); //population_trainer_file.storeModelValidations("MNISTErrors.csv", models_validation_errors_per_generation); } else { //// Evaluate the population //population_trainer.evaluateModels( // population, model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, input_nodes); } } int main(int argc, char** argv) { // Parse the user commands std::string data_dir = "C:/Users/dmccloskey/Documents/GitHub/mnist/"; //std::string data_dir = "/home/user/data/"; //std::string data_dir = "C:/Users/domccl/GitHub/mnist/"; bool make_model = true, train_model = true; if (argc >= 2) { data_dir = argv[1]; } if (argc >= 3) { make_model = (argv[2] == std::string("true")) ? true : false; } if (argc >= 4) { train_model = (argv[3] == std::string("true")) ? true : false; } // run the application main_MNIST(data_dir, make_model, train_model); return 0; }<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_NODEFILE_H #define EVONET_NODEFILE_H // .h #include <EvoNet/ml/Node.h> #include <unsupported/Eigen/CXX11/Tensor> #include <iostream> #include <fstream> #include <vector> // .cpp #include <EvoNet/io/csv.h> #include <EvoNet/io/CSVWriter.h> #include <cereal/archives/binary.hpp> #include <cereal/types/map.hpp> namespace EvoNet { /** @brief NodeFile */ template<typename TensorT> class NodeFile { public: NodeFile() = default; ///< Default constructor ~NodeFile() = default; ///< Default destructor /** @brief Load nodes from file @param filename The name of the nodes file @param nodes The nodes to load data into @returns Status True on success, False if not */ bool loadNodesBinary(const std::string& filename, std::map<std::string, std::shared_ptr<Node<TensorT>>>& nodes); bool loadNodesCsv(const std::string& filename, std::map<std::string, std::shared_ptr<Node<TensorT>>>& nodes); /** @brief Load nodes from file @param filename The name of the nodes file @param nodes The nodes to load data into @returns Status True on success, False if not */ bool storeNodesBinary(const std::string& filename, std::map<std::string, std::shared_ptr<Node<TensorT>>>& nodes); bool storeNodesCsv(const std::string& filename, std::map<std::string, std::shared_ptr<Node<TensorT>>>& nodes); }; template<typename TensorT> inline bool NodeFile<TensorT>::loadNodesBinary(const std::string& filename, std::map<std::string, std::shared_ptr<Node<TensorT>>>& nodes) { std::ifstream ifs(filename, std::ios::binary); if (ifs.is_open()) { cereal::BinaryInputArchive iarchive(ifs); iarchive(nodes); ifs.close(); } return true; } template<typename TensorT> inline bool NodeFile<TensorT>::loadNodesCsv(const std::string & filename, std::map<std::string, std::shared_ptr<Node<TensorT>>>& nodes) { nodes.clear(); io::CSVReader<12> nodes_in(filename); nodes_in.read_header(io::ignore_extra_column, "node_name", "node_type", "node_status", "node_activation", "node_activation_grad", "node_integration", "node_integration_error", "node_integration_weight_grad", "module_name", "layer_name", "tensor_number", "tensor_position"); std::string node_name, node_type_str, node_status_str, node_activation_str, node_activation_grad_str, node_integration_str, node_integration_error_str, node_integration_weight_grad_str, module_name_str, layer_name_str, tensor_number_str, tensor_position_str = ""; while (nodes_in.read_row(node_name, node_type_str, node_status_str, node_activation_str, node_activation_grad_str, node_integration_str, node_integration_error_str, node_integration_weight_grad_str, module_name_str, layer_name_str, tensor_number_str, tensor_position_str)) { // parse the node_type NodeType node_type; if (node_type_str == "hidden") node_type = NodeType::hidden; else if (node_type_str == "output") node_type = NodeType::output; else if (node_type_str == "input") node_type = NodeType::input; else if (node_type_str == "bias") node_type = NodeType::bias; else if (node_type_str == "recursive") node_type = NodeType::recursive; else if (node_type_str == "unmodifiable") node_type = NodeType::unmodifiable; else if (node_type_str == "zero") node_type = NodeType::zero; else std::cout << "NodeType for node_name " << node_name << " was not recognized." << std::endl; // parse the node_status NodeStatus node_status; if (node_status_str == "deactivated") node_status = NodeStatus::deactivated; else if (node_status_str == "initialized") node_status = NodeStatus::initialized; else if (node_status_str == "activated") node_status = NodeStatus::activated; else if (node_status_str == "corrected") node_status = NodeStatus::corrected; else std::cout << "NodeStatus for node_name " << node_name << " was not recognized." << std::endl; // parse the node_activation std::shared_ptr<ActivationOp<TensorT>> node_activation; if (node_activation_str == "ReLUOp") node_activation.reset(new ReLUOp<TensorT>()); else if (node_activation_str == "ELUOp") node_activation.reset(new ELUOp<TensorT>()); else if (node_activation_str == "LinearOp") node_activation.reset(new LinearOp<TensorT>()); else if (node_activation_str == "SigmoidOp") node_activation.reset(new SigmoidOp<TensorT>()); else if (node_activation_str == "TanHOp") node_activation.reset(new TanHOp<TensorT>()); else if (node_activation_str == "ExponentialOp") node_activation.reset(new ExponentialOp<TensorT>()); else if (node_activation_str == "InverseOp") node_activation.reset(new InverseOp<TensorT>()); else if (node_activation_str == "LinearOp") node_activation.reset(new LinearOp<TensorT>()); else if (node_activation_str == "LeakyReLUOp") node_activation.reset(new LeakyReLUOp<TensorT>()); // TODO define values else if (node_activation_str == "PowOp") node_activation.reset(new PowOp<TensorT>()); // TODO define values else std::cout << "NodeActivation for node_name " << node_name << " was not recognized." << std::endl; // parse the node_activation std::shared_ptr<ActivationOp<TensorT>> node_activation_grad; if (node_activation_grad_str == "ReLUGradOp") node_activation_grad.reset(new ReLUGradOp<TensorT>()); else if (node_activation_grad_str == "ELUGradOp") node_activation_grad.reset(new ELUGradOp<TensorT>()); else if (node_activation_grad_str == "LinearGradOp") node_activation_grad.reset(new LinearGradOp<TensorT>()); else if (node_activation_grad_str == "SigmoidGradOp") node_activation_grad.reset(new SigmoidGradOp<TensorT>()); else if (node_activation_grad_str == "TanHGradOp") node_activation_grad.reset(new TanHGradOp<TensorT>()); else if (node_activation_grad_str == "ExponentialGradOp") node_activation_grad.reset(new ExponentialGradOp<TensorT>()); else if (node_activation_grad_str == "InverseGradOp") node_activation_grad.reset(new InverseGradOp<TensorT>()); else if (node_activation_grad_str == "LinearGradOp") node_activation_grad.reset(new LinearGradOp<TensorT>()); else if (node_activation_grad_str == "LeakyReLUGradOp") node_activation_grad.reset(new LeakyReLUGradOp<TensorT>()); else if (node_activation_grad_str == "PowGradOp") node_activation_grad.reset(new PowGradOp<TensorT>()); else std::cout << "NodeActivationGrad for node_name " << node_name << " was not recognized." << std::endl; // parse the node_integration std::shared_ptr<IntegrationOp<TensorT>> node_integration; if (node_integration_str == "SumOp") node_integration.reset(new SumOp<TensorT>()); else if (node_integration_str == "ProdOp") node_integration.reset(new ProdOp<TensorT>()); else if (node_integration_str == "MaxOp") node_integration.reset(new MaxOp<TensorT>()); else if (node_integration_str == "MeanOp") node_integration.reset(new MeanOp<TensorT>()); else if (node_integration_str == "VarModOp") node_integration.reset(new VarModOp<TensorT>()); else if (node_integration_str == "CountOp") node_integration.reset(new CountOp<TensorT>()); else std::cout << "NodeIntegration for node_name " << node_name << " was not recognized." << std::endl; // parse the node_integration_error std::shared_ptr<IntegrationErrorOp<TensorT>> node_integration_error; if (node_integration_error_str == "SumErrorOp") node_integration_error.reset(new SumErrorOp<TensorT>()); else if (node_integration_error_str == "ProdErrorOp") node_integration_error.reset(new ProdErrorOp<TensorT>()); else if (node_integration_error_str == "MaxErrorOp") node_integration_error.reset(new MaxErrorOp<TensorT>()); else if (node_integration_error_str == "MeanErrorOp") node_integration_error.reset(new MeanErrorOp<TensorT>()); else if (node_integration_error_str == "VarModErrorOp") node_integration_error.reset(new VarModErrorOp<TensorT>()); else if (node_integration_error_str == "CountErrorOp") node_integration_error.reset(new CountErrorOp<TensorT>()); else std::cout << "NodeIntegrationError for node_name " << node_name << " was not recognized." << std::endl; // parse the node_integration_weight_grad std::shared_ptr<IntegrationWeightGradOp<TensorT>> node_integration_weight_grad; if (node_integration_weight_grad_str == "SumWeightGradOp") node_integration_weight_grad.reset(new SumWeightGradOp<TensorT>()); else if (node_integration_weight_grad_str == "ProdWeightGradOp") node_integration_weight_grad.reset(new ProdWeightGradOp<TensorT>()); else if (node_integration_weight_grad_str == "MaxWeightGradOp") node_integration_weight_grad.reset(new MaxWeightGradOp<TensorT>()); else if (node_integration_weight_grad_str == "MeanWeightGradOp") node_integration_weight_grad.reset(new MeanWeightGradOp<TensorT>()); else if (node_integration_weight_grad_str == "VarModWeightGradOp") node_integration_weight_grad.reset(new VarModWeightGradOp<TensorT>()); else if (node_integration_weight_grad_str == "CountWeightGradOp") node_integration_weight_grad.reset(new CountWeightGradOp<TensorT>()); else std::cout << "NodeIntegrationWeightGrad for node_name " << node_name << " was not recognized." << std::endl; std::shared_ptr<Node<TensorT>> node(new Node<TensorT>(node_name, node_type, node_status, node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad)); // parse tensor specific information node->setModuleName(module_name_str); node->setLayerName(layer_name_str); int tensor_number = -1, tensor_position = -1; tensor_number = std::stoi(tensor_number_str); tensor_position = std::stoi(tensor_position_str); node->setTensorIndex(std::make_pair(tensor_number, tensor_position)); nodes.emplace(node_name,node); } return true; } template<typename TensorT> inline bool NodeFile<TensorT>::storeNodesBinary(const std::string& filename, std::map<std::string, std::shared_ptr<Node<TensorT>>>& nodes) { std::ofstream ofs(filename, std::ios::binary | std::ios::out | std::ios::trunc); //if (ofs.is_open() == false) { // Lines check to make sure the file is not already created cereal::BinaryOutputArchive oarchive(ofs); oarchive(nodes); ofs.close(); //} // Lines check to make sure the file is not already created return true; } template<typename TensorT> inline bool NodeFile<TensorT>::storeNodesCsv(const std::string & filename, std::map<std::string, std::shared_ptr<Node<TensorT>>>& nodes) { CSVWriter csvwriter(filename); // write the headers to the first line const std::vector<std::string> headers = { "node_name", "node_type", "node_status", "node_activation", "node_activation_grad", "node_integration", "node_integration_error", "node_integration_weight_grad", "module_name", "layer_name", "tensor_number", "tensor_position" }; csvwriter.writeDataInRow(headers.begin(), headers.end()); for (const auto& node : nodes) { std::vector<std::string> row; row.push_back(node.second->getName()); // parse the node_type std::string node_type_str = ""; if (node.second->getType() == NodeType::hidden) node_type_str = "hidden"; else if (node.second->getType() == NodeType::output) node_type_str = "output"; else if (node.second->getType() == NodeType::input) node_type_str = "input"; else if (node.second->getType() == NodeType::bias) node_type_str = "bias"; else if (node.second->getType() == NodeType::recursive) node_type_str = "recursive"; else if (node.second->getType() == NodeType::unmodifiable) node_type_str = "unmodifiable"; else if (node.second->getType() == NodeType::zero) node_type_str = "zero"; else std::cout << "NodeType for node_name " << node.second->getName() << " was not recognized." << std::endl; row.push_back(node_type_str); // parse the node_status std::string node_status_str = ""; if (node.second->getStatus() == NodeStatus::deactivated) node_status_str = "deactivated"; else if (node.second->getStatus() == NodeStatus::initialized) node_status_str = "initialized"; else if (node.second->getStatus() == NodeStatus::activated) node_status_str = "activated"; else if (node.second->getStatus() == NodeStatus::corrected) node_status_str = "corrected"; else std::cout << "NodeStatus for node_name " << node.second->getName() << " was not recognized." << std::endl; row.push_back(node_status_str); // parse the node_activation std::string node_activation_str = node.second->getActivation()->getName(); row.push_back(node_activation_str); std::string node_activation_grad_str = node.second->getActivationGrad()->getName(); row.push_back(node_activation_grad_str); // parse the node_integration std::string node_integration_str = node.second->getIntegration()->getName(); row.push_back(node_integration_str); std::string node_integration_error_str = node.second->getIntegrationError()->getName(); row.push_back(node_integration_error_str); std::string node_integration_weight_grad_str = node.second->getIntegrationWeightGrad()->getName(); row.push_back(node_integration_weight_grad_str); row.push_back(node.second->getModuleName()); row.push_back(node.second->getLayerName()); row.push_back(std::to_string(node.second->getTensorIndex().first)); row.push_back(std::to_string(node.second->getTensorIndex().second)); // write to file csvwriter.writeDataInRow(row.begin(), row.end()); } return true; } } #endif //EVONET_NODEFILE_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_EMGMODEL_H #define EVONET_EMGMODEL_H //.cpp #include <cmath> namespace EvoNet { /** @brief A class to generate points following an EMG distribution. References: <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>. (2011). "Reconstruction of chromatographic peaks using the exponentially modified Gaussian function". Journal of Chemometrics. 25 (7): 352. doi:10.1002/cem.1343 <NAME> (1985). "Series for the Exponentially Modified Gaussian Peak Shape". Anal. Chem. 57: 388. doi:10.1021/ac00279a094. <NAME>. (1998). Chromatographic Integration Methods. Royal Society of Chemistry, Information Services. p. 27. ISBN 9780854045105. Retrieved 2015-05-15. */ template <typename TensorT> class EMGModel { /** Notes on potential optimizations: 1. make a virtual class called StatisticalModel 2. make a virtual class called PDF 3. make a virtual class called CDF 4. setters/getters would be unique to each derived class */ public: EMGModel() = default; ///< Default constructor EMGModel(const TensorT& h, const TensorT& tau, const TensorT& mu, const TensorT& sigma); ///< Explicit constructor ~EMGModel() = default; ///< Default destructor void setH(const TensorT& h); ///< EMG h setter TensorT getH() const; ///< EMG h getter void setTau(const TensorT& tau); ///< EMG tau setter TensorT getTau() const; ///< EMG tau getter void setMu(const TensorT& mu); ///< EMG mu setter TensorT getMu() const; ///< EMG mu getter void setSigma(const TensorT& sigma); ///< EMG sigma setter TensorT getSigma() const; ///< EMG sigma getter /** @brief Calculates points from an EMG PDF @param[in] x_I X value of the EMG PDF @returns Y value of the EMG PDF. */ TensorT PDF(const TensorT& x_I) const; protected: /** @brief Calculates points from an EMG PDF using method 1 @param[in] x_I X value of the EMG PDF @returns Y value of the EMG PDF. */ TensorT EMGPDF1_(const TensorT& x_I) const; /** @brief Calculates points from an EMG PDF using method 2 @param[in] x_I X value of the EMG PDF @returns Y value of the EMG PDF. */ TensorT EMGPDF2_(const TensorT& x_I) const; /** @brief Calculates points from an EMG PDF using method 3 @param[in] x_I X value of the EMG PDF @returns Y value of the EMG PDF. */ TensorT EMGPDF3_(const TensorT& x_I) const; /** @brief Calculates the parameter z, which is used to decide which formulation of the EMG PDF to use for calculation. @param[in] x_I X value of the EMG PDF @returns z parameter. */ TensorT z_(const TensorT& x_I) const; private: TensorT emg_h_ = (TensorT)1.0; ///< Amplitude of the Gaussian peak TensorT emg_tau_ = (TensorT)0.1; ///< Exponential relaxation time TensorT emg_mu_ = (TensorT)0.0; ///< Mean of the EMG TensorT emg_sigma_ = (TensorT)1.0; ///< Standard deviation of the EGM }; template <typename TensorT> EMGModel<TensorT>::EMGModel(const TensorT& h, const TensorT& tau, const TensorT& mu, const TensorT& sigma) { emg_h_ = h; emg_tau_ = tau; emg_mu_ = mu; emg_sigma_ = sigma; } template <typename TensorT> void EMGModel<TensorT>::setH(const TensorT& h) { emg_h_ = h; } template <typename TensorT> TensorT EMGModel<TensorT>::getH() const { return emg_h_; } template <typename TensorT> void EMGModel<TensorT>::setTau(const TensorT& tau) { emg_tau_ = tau; } template <typename TensorT> TensorT EMGModel<TensorT>::getTau() const { return emg_tau_; } template <typename TensorT> void EMGModel<TensorT>::setMu(const TensorT& mu) { emg_mu_ = mu; } template <typename TensorT> TensorT EMGModel<TensorT>::getMu() const { return emg_mu_; } template <typename TensorT> void EMGModel<TensorT>::setSigma(const TensorT& sigma) { emg_sigma_ = sigma; } template <typename TensorT> TensorT EMGModel<TensorT>::getSigma() const { return emg_sigma_; } template <typename TensorT> TensorT EMGModel<TensorT>::z_(const TensorT& x_I) const { TensorT z = TensorT(1 / std::sqrt(2)*(emg_sigma_ / emg_tau_ - (x_I - emg_mu_) / emg_sigma_)); return z; } template <typename TensorT> TensorT EMGModel<TensorT>::EMGPDF1_(const TensorT& x_I) const { const TensorT PI = TensorT(3.141592653589793); const TensorT term1a = TensorT(emg_h_ * emg_sigma_ / emg_tau_ * std::sqrt(PI / 2)); const TensorT term2a = TensorT(0.5*std::pow(emg_sigma_ / emg_tau_, 2) - (x_I - emg_mu_) / emg_tau_); const TensorT term3a = TensorT(1 / sqrt(2)*(emg_sigma_ / emg_tau_ - (x_I - emg_mu_) / emg_sigma_)); const TensorT y = TensorT(term1a * std::exp(term2a)*std::erfc(term3a)); return y; } template <typename TensorT> TensorT EMGModel<TensorT>::EMGPDF2_(const TensorT& x_I) const { const TensorT PI = TensorT(3.141592653589793); const TensorT term1a = TensorT(emg_h_ * emg_sigma_ / emg_tau_ * std::sqrt(PI / 2)); const TensorT term2b = TensorT(-0.5*std::pow((x_I - emg_mu_) / emg_sigma_, 2)); const TensorT term3a = TensorT(1 / sqrt(2)*(emg_sigma_ / emg_tau_ - (x_I - emg_mu_) / emg_sigma_)); const TensorT y = TensorT(term1a * std::exp(term2b)*std::exp(std::pow(term3a, 2))*std::erfc(term3a)); return y; } template <typename TensorT> TensorT EMGModel<TensorT>::EMGPDF3_(const TensorT& x_I) const { const TensorT term1b = TensorT(emg_h_); const TensorT term2b = TensorT(-0.5*std::pow((x_I - emg_mu_) / emg_sigma_, 2)); const TensorT term3b = TensorT(1 - (x_I - emg_mu_)*emg_tau_ / std::pow(emg_sigma_, 2)); const TensorT y = TensorT(term1b * std::exp(term2b) / term3b); return y; } template <typename TensorT> TensorT EMGModel<TensorT>::PDF(const TensorT& x_I) const { const TensorT z = z_(x_I); TensorT y = (TensorT)0; if (z < 0) { y = EMGPDF1_(x_I); } else if (z >= 0 && z <= 6.71e7) { y = EMGPDF2_(x_I); } else if (z > 6.71e7) { y = EMGPDF3_(x_I); } return y; } } #endif //EVONET_EMGMODEL_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE MetricFunction test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/MetricFunction.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(MetricFunction1) /** AccuracyBCOp Tests */ BOOST_AUTO_TEST_CASE(constructorAccuracyBCOp) { AccuracyBCOp<double>* ptrMetFunc = nullptr; AccuracyBCOp<double>* nullPointerMetFunc = nullptr; BOOST_CHECK_EQUAL(ptrMetFunc, nullPointerMetFunc); } BOOST_AUTO_TEST_CASE(destructorAccuracyBCOp) { AccuracyBCOp<double>* ptrMetFunc = nullptr; ptrMetFunc = new AccuracyBCOp<double>(); delete ptrMetFunc; } BOOST_AUTO_TEST_CASE(gettersAndSettersAccuracyBCOp) { AccuracyBCOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "AccuracyBCOp"); BOOST_CHECK_EQUAL(operation.getParameters().at(0), 0.5); BOOST_CHECK_CLOSE(operation.getClassificationThreshold(), 0.5, 1e-3); AccuracyBCOp<float> operation2(0.1); BOOST_CHECK_CLOSE(operation2.getClassificationThreshold(), 0.1, 1e-3); } /** AccuracyMCMicroOp Tests */ BOOST_AUTO_TEST_CASE(constructorAccuracyMCMicroOp) { AccuracyMCMicroOp<double>* ptrMetFunc = nullptr; AccuracyMCMicroOp<double>* nullPointerMetFunc = nullptr; BOOST_CHECK_EQUAL(ptrMetFunc, nullPointerMetFunc); } BOOST_AUTO_TEST_CASE(destructorAccuracyMCMicroOp) { AccuracyMCMicroOp<double>* ptrMetFunc = nullptr; ptrMetFunc = new AccuracyMCMicroOp<double>(); delete ptrMetFunc; } BOOST_AUTO_TEST_CASE(gettersAndSettersAccuracyMCMicroOp) { AccuracyMCMicroOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "AccuracyMCMicroOp"); } /** AccuracyMCMacroOp Tests */ BOOST_AUTO_TEST_CASE(constructorAccuracyMCMacroOp) { AccuracyMCMacroOp<double>* ptrMetFunc = nullptr; AccuracyMCMacroOp<double>* nullPointerMetFunc = nullptr; BOOST_CHECK_EQUAL(ptrMetFunc, nullPointerMetFunc); } BOOST_AUTO_TEST_CASE(destructorAccuracyMCMacroOp) { AccuracyMCMacroOp<double>* ptrMetFunc = nullptr; ptrMetFunc = new AccuracyMCMacroOp<double>(); delete ptrMetFunc; } BOOST_AUTO_TEST_CASE(gettersAndSettersAccuracyMCMacroOp) { AccuracyMCMacroOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "AccuracyMCMacroOp"); } /** PrecisionBCOp Tests */ BOOST_AUTO_TEST_CASE(constructorPrecisionBCOp) { PrecisionBCOp<double>* ptrMetFunc = nullptr; PrecisionBCOp<double>* nullPointerMetFunc = nullptr; BOOST_CHECK_EQUAL(ptrMetFunc, nullPointerMetFunc); } BOOST_AUTO_TEST_CASE(destructorPrecisionBCOp) { PrecisionBCOp<double>* ptrMetFunc = nullptr; ptrMetFunc = new PrecisionBCOp<double>(); delete ptrMetFunc; } BOOST_AUTO_TEST_CASE(gettersAndSettersPrecisionBCOp) { PrecisionBCOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "PrecisionBCOp"); BOOST_CHECK_EQUAL(operation.getParameters().at(0), 0.5); BOOST_CHECK_CLOSE(operation.getClassificationThreshold(), 0.5, 1e-3); PrecisionBCOp<float> operation2(0.1); BOOST_CHECK_CLOSE(operation2.getClassificationThreshold(), 0.1, 1e-3); } /** PrecisionMCMicroOp Tests */ BOOST_AUTO_TEST_CASE(constructorPrecisionMCMicroOp) { PrecisionMCMicroOp<double>* ptrMetFunc = nullptr; PrecisionMCMicroOp<double>* nullPointerMetFunc = nullptr; BOOST_CHECK_EQUAL(ptrMetFunc, nullPointerMetFunc); } BOOST_AUTO_TEST_CASE(destructorPrecisionMCMicroOp) { PrecisionMCMicroOp<double>* ptrMetFunc = nullptr; ptrMetFunc = new PrecisionMCMicroOp<double>(); delete ptrMetFunc; } BOOST_AUTO_TEST_CASE(gettersAndSettersPrecisionMCMicroOp) { PrecisionMCMicroOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "PrecisionMCMicroOp"); } /** PrecisionMCMacroOp Tests */ BOOST_AUTO_TEST_CASE(constructorPrecisionMCMacroOp) { PrecisionMCMacroOp<double>* ptrMetFunc = nullptr; PrecisionMCMacroOp<double>* nullPointerMetFunc = nullptr; BOOST_CHECK_EQUAL(ptrMetFunc, nullPointerMetFunc); } BOOST_AUTO_TEST_CASE(destructorPrecisionMCMacroOp) { PrecisionMCMacroOp<double>* ptrMetFunc = nullptr; ptrMetFunc = new PrecisionMCMacroOp<double>(); delete ptrMetFunc; } BOOST_AUTO_TEST_CASE(gettersAndSettersPrecisionMCMacroOp) { PrecisionMCMacroOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "PrecisionMCMacroOp"); } /** RecallBCOp Tests */ BOOST_AUTO_TEST_CASE(constructorRecallBCOp) { RecallBCOp<double>* ptrMetFunc = nullptr; RecallBCOp<double>* nullPointerMetFunc = nullptr; BOOST_CHECK_EQUAL(ptrMetFunc, nullPointerMetFunc); } BOOST_AUTO_TEST_CASE(destructorRecallBCOp) { RecallBCOp<double>* ptrMetFunc = nullptr; ptrMetFunc = new RecallBCOp<double>(); delete ptrMetFunc; } BOOST_AUTO_TEST_CASE(gettersAndSettersRecallBCOp) { RecallBCOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "RecallBCOp"); BOOST_CHECK_EQUAL(operation.getParameters().at(0), 0.5); BOOST_CHECK_CLOSE(operation.getClassificationThreshold(), 0.5, 1e-3); RecallBCOp<float> operation2(0.1); BOOST_CHECK_CLOSE(operation2.getClassificationThreshold(), 0.1, 1e-3); } /** RecallMCMicroOp Tests */ BOOST_AUTO_TEST_CASE(constructorRecallMCMicroOp) { RecallMCMicroOp<double>* ptrMetFunc = nullptr; RecallMCMicroOp<double>* nullPointerMetFunc = nullptr; BOOST_CHECK_EQUAL(ptrMetFunc, nullPointerMetFunc); } BOOST_AUTO_TEST_CASE(destructorRecallMCMicroOp) { RecallMCMicroOp<double>* ptrMetFunc = nullptr; ptrMetFunc = new RecallMCMicroOp<double>(); delete ptrMetFunc; } BOOST_AUTO_TEST_CASE(gettersAndSettersRecallMCMicroOp) { RecallMCMicroOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "RecallMCMicroOp"); } /** RecallMCMacroOp Tests */ BOOST_AUTO_TEST_CASE(constructorRecallMCMacroOp) { RecallMCMacroOp<double>* ptrMetFunc = nullptr; RecallMCMacroOp<double>* nullPointerMetFunc = nullptr; BOOST_CHECK_EQUAL(ptrMetFunc, nullPointerMetFunc); } BOOST_AUTO_TEST_CASE(destructorRecallMCMacroOp) { RecallMCMacroOp<double>* ptrMetFunc = nullptr; ptrMetFunc = new RecallMCMacroOp<double>(); delete ptrMetFunc; } BOOST_AUTO_TEST_CASE(gettersAndSettersRecallMCMacroOp) { RecallMCMacroOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "RecallMCMacroOp"); } /** PredictionBiasOp Tests */ BOOST_AUTO_TEST_CASE(constructorPredictionBiasOp) { PredictionBiasOp<double>* ptrPredictionBias = nullptr; PredictionBiasOp<double>* nullPointerPredictionBias = nullptr; BOOST_CHECK_EQUAL(ptrPredictionBias, nullPointerPredictionBias); } BOOST_AUTO_TEST_CASE(destructorPredictionBiasOp) { PredictionBiasOp<double>* ptrPredictionBias = nullptr; ptrPredictionBias = new PredictionBiasOp<double>(); delete ptrPredictionBias; } BOOST_AUTO_TEST_CASE(gettersAndSettersPredictionBiasOp) { PredictionBiasOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "PredictionBiasOp"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 0); } /** F1ScoreBCOp Tests */ BOOST_AUTO_TEST_CASE(constructorF1ScoreBCOp) { F1ScoreBCOp<double>* ptrF1Score = nullptr; F1ScoreBCOp<double>* nullPointerF1Score = nullptr; BOOST_CHECK_EQUAL(ptrF1Score, nullPointerF1Score); } BOOST_AUTO_TEST_CASE(destructorF1ScoreBCOp) { F1ScoreBCOp<double>* ptrF1Score = nullptr; ptrF1Score = new F1ScoreBCOp<double>(); delete ptrF1Score; } BOOST_AUTO_TEST_CASE(gettersAndSettersF1ScoreBCOp) { F1ScoreBCOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "F1ScoreBCOp"); BOOST_CHECK_EQUAL(operation.getParameters().at(0), 0.5); BOOST_CHECK_CLOSE(operation.getClassificationThreshold(), 0.5, 1e-3); F1ScoreBCOp<float> operation2(0.1); BOOST_CHECK_CLOSE(operation2.getClassificationThreshold(), 0.1, 1e-3); } /** F1ScoreMCMicroOp Tests */ BOOST_AUTO_TEST_CASE(constructorF1ScoreMCMicroOp) { F1ScoreMCMicroOp<double>* ptrF1Score = nullptr; F1ScoreMCMicroOp<double>* nullPointerF1Score = nullptr; BOOST_CHECK_EQUAL(ptrF1Score, nullPointerF1Score); } BOOST_AUTO_TEST_CASE(destructorF1ScoreMCMicroOp) { F1ScoreMCMicroOp<double>* ptrF1Score = nullptr; ptrF1Score = new F1ScoreMCMicroOp<double>(); delete ptrF1Score; } BOOST_AUTO_TEST_CASE(gettersAndSettersF1ScoreMCMicroOp) { F1ScoreMCMicroOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "F1ScoreMCMicroOp"); } /** F1ScoreMCMacroOp Tests */ BOOST_AUTO_TEST_CASE(constructorF1ScoreMCMacroOp) { F1ScoreMCMacroOp<double>* ptrF1Score = nullptr; F1ScoreMCMacroOp<double>* nullPointerF1Score = nullptr; BOOST_CHECK_EQUAL(ptrF1Score, nullPointerF1Score); } BOOST_AUTO_TEST_CASE(destructorF1ScoreMCMacroOp) { F1ScoreMCMacroOp<double>* ptrF1Score = nullptr; ptrF1Score = new F1ScoreMCMacroOp<double>(); delete ptrF1Score; } BOOST_AUTO_TEST_CASE(gettersAndSettersF1ScoreMCMacroOp) { F1ScoreMCMacroOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "F1ScoreMCMacroOp"); } /** AUROCOp Tests */ BOOST_AUTO_TEST_CASE(constructorAUROCOp) { AUROCOp<double>* ptrAUROC = nullptr; AUROCOp<double>* nullPointerAUROC = nullptr; BOOST_CHECK_EQUAL(ptrAUROC, nullPointerAUROC); } BOOST_AUTO_TEST_CASE(destructorAUROCOp) { AUROCOp<double>* ptrAUROC = nullptr; ptrAUROC = new AUROCOp<double>(); delete ptrAUROC; } BOOST_AUTO_TEST_CASE(gettersAndSettersAUROCOp) { AUROCOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "AUROCOp"); BOOST_CHECK_EQUAL(operation.getParameters().at(0), 0.5); BOOST_CHECK_CLOSE(operation.getClassificationThreshold(), 0.5, 1e-3); AUROCOp<float> operation2(0.1); BOOST_CHECK_CLOSE(operation2.getClassificationThreshold(), 0.1, 1e-3); } /** MCCBCOp Tests */ BOOST_AUTO_TEST_CASE(constructorMCCBCOp) { MCCBCOp<double>* ptrMCC = nullptr; MCCBCOp<double>* nullPointerMCC = nullptr; BOOST_CHECK_EQUAL(ptrMCC, nullPointerMCC); } BOOST_AUTO_TEST_CASE(destructorMCCBCOp) { MCCBCOp<double>* ptrMCC = nullptr; ptrMCC = new MCCBCOp<double>(); delete ptrMCC; } BOOST_AUTO_TEST_CASE(gettersAndSettersMCCBCOp) { MCCBCOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MCCBCOp"); BOOST_CHECK_EQUAL(operation.getParameters().at(0), 0.5); BOOST_CHECK_CLOSE(operation.getClassificationThreshold(), 0.5, 1e-3); MCCBCOp<float> operation2(0.1); BOOST_CHECK_CLOSE(operation2.getClassificationThreshold(), 0.1, 1e-3); } /** MCCMCMicroOp Tests */ BOOST_AUTO_TEST_CASE(constructorMCCMCMicroOp) { MCCMCMicroOp<double>* ptrMCC = nullptr; MCCMCMicroOp<double>* nullPointerMCC = nullptr; BOOST_CHECK_EQUAL(ptrMCC, nullPointerMCC); } BOOST_AUTO_TEST_CASE(destructorMCCMCMicroOp) { MCCMCMicroOp<double>* ptrMCC = nullptr; ptrMCC = new MCCMCMicroOp<double>(); delete ptrMCC; } BOOST_AUTO_TEST_CASE(gettersAndSettersMCCMCMicroOp) { MCCMCMicroOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MCCMCMicroOp"); } /** MAEOp Tests */ BOOST_AUTO_TEST_CASE(constructorMAEOp) { MAEOp<double>* ptrMAE = nullptr; MAEOp<double>* nullPointerMAE = nullptr; BOOST_CHECK_EQUAL(ptrMAE, nullPointerMAE); } BOOST_AUTO_TEST_CASE(destructorMAEOp) { MAEOp<double>* ptrMAE = nullptr; ptrMAE = new MAEOp<double>(); delete ptrMAE; } BOOST_AUTO_TEST_CASE(gettersAndSettersMAEOp) { MAEOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MAEOp"); BOOST_CHECK_EQUAL(operation.getReductionFunc(), "Sum"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 0); MAEOp<float> operation2(std::string("Mean")); BOOST_CHECK_EQUAL(operation2.getReductionFunc(), "Mean"); } /** CosineSimilarityOp Tests */ BOOST_AUTO_TEST_CASE(constructorCosineSimilarityOp) { CosineSimilarityOp<double>* ptrCosineSimilarity = nullptr; CosineSimilarityOp<double>* nullPointerCosineSimilarity = nullptr; BOOST_CHECK_EQUAL(ptrCosineSimilarity, nullPointerCosineSimilarity); } BOOST_AUTO_TEST_CASE(destructorCosineSimilarityOp) { CosineSimilarityOp<double>* ptrCosineSimilarity = nullptr; ptrCosineSimilarity = new CosineSimilarityOp<double>(); delete ptrCosineSimilarity; } BOOST_AUTO_TEST_CASE(gettersAndSettersCosineSimilarityOp) { CosineSimilarityOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "CosineSimilarityOp"); BOOST_CHECK_EQUAL(operation.getReductionFunc(), "Sum"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 0); CosineSimilarityOp<float> operation2(std::string("Mean")); BOOST_CHECK_EQUAL(operation2.getReductionFunc(), "Mean"); } /** PearsonROp Tests */ BOOST_AUTO_TEST_CASE(constructorPearsonROp) { PearsonROp<double>* ptrPearsonR = nullptr; PearsonROp<double>* nullPointerPearsonR = nullptr; BOOST_CHECK_EQUAL(ptrPearsonR, nullPointerPearsonR); } BOOST_AUTO_TEST_CASE(destructorPearsonROp) { PearsonROp<double>* ptrPearsonR = nullptr; ptrPearsonR = new PearsonROp<double>(); delete ptrPearsonR; } BOOST_AUTO_TEST_CASE(gettersAndSettersPearsonROp) { PearsonROp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "PearsonROp"); BOOST_CHECK_EQUAL(operation.getReductionFunc(), "Sum"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 0); PearsonROp<float> operation2(std::string("Mean")); BOOST_CHECK_EQUAL(operation2.getReductionFunc(), "Mean"); } /** EuclideanDistOp Tests */ BOOST_AUTO_TEST_CASE(constructorEuclideanDistOp) { EuclideanDistOp<double>* ptrEuclideanDist = nullptr; EuclideanDistOp<double>* nullPointerEuclideanDist = nullptr; BOOST_CHECK_EQUAL(ptrEuclideanDist, nullPointerEuclideanDist); } BOOST_AUTO_TEST_CASE(destructorEuclideanDistOp) { EuclideanDistOp<double>* ptrEuclideanDist = nullptr; ptrEuclideanDist = new EuclideanDistOp<double>(); delete ptrEuclideanDist; } BOOST_AUTO_TEST_CASE(gettersAndSettersEuclideanDistOp) { EuclideanDistOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "EuclideanDistOp"); BOOST_CHECK_EQUAL(operation.getReductionFunc(), "Sum"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 0); EuclideanDistOp<float> operation2(std::string("Mean")); BOOST_CHECK_EQUAL(operation2.getReductionFunc(), "Mean"); } /** ManhattanDistOp Tests */ BOOST_AUTO_TEST_CASE(constructorManhattanDistOp) { ManhattanDistOp<double>* ptrManhattanDist = nullptr; ManhattanDistOp<double>* nullPointerManhattanDist = nullptr; BOOST_CHECK_EQUAL(ptrManhattanDist, nullPointerManhattanDist); } BOOST_AUTO_TEST_CASE(destructorManhattanDistOp) { ManhattanDistOp<double>* ptrManhattanDist = nullptr; ptrManhattanDist = new ManhattanDistOp<double>(); delete ptrManhattanDist; } BOOST_AUTO_TEST_CASE(gettersAndSettersManhattanDistOp) { ManhattanDistOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "ManhattanDistOp"); BOOST_CHECK_EQUAL(operation.getReductionFunc(), "Sum"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 0); ManhattanDistOp<float> operation2(std::string("Mean")); BOOST_CHECK_EQUAL(operation2.getReductionFunc(), "Mean"); } /** JeffreysAndMatusitaDistOp Tests */ BOOST_AUTO_TEST_CASE(constructorJeffreysAndMatusitaDistOp) { JeffreysAndMatusitaDistOp<double>* ptrJeffreysAndMatusitaDist = nullptr; JeffreysAndMatusitaDistOp<double>* nullPointerJeffreysAndMatusitaDist = nullptr; BOOST_CHECK_EQUAL(ptrJeffreysAndMatusitaDist, nullPointerJeffreysAndMatusitaDist); } BOOST_AUTO_TEST_CASE(destructorJeffreysAndMatusitaDistOp) { JeffreysAndMatusitaDistOp<double>* ptrJeffreysAndMatusitaDist = nullptr; ptrJeffreysAndMatusitaDist = new JeffreysAndMatusitaDistOp<double>(); delete ptrJeffreysAndMatusitaDist; } BOOST_AUTO_TEST_CASE(gettersAndSettersJeffreysAndMatusitaDistOp) { JeffreysAndMatusitaDistOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "JeffreysAndMatusitaDistOp"); BOOST_CHECK_EQUAL(operation.getReductionFunc(), "Sum"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 0); JeffreysAndMatusitaDistOp<float> operation2(std::string("Mean")); BOOST_CHECK_EQUAL(operation2.getReductionFunc(), "Mean"); } /** LogarithmicDistOp Tests */ BOOST_AUTO_TEST_CASE(constructorLogarithmicDistOp) { LogarithmicDistOp<double>* ptrLogarithmicDist = nullptr; LogarithmicDistOp<double>* nullPointerLogarithmicDist = nullptr; BOOST_CHECK_EQUAL(ptrLogarithmicDist, nullPointerLogarithmicDist); } BOOST_AUTO_TEST_CASE(destructorLogarithmicDistOp) { LogarithmicDistOp<double>* ptrLogarithmicDist = nullptr; ptrLogarithmicDist = new LogarithmicDistOp<double>(); delete ptrLogarithmicDist; } BOOST_AUTO_TEST_CASE(gettersAndSettersLogarithmicDistOp) { LogarithmicDistOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "LogarithmicDistOp"); BOOST_CHECK_EQUAL(operation.getReductionFunc(), "Sum"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 0); LogarithmicDistOp<float> operation2(std::string("Mean")); BOOST_CHECK_EQUAL(operation2.getReductionFunc(), "Mean"); } /** PercentDifferenceOp Tests */ BOOST_AUTO_TEST_CASE(constructorPercentDifferenceOp) { PercentDifferenceOp<double>* ptrPercentDifference = nullptr; PercentDifferenceOp<double>* nullPointerPercentDifference = nullptr; BOOST_CHECK_EQUAL(ptrPercentDifference, nullPointerPercentDifference); } BOOST_AUTO_TEST_CASE(destructorPercentDifferenceOp) { PercentDifferenceOp<double>* ptrPercentDifference = nullptr; ptrPercentDifference = new PercentDifferenceOp<double>(); delete ptrPercentDifference; } BOOST_AUTO_TEST_CASE(gettersAndSettersPercentDifferenceOp) { PercentDifferenceOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "PercentDifferenceOp"); BOOST_CHECK_EQUAL(operation.getReductionFunc(), "Sum"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 0); PercentDifferenceOp<float> operation2(std::string("Mean")); BOOST_CHECK_EQUAL(operation2.getReductionFunc(), "Mean"); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE Model3 test suite // #include <boost/test/unit_test.hpp> // changes every so often... #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/Model.h> #include <EvoNet/ml/Link.h> #include <EvoNet/ml/Node.h> #include <vector> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(model3) /** * Part 1 test suit for the Model class * * The following test methods that do not require * a toy network model to test */ BOOST_AUTO_TEST_CASE(constructor) { Model<float>* ptr = nullptr; Model<float>* nullPointer = nullptr; ptr = new Model<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { Model<float>* ptr = nullptr; ptr = new Model<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(constructor2) { Model<float> model(1); BOOST_CHECK_EQUAL(model.getId(), 1); } BOOST_AUTO_TEST_CASE(gettersAndSetters) { // TODO: This is not complete (e.g., cyclic_pairs_ getter/setter) Model<float> model; model.setId(1); model.setName("model1"); Eigen::Tensor<float, 2> error(3, 1), metrics(3, 1); error.setConstant(1); metrics.setConstant(2); model.setError(error); model.setMetric(metrics); BOOST_CHECK_EQUAL(model.getId(), 1); BOOST_CHECK_EQUAL(model.getName(), "model1"); BOOST_CHECK_EQUAL(model.getError()(2, 0), 1); BOOST_CHECK_EQUAL(model.getMetric()(2, 0), 2); } BOOST_AUTO_TEST_CASE(pruneNodes) { Node<float> source1, sink1; Link link1; Weight<float> weight1; source1 = Node<float>("0", NodeType::hidden, NodeStatus::activated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); sink1 = Node<float>("1", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); weight1 = Weight<float>("0", std::make_shared<RandWeightInitOp<float>>(RandWeightInitOp<float>(2.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.001, 0.9, 0.999, 1e-8, 10))); link1 = Link("0", source1.getName(), sink1.getName(), weight1.getName()); Model<float> model; std::vector<Node<float>> nodes_test; nodes_test.push_back(source1); nodes_test.push_back(sink1); // should not fail model.pruneNodes(); model.addNodes({source1, sink1}); model.pruneNodes(); BOOST_CHECK_EQUAL(model.getNodes().size(), 0); model.addNodes({source1, sink1}); model.addLinks({link1}); model.addWeights({weight1}); model.pruneNodes(); for (int i=0; i<nodes_test.size(); ++i) { BOOST_CHECK(model.getNode(nodes_test[i].getName()) == nodes_test[i]); } } BOOST_AUTO_TEST_CASE(pruneWeights) { Node<float> source1, sink1; Link link1; Weight<float> weight1; source1 = Node<float>("0", NodeType::hidden, NodeStatus::activated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); sink1 = Node<float>("1", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); weight1 = Weight<float>("0", std::make_shared<RandWeightInitOp<float>>(RandWeightInitOp<float>(2.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.001, 0.9, 0.999, 1e-8, 10))); link1 = Link("0", source1.getName(), sink1.getName(), weight1.getName()); Model<float> model; std::vector<Weight<float>> weights_test; weights_test.push_back(weight1); // should not fail model.pruneWeights(); model.addWeights({weight1}); model.pruneWeights(); BOOST_CHECK_EQUAL(model.getWeights().size(), 0); model.addWeights({weight1}); model.addNodes({source1, sink1}); model.addLinks({link1}); model.pruneWeights(); for (int i=0; i<weights_test.size(); ++i) { BOOST_CHECK(model.getWeight(weights_test[i].getName()) == weights_test[i]); } } BOOST_AUTO_TEST_CASE(pruneLinks) { Node<float> source1, sink1; Link link1; Weight<float> weight1; source1 = Node<float>("0", NodeType::hidden, NodeStatus::activated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); sink1 = Node<float>("1", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); weight1 = Weight<float>("0", std::make_shared<RandWeightInitOp<float>>(RandWeightInitOp<float>(2.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.001, 0.9, 0.999, 1e-8, 10))); link1 = Link("0", source1.getName(), sink1.getName(), weight1.getName()); Model<float> model; std::vector<Node<float>> nodes_test; nodes_test.push_back(source1); nodes_test.push_back(sink1); std::vector<Link> links_test; links_test.push_back(link1); std::vector<Weight<float>> weights_test; weights_test.push_back(weight1); // should not fail model.pruneLinks(); model.addNodes({source1, sink1}); model.addWeights({weight1}); model.pruneLinks(); for (int i=0; i<nodes_test.size(); ++i) { BOOST_CHECK(model.getNode(nodes_test[i].getName()) == nodes_test[i]); } for (int i=0; i<weights_test.size(); ++i) { BOOST_CHECK(model.getWeight(weights_test[i].getName()) == weights_test[i]); } model.addLinks({link1}); model.pruneLinks(); for (int i=0; i<links_test.size(); ++i) { BOOST_CHECK(model.getLink(links_test[i].getName()) == links_test[i]); } for (int i=0; i<nodes_test.size(); ++i) { BOOST_CHECK(model.getNode(nodes_test[i].getName()) == nodes_test[i]); } for (int i=0; i<weights_test.size(); ++i) { BOOST_CHECK(model.getWeight(weights_test[i].getName()) == weights_test[i]); } } BOOST_AUTO_TEST_CASE(addGetRemoveNodes) { Node<float> source1, sink1, source2, sink2; source1 = Node<float>("0", NodeType::hidden, NodeStatus::activated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); sink1 = Node<float>("1", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); Model<float> model; // add nodes to the model model.addNodes({source1, sink1}); // make test nodes std::vector<Node<float>> nodes_test; nodes_test.push_back(source1); nodes_test.push_back(sink1); for (int i=0; i<nodes_test.size(); ++i) { BOOST_CHECK(model.getNode(nodes_test[i].getName()) == nodes_test[i]); BOOST_CHECK(model.getNodes()[i] == nodes_test[i]); } // add more nodes to the model source2 = Node<float>("2", NodeType::hidden, NodeStatus::activated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); sink2 = Node<float>("3", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); // add nodes to the model model.addNodes({source2, sink2}); nodes_test.push_back(source2); nodes_test.push_back(sink2); for (int i=0; i<nodes_test.size(); ++i) { BOOST_CHECK(model.getNode(nodes_test[i].getName()) == nodes_test[i]); BOOST_CHECK(model.getNodes()[i] == nodes_test[i]); } // remove nodes from the model model.removeNodes({"2", "3"}); nodes_test = {source1, sink1}; for (int i=0; i<nodes_test.size(); ++i) { BOOST_CHECK(model.getNode(nodes_test[i].getName()) == nodes_test[i]); BOOST_CHECK(model.getNodes()[i] == nodes_test[i]); } } BOOST_AUTO_TEST_CASE(addGetRemoveWeights) { Weight<float> weight1, weight2; weight1 = Weight<float>("0", std::make_shared<RandWeightInitOp<float>>(RandWeightInitOp<float>(2.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.001, 0.9, 0.999, 1e-8, 10))); Model<float> model; // add weights to the model model.addWeights({weight1}); // make test weights std::vector<Weight<float>> weights_test; weights_test.push_back(weight1); for (int i=0; i<weights_test.size(); ++i) { BOOST_CHECK(model.getWeight(weights_test[i].getName()) == weights_test[i]); BOOST_CHECK(model.getWeights()[i] == weights_test[i]); } // add more weights to the model weight2 = Weight<float>("1", std::make_shared<RandWeightInitOp<float>>(RandWeightInitOp<float>(2.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.001, 0.9, 0.999, 1e-8, 10))); // add weights to the model model.addWeights({weight2}); weights_test.push_back(weight2); for (int i=0; i<weights_test.size(); ++i) { BOOST_CHECK(model.getWeight(weights_test[i].getName()) == weights_test[i]); BOOST_CHECK(model.getWeights()[i] == weights_test[i]); } // remove weights from the model model.removeWeights({"1"}); weights_test = {weight1}; for (int i=0; i<weights_test.size(); ++i) { BOOST_CHECK(model.getWeight(weights_test[i].getName()) == weights_test[i]); BOOST_CHECK(model.getWeights()[i] == weights_test[i]); } } BOOST_AUTO_TEST_CASE(addGetRemoveLinks) { Node<float> source1, sink1; Link link1, link2; source1 = Node<float>("0", NodeType::hidden, NodeStatus::activated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); sink1 = Node<float>("1", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); Weight<float> weight1; weight1 = Weight<float>("0", std::make_shared<RandWeightInitOp<float>>(RandWeightInitOp<float>(2.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.001, 0.9, 0.999, 1e-8, 10))); link1 = Link("0", source1.getName(), sink1.getName(), weight1.getName()); Model<float> model; // add links (but not nodes) to the model model.addLinks({link1}); std::vector<Link> links_test; // make test links links_test.push_back(link1); for (int i=0; i<links_test.size(); ++i) { BOOST_CHECK(model.getLink(links_test[i].getName()) == links_test[i]); BOOST_CHECK(model.getLinks()[i] == links_test[i]); } std::vector<Node<float>> nodes_test; nodes_test.push_back(source1); nodes_test.push_back(sink1); std::vector<Weight<float>> weights_test; weights_test.push_back(weight1); // add nodes to the model model.addNodes({source1, sink1}); for (int i=0; i<nodes_test.size(); ++i) { BOOST_CHECK(model.getNode(nodes_test[i].getName()) == nodes_test[i]); } // add weights to the model model.addWeights({weight1}); for (int i=0; i<weights_test.size(); ++i) { BOOST_CHECK(model.getWeight(weights_test[i].getName()) == weights_test[i]); } // add more links and nodes to the model Node<float> source2, sink2; source2 = Node<float>("2", NodeType::hidden, NodeStatus::activated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); sink2 = Node<float>("3", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); Weight<float> weight2; weight2 = Weight<float>("1", std::make_shared<RandWeightInitOp<float>>(RandWeightInitOp<float>(2.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.001, 0.9, 0.999, 1e-8, 10))); link2 = Link("1", source2.getName(), sink2.getName(), weight2.getName()); // add nodes to the model model.addNodes({source2, sink2}); nodes_test.push_back(source2); nodes_test.push_back(sink2); for (int i=0; i<nodes_test.size(); ++i) { BOOST_CHECK(model.getNode(nodes_test[i].getName()) == nodes_test[i]); } // add weights to the model model.addWeights({weight2}); weights_test.push_back(weight2); for (int i=0; i<weights_test.size(); ++i) { BOOST_CHECK(model.getWeight(weights_test[i].getName()) == weights_test[i]); } // add links to the model model.addLinks({link2}); links_test.push_back(link2); for (int i=0; i<links_test.size(); ++i) { BOOST_CHECK(model.getLink(links_test[i].getName()) == links_test[i]); BOOST_CHECK(model.getLinks()[i] == links_test[i]); } // remove links from the model model.removeLinks({"1"}); links_test = {link1}; for (int i=0; i<links_test.size(); ++i) { BOOST_CHECK(model.getLink(links_test[i].getName()) == links_test[i]); BOOST_CHECK(model.getLinks()[i] == links_test[i]); } nodes_test = {source1, sink1}; for (int i=0; i<nodes_test.size(); ++i) { BOOST_CHECK(model.getNode(nodes_test[i].getName()) == nodes_test[i]); } weights_test = {weight1}; for (int i=0; i<weights_test.size(); ++i) { BOOST_CHECK(model.getWeight(weights_test[i].getName()) == weights_test[i]); } } //TODO: comparison is failing! BOOST_AUTO_TEST_CASE(comparison) { Node<float> source, sink; Link link1, link2; source = Node<float>("1", NodeType::hidden, NodeStatus::activated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); sink = Node<float>("2", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); Weight<float> weight1; weight1 = Weight<float>("0", std::make_shared<RandWeightInitOp<float>>(RandWeightInitOp<float>(2.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.001, 0.9, 0.999, 1e-8, 10))); link1 = Link("1", source.getName(), sink.getName(), weight1.getName()); link2 = Link("2", source.getName(), sink.getName(), weight1.getName()); Model<float> model1(1); Model<float> model2(1); // Check equal // BOOST_CHECK(model1 == model2); //fail model1.addLinks({link1}); model2.addLinks({link1}); // BOOST_CHECK(model1 == model2); //fail // Check not equal model1.addNodes({source, sink}); BOOST_CHECK(model1 != model2); // Check equal model2.addNodes({source, sink}); // BOOST_CHECK(model1 == model2); //fail // Check not equal model1.addWeights({weight1}); BOOST_CHECK(model1 != model2); //fail // Check equal model2.addWeights({weight1}); // BOOST_CHECK(model1 == model2); //fail // Check not equal model2.setId(2); BOOST_CHECK(model1 != model2); model2.setId(1); model2.addLinks({link2}); BOOST_CHECK(model1 != model2); } BOOST_AUTO_TEST_CASE(copyAssignment) { Node<float> source1, sink1, source2, sink2; Link link1, link2; source1 = Node<float>("1.1", NodeType::hidden, NodeStatus::activated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); sink1 = Node<float>("1.2", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); source2 = Node<float>("2.1", NodeType::hidden, NodeStatus::activated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); sink2 = Node<float>("2.2", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); Weight<float> weight1, weight2; std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; weight_init.reset(new RandWeightInitOp<float>(1)); solver.reset(new AdamOp<float>(0.01, 0.9, 0.999, 1e-8)); weight1 = Weight<float>("1", weight_init, solver); weight_init.reset(new RandWeightInitOp<float>(1)); solver.reset(new AdamOp<float>(0.01, 0.9, 0.999, 1e-8)); weight2 = Weight<float>("2", weight_init, solver); link1 = Link("1", source1.getName(), sink1.getName(), weight1.getName()); link2 = Link("2", source2.getName(), sink2.getName(), weight2.getName()); Model<float> model1(1); model1.addLinks({ link1, link2 }); model1.addWeights({ weight1, weight2 }); model1.addNodes({ source1, sink1, source2, sink2 }); // test copy assignment Model<float> model2 = model1; BOOST_CHECK(model1 != model2); Model<float> model3 = model1; BOOST_CHECK(model1 != model3); // test references model2.removeLinks({ "1" }); model2.pruneModel(1); BOOST_CHECK(model1 != model2); BOOST_CHECK_EQUAL(model1.getLink("1").getName(), "1"); } BOOST_AUTO_TEST_CASE(copy) { Node<float> source1, sink1, source2, sink2; Link link1, link2; source1 = Node<float>("1.1", NodeType::hidden, NodeStatus::activated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); sink1 = Node<float>("1.2", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); source2 = Node<float>("2.1", NodeType::hidden, NodeStatus::activated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); sink2 = Node<float>("2.2", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); Weight<float> weight1, weight2; std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; weight_init.reset(new RandWeightInitOp<float>(1)); solver.reset(new AdamOp<float>(0.01, 0.9, 0.999, 1e-8)); weight1 = Weight<float>("1", weight_init, solver); weight_init.reset(new RandWeightInitOp<float>(1)); solver.reset(new AdamOp<float>(0.01, 0.9, 0.999, 1e-8)); weight2 = Weight<float>("2", weight_init, solver); link1 = Link("1", source1.getName(), sink1.getName(), weight1.getName()); link2 = Link("2", source2.getName(), sink2.getName(), weight2.getName()); Model<float> model1(1); model1.addLinks({link1, link2}); model1.addWeights({weight1, weight2}); model1.addNodes({source1, sink1, source2, sink2}); // test copy Model<float> model2(model1); BOOST_CHECK(model1 != model2); Model<float> model3 = model1; BOOST_CHECK(model1 != model3); // test references model2.removeLinks({"1"}); model2.pruneModel(1); BOOST_CHECK(model1 != model2); BOOST_CHECK_EQUAL(model1.getLink("1").getName(), "1"); } BOOST_AUTO_TEST_CASE(pruneModel) { // minimal toy model Node<float> input, hidden, output; input = Node<float>("i", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); hidden = Node<float>("h", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); output = Node<float>("o", NodeType::output, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); Weight<float> w_i_to_h, w_h_to_o; std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_i_to_h = Weight<float>("i_to_h", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_h_to_o = Weight<float>("h_to_o", weight_init, solver); Link l_i_to_h, l_h_to_o; l_i_to_h = Link("i_to_h", "i", "h", "i_to_h"); l_h_to_o = Link("h_to_o", "h", "o", "h_to_o"); Model<float> model; model.addNodes({input, hidden, output}); model.addWeights({w_i_to_h, w_h_to_o}); model.addLinks({l_i_to_h, l_h_to_o}); model.pruneModel(); BOOST_CHECK_EQUAL(model.getNodes().size(), 3); BOOST_CHECK_EQUAL(model.getWeights().size(), 2); BOOST_CHECK_EQUAL(model.getLinks().size(), 2); model.removeLinks({"i_to_h"}); BOOST_CHECK_EQUAL(model.getNodes().size(), 3); BOOST_CHECK_EQUAL(model.getWeights().size(), 2); // was 2 when wieghts were pruned after links were removed BOOST_CHECK_EQUAL(model.getLinks().size(), 1); model.pruneModel(1); BOOST_CHECK_EQUAL(model.getNodes().size(), 2); BOOST_CHECK_EQUAL(model.getWeights().size(), 1); BOOST_CHECK_EQUAL(model.getLinks().size(), 1); model.removeNodes({"h"}); BOOST_CHECK_EQUAL(model.getNodes().size(), 1); BOOST_CHECK_EQUAL(model.getWeights().size(), 1); BOOST_CHECK_EQUAL(model.getLinks().size(), 1); model.pruneModel(1); BOOST_CHECK_EQUAL(model.getNodes().size(), 1); // was 0 BOOST_CHECK_EQUAL(model.getWeights().size(), 0); BOOST_CHECK_EQUAL(model.getLinks().size(), 0); } BOOST_AUTO_TEST_CASE(checkNodeNames) { // Test model Node<float> input, hidden, output; input = Node<float>("i", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); hidden = Node<float>("h", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); output = Node<float>("o", NodeType::output, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); Model<float> model; model.addNodes({input, hidden, output}); std::vector<std::string> node_names; node_names = {"i", "h", "o"}; BOOST_CHECK(model.checkNodeNames(node_names)); node_names = {"i", "h", "a"}; // no "a" node BOOST_CHECK(!model.checkNodeNames(node_names)); } BOOST_AUTO_TEST_CASE(checkLinkNames) { // Test model Link l_i_to_h, l_h_to_o; l_i_to_h = Link("i_to_h", "i", "h", "i_to_h"); l_h_to_o = Link("h_to_o", "h", "o", "h_to_o"); Model<float> model; model.addLinks({l_i_to_h, l_h_to_o}); std::vector<std::string> link_names; link_names = {"i_to_h", "h_to_o"}; BOOST_CHECK(model.checkLinkNames(link_names)); link_names = {"i_to_h", "h_to_i"}; // no "h_to_i" link BOOST_CHECK(!model.checkLinkNames(link_names)); } BOOST_AUTO_TEST_CASE(checkWeightNames) { // Test model Weight<float> w_i_to_h, w_h_to_o; std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_i_to_h = Weight<float>("i_to_h", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_h_to_o = Weight<float>("h_to_o", weight_init, solver); Model<float> model; model.addWeights({w_i_to_h, w_h_to_o}); std::vector<std::string> weight_names; weight_names = {"i_to_h", "h_to_o"}; BOOST_CHECK(model.checkWeightNames(weight_names)); weight_names = {"i_to_h", "h_to_i"}; // no "h_to_i" weight BOOST_CHECK(!model.checkWeightNames(weight_names)); } BOOST_AUTO_TEST_CASE(clearCache) { // No tests } BOOST_AUTO_TEST_CASE(setInputAndOutputNodes) { Node<float> i1, i2, o1, o2; i1 = Node<float>("i1", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); i2 = Node<float>("i2", NodeType::hidden, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o1 = Node<float>("o1", NodeType::output, NodeStatus::activated, std::make_shared<TanHOp<float>>(TanHOp<float>()), std::make_shared<TanHGradOp<float>>(TanHGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o2 = Node<float>("o2", NodeType::hidden, NodeStatus::activated, std::make_shared<TanHOp<float>>(TanHOp<float>()), std::make_shared<TanHGradOp<float>>(TanHGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); std::vector<std::string> input_nodes = { "i1", "i2" }; std::vector<std::string> output_nodes = { "o1", "o2" }; // model 1: fully connected model Model<float> model1; model1.addNodes({ i1, i2, o1, o2 }); BOOST_CHECK_EQUAL(model1.getInputNodes().size(), 1); BOOST_CHECK_EQUAL(model1.getOutputNodes().size(), 1); // Specify the node types manually for (const std::string& node_name : output_nodes) { model1.nodes_.at(node_name)->setType(NodeType::output); } for (const std::string& node_name : input_nodes) { model1.nodes_.at(node_name)->setType(NodeType::input); } model1.setInputAndOutputNodes(); BOOST_CHECK_EQUAL(model1.getInputNodes().size(), 2); BOOST_CHECK_EQUAL(model1.getOutputNodes().size(), 2); BOOST_CHECK(model1.getInputNodes()[0] == model1.getNodesMap().at("i1")); BOOST_CHECK(model1.getInputNodes()[1] == model1.getNodesMap().at("i2")); BOOST_CHECK(model1.getOutputNodes()[0] == model1.getNodesMap().at("o1")); BOOST_CHECK(model1.getOutputNodes()[1] == model1.getNodesMap().at("o2")); } BOOST_AUTO_TEST_CASE(getInputAndOutputNodes) { Node<float> i1, i2, o1, o2; i1 = Node<float>("i1", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); i2 = Node<float>("i2", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o1 = Node<float>("o1", NodeType::output, NodeStatus::activated, std::make_shared<TanHOp<float>>(TanHOp<float>()), std::make_shared<TanHGradOp<float>>(TanHGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o2 = Node<float>("o2", NodeType::output, NodeStatus::activated, std::make_shared<TanHOp<float>>(TanHOp<float>()), std::make_shared<TanHGradOp<float>>(TanHGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); std::vector<std::string> input_nodes = { "i1", "i2" }; std::vector<std::string> output_nodes = { "o1", "o2" }; // model 1: fully connected model Model<float> model1; model1.addNodes({ i1, i2, o1, o2 }); BOOST_CHECK(model1.getInputNodes()[0] == model1.getNodesMap().at("i1")); BOOST_CHECK(model1.getInputNodes()[1] == model1.getNodesMap().at("i2")); BOOST_CHECK(model1.getOutputNodes()[0] == model1.getNodesMap().at("o1")); BOOST_CHECK(model1.getOutputNodes()[1] == model1.getNodesMap().at("o2")); } BOOST_AUTO_TEST_CASE(checkCompleteInputToOutput) { Node<float> i1, i2, h1, o1, o2, b1; i1 = Node<float>("i1", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); i2 = Node<float>("i2", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h1 = Node<float>("h1", NodeType::hidden, NodeStatus::activated, std::make_shared<TanHOp<float>>(TanHOp<float>()), std::make_shared<TanHGradOp<float>>(TanHGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o1 = Node<float>("o1", NodeType::output, NodeStatus::activated, std::make_shared<TanHOp<float>>(TanHOp<float>()), std::make_shared<TanHGradOp<float>>(TanHGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o2 = Node<float>("o2", NodeType::output, NodeStatus::activated, std::make_shared<TanHOp<float>>(TanHOp<float>()), std::make_shared<TanHGradOp<float>>(TanHGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b1 = Node<float>("b1", NodeType::bias, NodeStatus::activated, std::make_shared<TanHOp<float>>(TanHOp<float>()), std::make_shared<TanHGradOp<float>>(TanHGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); Weight<float> w_i1_h1, w_i2_h1, w_h1_o1, w_h1_o2, w_h1_h1, w_b1_h1; std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; weight_init.reset(new RandWeightInitOp<float>(2.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_i1_h1 = Weight<float>("w_i1_h1", weight_init, solver); weight_init.reset(new RandWeightInitOp<float>(2.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_i2_h1 = Weight<float>("w_i2_h1", weight_init, solver); weight_init.reset(new RandWeightInitOp<float>(2.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_h1_o1 = Weight<float>("w_h1_o1", weight_init, solver); weight_init.reset(new RandWeightInitOp<float>(2.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_h1_o2 = Weight<float>("w_h1_o2", weight_init, solver); weight_init.reset(new RandWeightInitOp<float>(2.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_h1_h1 = Weight<float>("w_h1_h1", weight_init, solver); weight_init.reset(new RandWeightInitOp<float>(2.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_b1_h1 = Weight<float>("w_b1_h1", weight_init, solver); Link l_i1_h1, l_i2_h1, l_h1_o1, l_h1_o2, l_h1_h1, l_b1_h1; l_i1_h1 = Link("l_i1_h1", "i1", "h1", "w_i1_h1"); l_i2_h1 = Link("l_i2_h1", "i2", "h1", "w_i2_h1"); l_h1_o1 = Link("l_h1_o1", "h1", "o1", "w_h1_o1"); l_h1_o2 = Link("l_h1_o2", "h1", "o2", "w_h1_o2"); l_h1_h1 = Link("l_h1_h1", "h1", "h1", "w_h1_h1"); l_b1_h1 = Link("l_b1_h1", "b1", "h1", "w_b1_h1"); std::vector<std::string> input_nodes = { "i1", "i2" }; std::vector<std::string> output_nodes = { "o1", "o2" }; // model 1: fully connected model Model<float> model1; model1.addNodes({ i1, i2, h1, o1, o2 }); model1.addWeights({ w_i1_h1, w_i2_h1, w_h1_o1, w_h1_o2 }); model1.addLinks({ l_i1_h1, l_i2_h1, l_h1_o1, l_h1_o2 }); BOOST_CHECK(model1.checkCompleteInputToOutput()); // model 2: disconnected output Model<float> model2; model2.addNodes({ i1, i2, h1, o1, o2 }); model2.addWeights({ w_i1_h1, w_i2_h1, w_h1_o2 }); model2.addLinks({ l_i1_h1, l_i2_h1, l_h1_o2 }); BOOST_CHECK(!model2.checkCompleteInputToOutput()); // model 3: disconnected input Model<float> model3; model3.addNodes({ i1, i2, h1, o1, o2 }); model3.addWeights({ w_i1_h1, w_h1_o1, w_h1_o2 }); model3.addLinks({ l_i1_h1, l_h1_o1, l_h1_o2 }); BOOST_CHECK(!model3.checkCompleteInputToOutput()); // model 4: missing input nodes (cannot detect!) Model<float> model4; model4.addNodes({ i2, h1, o1, o2 }); model4.addWeights({ w_i1_h1, w_i2_h1, w_h1_o1, w_h1_o2 }); model4.addLinks({ l_i1_h1, l_i2_h1, l_h1_o1, l_h1_o2 }); BOOST_CHECK(model4.checkCompleteInputToOutput()); // model 5: missing output nodes (cannot detect!) Model<float> model5; model5.addNodes({ i1, i2, h1, o2 }); model5.addWeights({ w_i1_h1, w_i2_h1, w_h1_o1, w_h1_o2 }); model5.addLinks({ l_i1_h1, l_i2_h1, l_h1_o1, l_h1_o2 }); BOOST_CHECK(model5.checkCompleteInputToOutput()); // model 1: fully connected model with self loop Model<float> model6; model6.addNodes({ i1, i2, h1, o1, o2 }); model6.addWeights({ w_i1_h1, w_i2_h1, w_h1_o1, w_h1_o2, w_h1_h1 }); model6.addLinks({ l_i1_h1, l_i2_h1, l_h1_o1, l_h1_o2, l_h1_h1 }); BOOST_CHECK(model6.checkCompleteInputToOutput()); // model 1: fully connected model with bias Model<float> model7; model7.addNodes({ i1, i2, h1, o1, o2, b1 }); model7.addWeights({ w_i1_h1, w_i2_h1, w_h1_o1, w_h1_o2, w_b1_h1 }); model7.addLinks({ l_i1_h1, l_i2_h1, l_h1_o1, l_h1_o2, l_b1_h1 }); BOOST_CHECK(model7.checkCompleteInputToOutput()); } BOOST_AUTO_TEST_CASE(removeIsolatedNodes) { Node<float> i1, i2, h1, h2, o1, o2, b2; i1 = Node<float>("i1", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); i2 = Node<float>("i2", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h1 = Node<float>("h1", NodeType::hidden, NodeStatus::activated, std::make_shared<TanHOp<float>>(TanHOp<float>()), std::make_shared<TanHGradOp<float>>(TanHGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h2 = Node<float>("h2", NodeType::hidden, NodeStatus::activated, std::make_shared<TanHOp<float>>(TanHOp<float>()), std::make_shared<TanHGradOp<float>>(TanHGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); // deadend b2 = Node<float>("b2", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); // deadend o1 = Node<float>("o1", NodeType::output, NodeStatus::activated, std::make_shared<TanHOp<float>>(TanHOp<float>()), std::make_shared<TanHGradOp<float>>(TanHGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o2 = Node<float>("o2", NodeType::output, NodeStatus::activated, std::make_shared<TanHOp<float>>(TanHOp<float>()), std::make_shared<TanHGradOp<float>>(TanHGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); Weight<float> w_i1_h1, w_i2_h1, w_h1_o1, w_h1_o2, w_h1_h2, w_h2_h1, w_b2_h2; // deadend std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; weight_init.reset(new RandWeightInitOp<float>(2.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_i1_h1 = Weight<float>("w_i1_h1", weight_init, solver); weight_init.reset(new RandWeightInitOp<float>(2.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_i2_h1 = Weight<float>("w_i2_h1", weight_init, solver); weight_init.reset(new RandWeightInitOp<float>(2.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_h1_o1 = Weight<float>("w_h1_o1", weight_init, solver); weight_init.reset(new RandWeightInitOp<float>(2.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_h1_o2 = Weight<float>("w_h1_o2", weight_init, solver); weight_init.reset(new RandWeightInitOp<float>(2.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_h1_h2 = Weight<float>("w_h1_h2", weight_init, solver); weight_init.reset(new RandWeightInitOp<float>(2.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_h2_h1 = Weight<float>("w_h2_h1", weight_init, solver); weight_init.reset(new RandWeightInitOp<float>(2.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w_b2_h2 = Weight<float>("w_b2_h2", weight_init, solver); Link l_i1_h1, l_i2_h1, l_h1_o1, l_h1_o2, l_h1_h2, l_h2_h1, l_b2_h2; // deadend l_i1_h1 = Link("l_i1_h1", "i1", "h1", "w_i1_h1"); l_i2_h1 = Link("l_i2_h1", "i2", "h1", "w_i2_h1"); l_h1_o1 = Link("l_h1_o1", "h1", "o1", "w_h1_o1"); l_h1_o2 = Link("l_h1_o2", "h1", "o2", "w_h1_o2"); l_h1_h2 = Link("l_h1_h2", "h1", "h2", "w_h1_h2"); l_h2_h1 = Link("l_h2_h1", "h2", "h1", "w_h2_h1"); l_b2_h2 = Link("l_b2_h2", "b2", "h2", "w_b2_h2"); // model 1: fully connected model Model<float> model1; model1.addNodes({ i1, i2, h1, o1, o2 }); model1.addWeights({ w_i1_h1, w_i2_h1, w_h1_o1, w_h1_o2 }); model1.addLinks({ l_i1_h1, l_i2_h1, l_h1_o1, l_h1_o2 }); BOOST_CHECK(!model1.removeIsolatedNodes()); BOOST_CHECK_EQUAL(model1.getNodes().size(), 5); // model 2: h2 is a dead end sink Model<float> model2; model2.addNodes({ i1, i2, h1, h2, o1, o2, b2 }); model2.addWeights({ w_i1_h1, w_i2_h1, w_h1_o1, w_h1_o2, w_h1_h2, w_b2_h2 }); model2.addLinks({ l_i1_h1, l_i2_h1, l_h1_o1, l_h1_o2, l_h1_h2, l_b2_h2 }); BOOST_CHECK(model2.removeIsolatedNodes()); BOOST_CHECK_EQUAL(model2.getNodes().size(), 6); // model 3: h2 is a dead end source Model<float> model3; model3.addNodes({ i1, i2, h1, h2, o1, o2, b2 }); model3.addWeights({ w_i1_h1, w_i2_h1, w_h1_o1, w_h1_o2, w_h2_h1, w_b2_h2 }); model3.addLinks({ l_i1_h1, l_i2_h1, l_h1_o1, l_h1_o2, l_h2_h1, l_b2_h2 }); BOOST_CHECK(model3.removeIsolatedNodes()); BOOST_CHECK_EQUAL(model3.getNodes().size(), 6); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/PopulationTrainerExperimentalDefaultDevice.h> #include <EvoNet/ml/ModelTrainerExperimentalDefaultDevice.h> #include <EvoNet/ml/ModelReplicatorExperimental.h> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/ml/Model.h> #include <EvoNet/io/Parameters.h> #include <EvoNet/simulator/MNISTSimulator.h> #include <unsupported/Eigen/CXX11/Tensor> using namespace EvoNet; // Extended classes template<typename TensorT> class ModelTrainerExt : public ModelTrainerExperimentalDefaultDevice<TensorT> { public: /* @brief Fully Connected Bayesian model with Xavier-like initialization Reference: Blundell 2015 Weight uncertainty in neural networks arXiv:1505.05424 @param[in, out] model The network model @param[in] n_inputs The number of input pixels @param[in] n_outputs The number of output labels @param[in] n_hidden The length of the hidden layers @param[in] specify_layers Whether to give the `ModelInterpreter` "hints" as to the correct network structure during graph to tensor compilation */ void makeFullyConnBayes(Model<TensorT>& model, const int& n_inputs = 784, const int& n_outputs = 10, const int& n_hidden_0 = 512, const int& n_hidden_1 = 512, const int& n_hidden_2 = 512, const bool& add_gaussian = false, const TensorT& logvar_1 = -1, const TensorT& logvar_2 = -4, const TensorT& pi = 0.5, const bool& specify_layers = false, const TensorT& learning_rate = 1e-3, const TensorT& gradient_clipping = 100) { model.setId(0); model.setName("FullyConnectedBayesClassifier"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layers); // Define the activation based on `add_feature_norm` auto activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); auto activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); auto activation_linear = std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()); auto activation_linear_grad = std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the weight inits std::shared_ptr<WeightInitOp<TensorT>> weight_init_mu, weight_init_logvar; if (n_hidden_0 > 0) { weight_init_mu = std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(n_inputs + n_hidden_0) / 2, 1)); weight_init_logvar = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(-12 / n_hidden_0))); } else { weight_init_mu = std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(n_inputs + n_outputs) / 2, 1)); weight_init_logvar = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(TensorT(-12 / n_outputs))); } // Define the solver auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(learning_rate, 0.9, 0.999, 1e-8, gradient_clipping)); auto solver_dummy_op = std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()); // Define the nodes std::vector<std::string> node_names_mu, node_names_logvar, node_names_encoding, node_names_input, node_names_prior, node_names_posterior; // Add the 1st FC layer if (n_hidden_0 > 0) { node_names_input = node_names; if (add_gaussian) { // Add the bayesian nodes node_names = model_builder.addFullyConnectedBayesian(model, "EN0", "EN0", node_names_input, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, weight_init_mu, solver_op, weight_init_logvar, solver_op, logvar_1, logvar_2, pi, node_names_logvar, node_names_posterior, node_names_prior, specify_layers); // Add the actual output nodes node_names_posterior = model_builder.addSinglyConnected(model, "EN0Posterior", "EN0Posterior", node_names_posterior, node_names_posterior.size(), activation_linear, activation_linear_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_dummy_op, 0.0f, 0.0f, false, true); node_names_prior = model_builder.addSinglyConnected(model, "EN0Prior", "EN0Prior", node_names_prior, node_names_prior.size(), activation_linear, activation_linear_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_dummy_op, 0.0f, 0.0f, false, true); node_names_logvar = model_builder.addSinglyConnected(model, "EN0LogVar", "EN0LogVar", node_names_logvar, node_names_logvar.size(), activation_linear, activation_linear_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_dummy_op, 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names_posterior) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_prior) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_logvar) model.nodes_.at(node_name)->setType(NodeType::output); } else { node_names = model_builder.addFullyConnected(model, "EN0", "EN0", node_names_input, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_input.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); } } // Add the 2nd FC layer if (n_hidden_1 > 0) { node_names_input = node_names; if (add_gaussian) { // Add the bayesian nodes node_names = model_builder.addFullyConnectedBayesian(model, "EN1", "EN1", node_names_input, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, weight_init_mu, solver_op, weight_init_logvar, solver_op, logvar_1, logvar_2, pi, node_names_logvar, node_names_posterior, node_names_prior, specify_layers); // Add the actual output nodes node_names_posterior = model_builder.addSinglyConnected(model, "EN1Posterior", "EN1Posterior", node_names_posterior, node_names_posterior.size(), activation_linear, activation_linear_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_dummy_op, 0.0f, 0.0f, false, true); node_names_prior = model_builder.addSinglyConnected(model, "EN1Prior", "EN1Prior", node_names_prior, node_names_prior.size(), activation_linear, activation_linear_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_dummy_op, 0.0f, 0.0f, false, true); node_names_logvar = model_builder.addSinglyConnected(model, "EN1LogVar", "EN1LogVar", node_names_logvar, node_names_logvar.size(), activation_linear, activation_linear_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_dummy_op, 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names_posterior) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_prior) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_logvar) model.nodes_.at(node_name)->setType(NodeType::output); } else { node_names = model_builder.addFullyConnected(model, "EN1", "EN1", node_names_input, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_input.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); } } // Add the output FC layer if (n_outputs > 0) { node_names_input = node_names; if (add_gaussian) { // Add the bayesian nodes node_names = model_builder.addFullyConnectedBayesian(model, "EN2", "EN2", node_names_input, n_outputs, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, weight_init_mu, solver_op, weight_init_logvar, solver_op, logvar_1, logvar_2, pi, node_names_logvar, node_names_posterior, node_names_prior, specify_layers); // Add the actual output nodes node_names_posterior = model_builder.addSinglyConnected(model, "EN2Posterior", "EN2Posterior", node_names_posterior, node_names_posterior.size(), activation_linear, activation_linear_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_dummy_op, 0.0f, 0.0f, false, true); node_names_prior = model_builder.addSinglyConnected(model, "EN2Prior", "EN2Prior", node_names_prior, node_names_prior.size(), activation_linear, activation_linear_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_dummy_op, 0.0f, 0.0f, false, true); node_names_logvar = model_builder.addSinglyConnected(model, "EN2LogVar", "EN2LogVar", node_names_logvar, node_names_logvar.size(), activation_linear, activation_linear_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_dummy_op, 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names_posterior) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_prior) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_logvar) model.nodes_.at(node_name)->setType(NodeType::output); } else { node_names = model_builder.addFullyConnected(model, "EN2", "EN2", node_names_input, n_outputs, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_input.size() + n_outputs) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); } } // Add the actual output nodes node_names = model_builder.addSinglyConnected(model, "Output", "Output", node_names, n_outputs, activation_linear, activation_linear_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_dummy_op, 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names) model.nodes_.at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } }; template<typename TensorT> class DataSimulatorExt : public MNISTSimulator<TensorT> { public: bool add_gaussian_ = false; int n_hidden_0_ = 0; int n_hidden_1_ = 0; void simulateData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps, const bool& is_train) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); // make the start and end sample indices Eigen::Tensor<int, 1> sample_indices(this->training_data.dimension(1)); if (is_train) sample_indices = this->getTrainingIndices(batch_size, 1); else sample_indices = this->getValidationIndices(batch_size, 1); // pull out the training data and labels Eigen::Tensor<TensorT, 3> training_data(batch_size, memory_size, this->training_data.dimension(1)); Eigen::Tensor<TensorT, 3> training_labels(batch_size, memory_size, this->training_labels.dimension(1)); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < this->training_data.dimension(1); ++nodes_iter) { if (is_train) training_data(batch_iter, memory_iter, nodes_iter) = this->training_data(sample_indices[batch_iter], nodes_iter); else training_data(batch_iter, memory_iter, nodes_iter) = this->validation_data(sample_indices[batch_iter], nodes_iter); } for (int nodes_iter = 0; nodes_iter < this->training_labels.dimension(1); ++nodes_iter) { if (is_train) training_labels(batch_iter, memory_iter, nodes_iter) = (TensorT)this->training_labels(sample_indices[batch_iter], nodes_iter); else training_labels(batch_iter, memory_iter, nodes_iter) = (TensorT)this->validation_labels(sample_indices[batch_iter], nodes_iter); } } } // Assign the input data input_data.setZero(); input_data.slice(Eigen::array<Eigen::Index, 3>({ 0, 0, 0 }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, this->training_data.dimension(1) })) = training_data; // Assign the input data loss_output_data.setConstant(TensorT(1)); // negative log likelihood expected value loss_output_data.slice(Eigen::array<Eigen::Index, 3>({ 0, 0, 0 }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, this->training_labels.dimension(1) })) = training_labels; // Assign the input data metric_output_data.setZero(); // in order to compute the total magnitude of the logvar metric_output_data.slice(Eigen::array<Eigen::Index, 3>({ 0, 0, 0 }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, this->training_labels.dimension(1) })) = training_labels; assert(memory_size == 1); if (add_gaussian_) { if (n_hidden_0_ > 0 && n_hidden_1_ > 0) { assert(n_output_nodes == this->training_labels.dimension(1) + 2 * this->training_data.dimension(1) * n_hidden_0_ + 2 * n_hidden_0_ * n_hidden_1_ + 2 * n_hidden_1_ * this->training_labels.dimension(1)); assert(n_metric_output_nodes == this->training_labels.dimension(1) + this->training_data.dimension(1) * n_hidden_0_ + n_hidden_0_ * n_hidden_1_ + n_hidden_1_ * this->training_labels.dimension(1)); assert(n_input_nodes == this->training_data.dimension(1) + this->training_data.dimension(1) * n_hidden_0_ + n_hidden_0_ * n_hidden_1_ + n_hidden_1_ * this->training_labels.dimension(1)); // Gaussian sampler input/output data Eigen::Tensor<TensorT, 3> gaussian_samples(batch_size, memory_size, this->training_data.dimension(1) * n_hidden_0_ + n_hidden_0_ * n_hidden_1_ + n_hidden_1_ * this->training_labels.dimension(1)); if (is_train) gaussian_samples = GaussianSampler<TensorT>(batch_size * memory_size, this->training_data.dimension(1) * n_hidden_0_ + n_hidden_0_ * n_hidden_1_ + n_hidden_1_ * this->training_labels.dimension(1)) .reshape(Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, this->training_data.dimension(1) * n_hidden_0_ + n_hidden_0_ * n_hidden_1_ + n_hidden_1_ * this->training_labels.dimension(1) })); else gaussian_samples.setZero(); // Assign the input data input_data.slice(Eigen::array<Eigen::Index, 3>({ 0, 0, this->training_data.dimension(1) }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, this->training_data.dimension(1) * n_hidden_0_ })) = gaussian_samples.slice( Eigen::array<Eigen::Index, 3>({ 0, 0, 0 }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, this->training_data.dimension(1) * n_hidden_0_ })); input_data.slice(Eigen::array<Eigen::Index, 3>({ 0, 0, this->training_data.dimension(1) + this->training_data.dimension(1) * n_hidden_0_ }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, n_hidden_0_ * n_hidden_1_ })) = gaussian_samples.slice( Eigen::array<Eigen::Index, 3>({ 0, 0, this->training_data.dimension(1) * n_hidden_0_ }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, n_hidden_0_ * n_hidden_1_ })); input_data.slice(Eigen::array<Eigen::Index, 3>({ 0, 0, this->training_data.dimension(1) + this->training_data.dimension(1) * n_hidden_0_ + n_hidden_0_ * n_hidden_1_ }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, n_hidden_1_ * this->training_labels.dimension(1) })) = gaussian_samples.slice( Eigen::array<Eigen::Index, 3>({ 0, 0, this->training_data.dimension(1) * n_hidden_0_ + n_hidden_0_ * n_hidden_1_ }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, n_hidden_1_ * this->training_labels.dimension(1) })); } else if (n_hidden_0_ > 0) { assert(n_output_nodes == this->training_labels.dimension(1) + 2 * this->training_data.dimension(1) * n_hidden_0_ + 2 * n_hidden_0_ * this->training_labels.dimension(1)); assert(n_metric_output_nodes == this->training_labels.dimension(1) + this->training_data.dimension(1) * n_hidden_0_ + n_hidden_0_ * this->training_labels.dimension(1)); assert(n_input_nodes == this->training_data.dimension(1) + this->training_data.dimension(1) * n_hidden_0_ + n_hidden_0_ * this->training_labels.dimension(1)); // Gaussian sampler input/output data Eigen::Tensor<TensorT, 3> gaussian_samples(batch_size, memory_size, this->training_data.dimension(1) * n_hidden_0_ + n_hidden_0_ * this->training_labels.dimension(1)); if (is_train) gaussian_samples = GaussianSampler<TensorT>(batch_size * memory_size, this->training_data.dimension(1) * n_hidden_0_ + n_hidden_0_ * this->training_labels.dimension(1)) .reshape(Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, this->training_data.dimension(1) * n_hidden_0_ + n_hidden_0_ * this->training_labels.dimension(1) })); else gaussian_samples.setZero(); // Assign the input data input_data.slice(Eigen::array<Eigen::Index, 3>({ 0, 0, this->training_data.dimension(1) }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, this->training_data.dimension(1) * n_hidden_0_ })) = gaussian_samples.slice( Eigen::array<Eigen::Index, 3>({ 0, 0, 0 }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, this->training_data.dimension(1) * n_hidden_0_ })); input_data.slice(Eigen::array<Eigen::Index, 3>({ 0, 0, this->training_data.dimension(1) + this->training_data.dimension(1) * n_hidden_0_ }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, n_hidden_0_ * this->training_labels.dimension(1) })) = gaussian_samples.slice( Eigen::array<Eigen::Index, 3>({ 0, 0, this->training_data.dimension(1) * n_hidden_0_ }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, n_hidden_0_ * this->training_labels.dimension(1) })); } else { assert(n_output_nodes == this->training_labels.dimension(1) + 2 * this->training_data.dimension(1) * this->training_labels.dimension(1)); assert(n_metric_output_nodes == this->training_labels.dimension(1) + this->training_data.dimension(1) * this->training_labels.dimension(1)); assert(n_input_nodes == this->training_data.dimension(1) + this->training_data.dimension(1) * this->training_labels.dimension(1)); // Gaussian sampler input/output data Eigen::Tensor<TensorT, 3> gaussian_samples(batch_size, memory_size, this->training_data.dimension(1) * this->training_labels.dimension(1)); if (is_train) gaussian_samples = GaussianSampler<TensorT>(batch_size * memory_size, this->training_data.dimension(1) * this->training_labels.dimension(1)) .reshape(Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, this->training_data.dimension(1) * this->training_labels.dimension(1) })); else gaussian_samples.setZero(); // Assign the input data input_data.slice(Eigen::array<Eigen::Index, 3>({ 0, 0, this->training_data.dimension(1) }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, this->training_data.dimension(1) * this->training_labels.dimension(1) })) = gaussian_samples.slice( Eigen::array<Eigen::Index, 3>({ 0, 0, 0 }), Eigen::array<Eigen::Index, 3>({ batch_size, memory_size, this->training_data.dimension(1) * this->training_labels.dimension(1) })); } } else { assert(n_output_nodes == this->training_labels.dimension(1)); assert(n_metric_output_nodes == this->training_labels.dimension(1)); assert(n_input_nodes == this->training_data.dimension(1)); } } void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { simulateData(input_data, loss_output_data, metric_output_data, time_steps, true); } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { simulateData(input_data, loss_output_data, metric_output_data, time_steps, false); } }; template<typename TensorT> class ModelReplicatorExt : public ModelReplicatorExperimental<TensorT> {}; template<typename TensorT> class PopulationTrainerExt : public PopulationTrainerExperimentalDefaultDevice<TensorT> {}; /** @brief Image classification MNIST example whereby all pixels are linearized and read into the model. The model then attempts to classify the image using a Bayesian fully connected architecture Data processing: - whole image pixels (linearized) 28x28 normalized to 0 to 1 - classifier (1 hot vector from 0 to 9) */ template<class ...ParameterTypes> void main_MNIST(const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); // define the population trainer parameters PopulationTrainerExt<float> population_trainer; setPopulationTrainerParameters(population_trainer, args...); // define the population logger PopulationLogger<float> population_logger(true, true); // define the model logger ModelLogger<float> model_logger(true, true, false, false, false, false, false, false); // define the data simulator const std::size_t input_size = 784; const std::size_t training_data_size = 60000; //60000; const std::size_t validation_data_size = 10000; //10000; const std::size_t output_size = 10; DataSimulatorExt<float> data_simulator; data_simulator.n_hidden_0_ = std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(); data_simulator.n_hidden_1_ = std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(); data_simulator.add_gaussian_ = std::get<EvoNetParameters::ModelTrainer::AddGaussian>(parameters).get(); // read in the training data std::string training_data_filename = std::get<EvoNetParameters::General::DataDir>(parameters).get() + "train-images.idx3-ubyte"; std::string training_labels_filename = std::get<EvoNetParameters::General::DataDir>(parameters).get() + "train-labels.idx1-ubyte"; data_simulator.readData(training_data_filename, training_labels_filename, true, training_data_size, input_size); // read in the validation data std::string validation_data_filename = std::get<EvoNetParameters::General::DataDir>(parameters).get() + "t10k-images.idx3-ubyte"; std::string validation_labels_filename = std::get<EvoNetParameters::General::DataDir>(parameters).get() + "t10k-labels.idx1-ubyte"; data_simulator.readData(validation_data_filename, validation_labels_filename, false, validation_data_size, input_size); data_simulator.unitScaleData(); // Make the input nodes std::vector<std::string> input_nodes; for (int i = 0; i < input_size; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } // Make the encoding nodes and add them to the input assert(( std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get() > 0 && std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get() > 0) || ( std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get() == 0 && std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get() == 0) ); if (std::get<EvoNetParameters::ModelTrainer::AddGaussian>(parameters).get()) { if (std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get() > 0 && std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get() > 0) { for (int i = 0; i < input_size; ++i) { for (int j = 0; j < std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(); ++j) { char name_char[512]; sprintf(name_char, "EN0-Input_%012d-Gaussian_%012d-Sampler", i, j); std::string name(name_char); input_nodes.push_back(name); } } for (int i = 0; i < std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(); ++i) { for (int j = 0; j < std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(); ++j) { char name_char[512]; sprintf(name_char, "EN1-EN0_%012d-Gaussian_%012d-Sampler", i, j); std::string name(name_char); input_nodes.push_back(name); } } for (int i = 0; i < std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(); ++i) { for (int j = 0; j < data_simulator.training_labels.dimension(1); ++j) { char name_char[512]; sprintf(name_char, "EN2-EN1_%012d-Gaussian_%012d-Sampler", i, j); std::string name(name_char); input_nodes.push_back(name); } } } else { for (int i = 0; i < input_size; ++i) { for (int j = 0; j < data_simulator.training_labels.dimension(1); ++j) { char name_char[512]; sprintf(name_char, "EN2-Input_%012d-Gaussian_%012d-Sampler", i, j); std::string name(name_char); input_nodes.push_back(name); } } } } // Make the output nodes std::vector<std::string> output_nodes; for (int i = 0; i < data_simulator.mnist_labels.size(); ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes.push_back(name); } // Make the mu nodes and logvar nodes std::vector<std::string> encoding_nodes_en0posterior, encoding_nodes_en1posterior, encoding_nodes_en2posterior; std::vector<std::string> encoding_nodes_en0prior, encoding_nodes_en1prior, encoding_nodes_en2prior; std::vector<std::string> encoding_nodes_en0logvar, encoding_nodes_en1logvar, encoding_nodes_en2logvar; if (std::get<EvoNetParameters::ModelTrainer::AddGaussian>(parameters).get()) { if (std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get() > 0 && std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get() > 0) { for (int i = 0; i < input_size; ++i) { for (int j = 0; j < std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(); ++j) { char* name_char = new char[512]; sprintf(name_char, "EN0Posterior_%012d", i * std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get() + j); std::string name(name_char); encoding_nodes_en0posterior.push_back(name); name_char = new char[512]; sprintf(name_char, "EN0Prior_%012d", i * std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get() + j); name = name_char; encoding_nodes_en0prior.push_back(name); name_char = new char[512]; sprintf(name_char, "EN0LogVar_%012d", i * std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get() + j); name = name_char; encoding_nodes_en0logvar.push_back(name); delete[] name_char; } } for (int i = 0; i < std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(); ++i) { for (int j = 0; j < std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(); ++j) { char* name_char = new char[512]; sprintf(name_char, "EN1Posterior_%012d", i * std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get() + j); std::string name(name_char); encoding_nodes_en1posterior.push_back(name); name_char = new char[512]; sprintf(name_char, "EN1Prior_%012d", i * std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get() + j); name = name_char; encoding_nodes_en1prior.push_back(name); name_char = new char[512]; sprintf(name_char, "EN1LogVar_%012d", i * std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get() + j); name = name_char; encoding_nodes_en1logvar.push_back(name); delete[] name_char; } } for (int i = 0; i < std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(); ++i) { for (int j = 0; j < data_simulator.training_labels.dimension(1); ++j) { char* name_char = new char[512]; sprintf(name_char, "EN2Posterior_%012d", i * data_simulator.training_labels.dimension(1) + j); std::string name(name_char); encoding_nodes_en2posterior.push_back(name); name_char = new char[512]; sprintf(name_char, "EN2Prior_%012d", i * data_simulator.training_labels.dimension(1) + j); name = name_char; encoding_nodes_en2prior.push_back(name); name_char = new char[512]; sprintf(name_char, "EN2LogVar_%012d", i * data_simulator.training_labels.dimension(1) + j); name = name_char; encoding_nodes_en2logvar.push_back(name); delete[] name_char; } } } else { for (int i = 0; i < input_size; ++i) { for (int j = 0; j < data_simulator.training_labels.dimension(1); ++j) { char* name_char = new char[512]; sprintf(name_char, "EN2Posterior_%012d", i * data_simulator.training_labels.dimension(1) + j); std::string name(name_char); encoding_nodes_en2posterior.push_back(name); name_char = new char[512]; sprintf(name_char, "EN2Prior_%012d", i * data_simulator.training_labels.dimension(1) + j); name = name_char; encoding_nodes_en2prior.push_back(name); name_char = new char[512]; sprintf(name_char, "EN2LogVar_%012d", i * data_simulator.training_labels.dimension(1) + j); name = name_char; encoding_nodes_en2logvar.push_back(name); delete[] name_char; } } } } // define the model interpreters std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; setModelInterpreterParameters(model_interpreters, args...); // define the model trainer ModelTrainerExt<float> model_trainer; setModelTrainerParameters(model_trainer, args...); std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper1, loss_function_helper2, loss_function_helper3; loss_function_helper1.output_nodes_ = output_nodes; loss_function_helper1.loss_functions_ = { std::make_shared<CrossEntropyWithLogitsLossOp<float>>(CrossEntropyWithLogitsLossOp<float>(1e-24, 1.0)) }; loss_function_helper1.loss_function_grads_ = { std::make_shared<CrossEntropyWithLogitsLossGradOp<float>>(CrossEntropyWithLogitsLossGradOp<float>(1e-24, 1.0)) }; loss_function_helpers.push_back(loss_function_helper1); if (std::get<EvoNetParameters::ModelTrainer::AddGaussian>(parameters).get()) { if (std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get() > 0) { loss_function_helper2.output_nodes_ = encoding_nodes_en0posterior; loss_function_helper2.loss_functions_ = { std::make_shared<NegativeLogLikelihoodLossOp<float>>(NegativeLogLikelihoodLossOp<float>(1e-6, -1 / model_trainer.getBatchSize())) }; loss_function_helper2.loss_function_grads_ = { std::make_shared<NegativeLogLikelihoodLossGradOp<float>>(NegativeLogLikelihoodLossGradOp<float>(1e-6, -1 / model_trainer.getBatchSize())) }; loss_function_helpers.push_back(loss_function_helper2); loss_function_helper3.output_nodes_ = encoding_nodes_en0prior; loss_function_helper3.loss_functions_ = { std::make_shared<NegativeLogLikelihoodLossOp<float>>(NegativeLogLikelihoodLossOp<float>(1e-6, 1 / model_trainer.getBatchSize())) }; loss_function_helper3.loss_function_grads_ = { std::make_shared<NegativeLogLikelihoodLossGradOp<float>>(NegativeLogLikelihoodLossGradOp<float>(1e-6, 1 / model_trainer.getBatchSize())) }; loss_function_helpers.push_back(loss_function_helper3); } if (std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get() > 0) { loss_function_helper2.output_nodes_ = encoding_nodes_en1posterior; loss_function_helper2.loss_functions_ = { std::make_shared<NegativeLogLikelihoodLossOp<float>>(NegativeLogLikelihoodLossOp<float>(1e-6, -1 / model_trainer.getBatchSize())) }; loss_function_helper2.loss_function_grads_ = { std::make_shared<NegativeLogLikelihoodLossGradOp<float>>(NegativeLogLikelihoodLossGradOp<float>(1e-6, -1 / model_trainer.getBatchSize())) }; loss_function_helpers.push_back(loss_function_helper2); loss_function_helper3.output_nodes_ = encoding_nodes_en1prior; loss_function_helper3.loss_functions_ = { std::make_shared<NegativeLogLikelihoodLossOp<float>>(NegativeLogLikelihoodLossOp<float>(1e-6, 1 / model_trainer.getBatchSize())) }; loss_function_helper3.loss_function_grads_ = { std::make_shared<NegativeLogLikelihoodLossGradOp<float>>(NegativeLogLikelihoodLossGradOp<float>(1e-6, 1 / model_trainer.getBatchSize())) }; loss_function_helpers.push_back(loss_function_helper3); } loss_function_helper2.output_nodes_ = encoding_nodes_en2posterior; loss_function_helper2.loss_functions_ = { std::make_shared<NegativeLogLikelihoodLossOp<float>>(NegativeLogLikelihoodLossOp<float>(1e-6, -1 / model_trainer.getBatchSize())) }; loss_function_helper2.loss_function_grads_ = { std::make_shared<NegativeLogLikelihoodLossGradOp<float>>(NegativeLogLikelihoodLossGradOp<float>(1e-6, -1 / model_trainer.getBatchSize())) }; loss_function_helpers.push_back(loss_function_helper2); loss_function_helper3.output_nodes_ = encoding_nodes_en2prior; loss_function_helper3.loss_functions_ = { std::make_shared<NegativeLogLikelihoodLossOp<float>>(NegativeLogLikelihoodLossOp<float>(1e-6, 1 / model_trainer.getBatchSize())) }; loss_function_helper3.loss_function_grads_ = { std::make_shared<NegativeLogLikelihoodLossGradOp<float>>(NegativeLogLikelihoodLossGradOp<float>(1e-6, 1 / model_trainer.getBatchSize())) }; loss_function_helpers.push_back(loss_function_helper3); } model_trainer.setLossFunctionHelpers(loss_function_helpers); std::vector<MetricFunctionHelper<float>> metric_function_helpers; MetricFunctionHelper<float> metric_function_helper1, metric_function_helper2; metric_function_helper1.output_nodes_ = output_nodes; metric_function_helper1.metric_functions_ = { std::make_shared<AccuracyMCMicroOp<float>>(AccuracyMCMicroOp<float>()), std::make_shared<PrecisionMCMicroOp<float>>(PrecisionMCMicroOp<float>()) }; metric_function_helper1.metric_names_ = { "AccuracyMCMicro", "PrecisionMCMicro" }; metric_function_helpers.push_back(metric_function_helper1); if (std::get<EvoNetParameters::ModelTrainer::AddGaussian>(parameters).get()) { if (std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get() > 0) { metric_function_helper1.output_nodes_ = encoding_nodes_en0logvar; metric_function_helper1.metric_functions_ = { std::make_shared<MAEOp<float>>(MAEOp<float>()) }; metric_function_helper1.metric_names_ = { "MAE_EN0LogVar" }; metric_function_helpers.push_back(metric_function_helper1); } if (std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get() > 0) { metric_function_helper1.output_nodes_ = encoding_nodes_en1logvar; metric_function_helper1.metric_functions_ = { std::make_shared<MAEOp<float>>(MAEOp<float>()) }; metric_function_helper1.metric_names_ = { "MAE_EN1LogVar" }; metric_function_helpers.push_back(metric_function_helper1); } metric_function_helper1.output_nodes_ = encoding_nodes_en2logvar; metric_function_helper1.metric_functions_ = { std::make_shared<MAEOp<float>>(MAEOp<float>()) }; metric_function_helper1.metric_names_ = { "MAE_EN2LogVar" }; metric_function_helpers.push_back(metric_function_helper1); } model_trainer.setMetricFunctionHelpers(metric_function_helpers); // define the model replicator for growth mode ModelReplicatorExt<float> model_replicator; setModelReplicatorParameters(model_replicator, args...); // define the initial population Model<float> model; if (std::get<EvoNetParameters::Main::MakeModel>(parameters).get()) { std::cout << "Making the model..." << std::endl; model_trainer.makeFullyConnBayes(model, input_size, output_size, std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden2>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::AddGaussian>(parameters).get(), -1, -4, 0.5, true, std::get<EvoNetParameters::ModelTrainer::LearningRate>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::GradientClipping>(parameters).get()); // Baseline model.setId(0); } else { ModelFile<float> model_file; ModelInterpreterFileDefaultDevice<float> model_interpreter_file; loadModelFromParameters(model, model_interpreters.at(0), model_file, model_interpreter_file, args...); } model.setName(std::get<EvoNetParameters::General::OutputDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get()); //So that all output will be written to a specific directory // Run the training, evaluation, or evolution runTrainEvalEvoFromParameters<float>(model, model_interpreters, model_trainer, population_trainer, model_replicator, data_simulator, model_logger, population_logger, input_nodes, args...); } /// MNIST_CovNet_example 0 C:/Users/dmccloskey/Documents/GitHub/mnist/Parameters.csv int main(int argc, char** argv) { // Parse the user commands int id_int = -1; std::string parameters_filename = ""; parseCommandLineArguments(argc, argv, id_int, parameters_filename); // Set the parameter names and defaults EvoNetParameters::General::ID id("id", -1); EvoNetParameters::General::DataDir data_dir("data_dir", std::string("")); EvoNetParameters::General::OutputDir output_dir("output_dir", std::string("")); EvoNetParameters::Main::DeviceId device_id("device_id", 0); EvoNetParameters::Main::ModelName model_name("model_name", ""); EvoNetParameters::Main::MakeModel make_model("make_model", true); EvoNetParameters::Main::LoadModelCsv load_model_csv("load_model_csv", false); EvoNetParameters::Main::LoadModelBinary load_model_binary("load_model_binary", false); EvoNetParameters::Main::TrainModel train_model("train_model", true); EvoNetParameters::Main::EvolveModel evolve_model("evolve_model", false); EvoNetParameters::Main::EvaluateModel evaluate_model("evaluate_model", false); EvoNetParameters::Main::EvaluateModels evaluate_models("evaluate_models", false); EvoNetParameters::Examples::ModelType model_type("model_type", "Solution"); EvoNetParameters::Examples::SimulationType simulation_type("simulation_type", ""); EvoNetParameters::PopulationTrainer::PopulationName population_name("population_name", ""); EvoNetParameters::PopulationTrainer::NGenerations n_generations("n_generations", 1); EvoNetParameters::PopulationTrainer::NInterpreters n_interpreters("n_interpreters", 1); EvoNetParameters::PopulationTrainer::PruneModelNum prune_model_num("prune_model_num", 10); EvoNetParameters::PopulationTrainer::RemoveIsolatedNodes remove_isolated_nodes("remove_isolated_nodes", true); EvoNetParameters::PopulationTrainer::CheckCompleteModelInputToOutput check_complete_model_input_to_output("check_complete_model_input_to_output", true); EvoNetParameters::PopulationTrainer::PopulationSize population_size("population_size", 128); EvoNetParameters::PopulationTrainer::NTop n_top("n_top", 8); EvoNetParameters::PopulationTrainer::NRandom n_random("n_random", 8); EvoNetParameters::PopulationTrainer::NReplicatesPerModel n_replicates_per_model("n_replicates_per_model", 1); EvoNetParameters::PopulationTrainer::ResetModelCopyWeights reset_model_copy_weights("reset_model_copy_weights", true); EvoNetParameters::PopulationTrainer::ResetModelTemplateWeights reset_model_template_weights("reset_model_template_weights", true); EvoNetParameters::PopulationTrainer::Logging population_logging("population_logging", true); EvoNetParameters::PopulationTrainer::SetPopulationSizeFixed set_population_size_fixed("set_population_size_fixed", false); EvoNetParameters::PopulationTrainer::SetPopulationSizeDoubling set_population_size_doubling("set_population_size_doubling", true); EvoNetParameters::PopulationTrainer::SetTrainingStepsByModelSize set_training_steps_by_model_size("set_training_steps_by_model_size", false); EvoNetParameters::ModelTrainer::BatchSize batch_size("batch_size", 32); EvoNetParameters::ModelTrainer::MemorySize memory_size("memory_size", 1); EvoNetParameters::ModelTrainer::NEpochsTraining n_epochs_training("n_epochs_training", 1000); EvoNetParameters::ModelTrainer::NEpochsValidation n_epochs_validation("n_epochs_validation", 25); EvoNetParameters::ModelTrainer::NEpochsEvaluation n_epochs_evaluation("n_epochs_evaluation", 10); EvoNetParameters::ModelTrainer::NTBTTSteps n_tbtt_steps("n_tbtt_steps", 64); EvoNetParameters::ModelTrainer::NTETTSteps n_tett_steps("n_tett_steps", 64); EvoNetParameters::ModelTrainer::Verbosity verbosity("verbosity", 1); EvoNetParameters::ModelTrainer::LoggingTraining logging_training("logging_training", true); EvoNetParameters::ModelTrainer::LoggingValidation logging_validation("logging_validation", false); EvoNetParameters::ModelTrainer::LoggingEvaluation logging_evaluation("logging_evaluation", true); EvoNetParameters::ModelTrainer::FindCycles find_cycles("find_cycles", true); EvoNetParameters::ModelTrainer::FastInterpreter fast_interpreter("fast_interpreter", true); EvoNetParameters::ModelTrainer::PreserveOoO preserve_ooo("preserve_ooo", true); EvoNetParameters::ModelTrainer::InterpretModel interpret_model("interpret_model", true); EvoNetParameters::ModelTrainer::ResetInterpreter reset_interpreter("reset_interpreter", true); EvoNetParameters::ModelTrainer::ResetModel reset_model("reset_model", false); EvoNetParameters::ModelTrainer::NHidden0 n_hidden_0("n_hidden_0", 128); EvoNetParameters::ModelTrainer::NHidden1 n_hidden_1("n_hidden_1", 128); EvoNetParameters::ModelTrainer::NHidden2 n_hidden_2("n_hidden_2", 0); EvoNetParameters::ModelTrainer::AddGaussian add_gaussian("add_gaussian", true); EvoNetParameters::ModelTrainer::AddMixedGaussian add_mixed_gaussian("add_mixed_gaussian", false); EvoNetParameters::ModelTrainer::LearningRate learning_rate("learning_rate", 1e-3); EvoNetParameters::ModelTrainer::GradientClipping gradient_clipping("gradient_clipping", 10); EvoNetParameters::ModelReplicator::NNodeDownAdditionsLB n_node_down_additions_lb("n_node_down_additions_lb", 0); EvoNetParameters::ModelReplicator::NNodeRightAdditionsLB n_node_right_additions_lb("n_node_right_additions_lb", 0); EvoNetParameters::ModelReplicator::NNodeDownCopiesLB n_node_down_copies_lb("n_node_down_copies_lb", 0); EvoNetParameters::ModelReplicator::NNodeRightCopiesLB n_node_right_copies_lb("n_node_right_copies_lb", 0); EvoNetParameters::ModelReplicator::NLinkAdditionsLB n_link_additons_lb("n_link_additons_lb", 0); EvoNetParameters::ModelReplicator::NLinkCopiesLB n_link_copies_lb("n_link_copies_lb", 0); EvoNetParameters::ModelReplicator::NNodeDeletionsLB n_node_deletions_lb("n_node_deletions_lb", 0); EvoNetParameters::ModelReplicator::NLinkDeletionsLB n_link_deletions_lb("n_link_deletions_lb", 0); EvoNetParameters::ModelReplicator::NNodeActivationChangesLB n_node_activation_changes_lb("n_node_activation_changes_lb", 0); EvoNetParameters::ModelReplicator::NNodeIntegrationChangesLB n_node_integration_changes_lb("n_node_integration_changes_lb", 0); EvoNetParameters::ModelReplicator::NModuleAdditionsLB n_module_additions_lb("n_module_additions_lb", 0); EvoNetParameters::ModelReplicator::NModuleCopiesLB n_module_copies_lb("n_module_copies_lb", 0); EvoNetParameters::ModelReplicator::NModuleDeletionsLB n_module_deletions_lb("n_module_deletions_lb", 0); EvoNetParameters::ModelReplicator::NNodeDownAdditionsUB n_node_down_additions_ub("n_node_down_additions_ub", 0); EvoNetParameters::ModelReplicator::NNodeRightAdditionsUB n_node_right_additions_ub("n_node_right_additions_ub", 0); EvoNetParameters::ModelReplicator::NNodeDownCopiesUB n_node_down_copies_ub("n_node_down_copies_ub", 0); EvoNetParameters::ModelReplicator::NNodeRightCopiesUB n_node_right_copies_ub("n_node_right_copies_ub", 0); EvoNetParameters::ModelReplicator::NLinkAdditionsUB n_link_additons_ub("n_link_additons_ub", 0); EvoNetParameters::ModelReplicator::NLinkCopiesUB n_link_copies_ub("n_link_copies_ub", 0); EvoNetParameters::ModelReplicator::NNodeDeletionsUB n_node_deletions_ub("n_node_deletions_ub", 0); EvoNetParameters::ModelReplicator::NLinkDeletionsUB n_link_deletions_ub("n_link_deletions_ub", 0); EvoNetParameters::ModelReplicator::NNodeActivationChangesUB n_node_activation_changes_ub("n_node_activation_changes_ub", 0); EvoNetParameters::ModelReplicator::NNodeIntegrationChangesUB n_node_integration_changes_ub("n_node_integration_changes_ub", 0); EvoNetParameters::ModelReplicator::NModuleAdditionsUB n_module_additions_ub("n_module_additions_ub", 0); EvoNetParameters::ModelReplicator::NModuleCopiesUB n_module_copies_ub("n_module_copies_ub", 0); EvoNetParameters::ModelReplicator::NModuleDeletionsUB n_module_deletions_ub("n_module_deletions_ub", 0); EvoNetParameters::ModelReplicator::SetModificationRateFixed set_modification_rate_fixed("set_modification_rate_fixed", false); EvoNetParameters::ModelReplicator::SetModificationRateByPrevError set_modification_rate_by_prev_error("set_modification_rate_by_prev_error", false); auto parameters = std::make_tuple(id, data_dir, output_dir, device_id, model_name, make_model, load_model_csv, load_model_binary, train_model, evolve_model, evaluate_model, evaluate_models, model_type, simulation_type, population_name, n_generations, n_interpreters, prune_model_num, remove_isolated_nodes, check_complete_model_input_to_output, population_size, n_top, n_random, n_replicates_per_model, reset_model_copy_weights, reset_model_template_weights, population_logging, set_population_size_fixed, set_population_size_doubling, set_training_steps_by_model_size, batch_size, memory_size, n_epochs_training, n_epochs_validation, n_epochs_evaluation, n_tbtt_steps, n_tett_steps, verbosity, logging_training, logging_validation, logging_evaluation, find_cycles, fast_interpreter, preserve_ooo, interpret_model, reset_model, reset_interpreter, n_hidden_0, n_hidden_1, n_hidden_2, add_gaussian, add_mixed_gaussian, learning_rate, gradient_clipping, n_node_down_additions_lb, n_node_right_additions_lb, n_node_down_copies_lb, n_node_right_copies_lb, n_link_additons_lb, n_link_copies_lb, n_node_deletions_lb, n_link_deletions_lb, n_node_activation_changes_lb, n_node_integration_changes_lb, n_module_additions_lb, n_module_copies_lb, n_module_deletions_lb, n_node_down_additions_ub, n_node_right_additions_ub, n_node_down_copies_ub, n_node_right_copies_ub, n_link_additons_ub, n_link_copies_ub, n_node_deletions_ub, n_link_deletions_ub, n_node_activation_changes_ub, n_node_integration_changes_ub, n_module_additions_ub, n_module_copies_ub, n_module_deletions_ub, set_modification_rate_fixed, set_modification_rate_by_prev_error); // Read in the parameters LoadParametersFromCsv loadParametersFromCsv(id_int, parameters_filename); parameters = EvoNet::apply([&loadParametersFromCsv](auto&& ...args) { return loadParametersFromCsv(args...); }, parameters); // Run the application EvoNet::apply([](auto&& ...args) { main_MNIST(args ...); }, parameters); return 0; }<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE Preprocessing test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/core/Preprocessing.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(preprocessing) BOOST_AUTO_TEST_CASE(P_selectRandomElement) { // [TODO: make test; currently, combined with selectRandomNode1] } BOOST_AUTO_TEST_CASE(P_UnitScaleFunctor) { Eigen::Tensor<float, 2> data(2, 2); data.setValues({{ 0, 2 }, { 3, 4 }}); UnitScaleFunctor<float> unit_scale(data); BOOST_CHECK_CLOSE(unit_scale.getUnitScale(), 0.25, 1e-6); Eigen::Tensor<float, 2> data_test = data.unaryExpr(UnitScaleFunctor<float>(data)); BOOST_CHECK_CLOSE(data_test(0, 0), 0.0, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 1), 1.0, 1e-6); } BOOST_AUTO_TEST_CASE(P_LinearScaleFunctor) { Eigen::Tensor<float, 2> data(2, 2); data.setValues({ { 0, 2 }, { 4, 8 } }); Eigen::Tensor<float, 2> data_test = data.unaryExpr(LinearScaleFunctor<float>(0, 8, -1, 1)); BOOST_CHECK_CLOSE(data_test(0, 0), -1.0, 1e-6); BOOST_CHECK_CLOSE(data_test(0, 1), -0.5, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 0), 0.0, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 1), 1.0, 1e-6); } BOOST_AUTO_TEST_CASE(P_LinearScale) { Eigen::Tensor<float, 3> data(2, 2, 2); data.setValues({ {{ 0, 2 }, { 4, 8 }}, {{ 1, 1 }, { 3, 5 }} }); // Test default initialization for the domain and setters LinearScale<float, 3> linearScale1(-1, 1); linearScale1.setDomain(0, 8); Eigen::Tensor<float, 3> data_test = linearScale1(data); BOOST_CHECK_CLOSE(data_test(0, 0, 0), -1.0, 1e-6); BOOST_CHECK_CLOSE(data_test(0, 0, 1), -0.5, 1e-6); BOOST_CHECK_CLOSE(data_test(0, 1, 0), 0.0, 1e-6); BOOST_CHECK_CLOSE(data_test(0, 1, 1), 1.0, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 0, 0), -0.75, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 0, 1), -0.75, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 1, 0), -0.25, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 1, 1), 0.25, 1e-6); // Test with manual domain and range initialization LinearScale<float, 3> linearScale(0, 8, -1, 1); data_test = linearScale(data); BOOST_CHECK_CLOSE(data_test(0, 0, 0), -1.0, 1e-6); BOOST_CHECK_CLOSE(data_test(0, 0, 1), -0.5, 1e-6); BOOST_CHECK_CLOSE(data_test(0, 1, 0), 0.0, 1e-6); BOOST_CHECK_CLOSE(data_test(0, 1, 1), 1.0, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 0, 0), -0.75, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 0, 1), -0.75, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 1, 0), -0.25, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 1, 1), 0.25, 1e-6); // Test with domain calculation and range initialization LinearScale<float, 3> linearScale2(data, -1, 1); data_test = linearScale2(data); BOOST_CHECK_CLOSE(data_test(0, 0, 0), -1.0, 1e-6); BOOST_CHECK_CLOSE(data_test(0, 0, 1), -0.5, 1e-6); BOOST_CHECK_CLOSE(data_test(0, 1, 0), 0.0, 1e-6); BOOST_CHECK_CLOSE(data_test(0, 1, 1), 1.0, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 0, 0), -0.75, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 0, 1), -0.75, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 1, 0), -0.25, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 1, 1), 0.25, 1e-6); } BOOST_AUTO_TEST_CASE(P_Standardize) { Eigen::Tensor<float, 3> data(2, 2, 2); data.setValues({ {{ 0, 2 }, { 4, 8 }}, {{ 1, 3 }, { 3, 5 }} }); // Test default initialization with setters and getters Standardize<float, 3> standardize1; standardize1.setMeanAndVar(1, 2); BOOST_CHECK_CLOSE(standardize1.getMean(), 1, 1e-6); BOOST_CHECK_CLOSE(standardize1.getVar(), 2, 1e-6); standardize1.setMeanAndVar(data); BOOST_CHECK_CLOSE(standardize1.getMean(), 3.25, 1e-6); BOOST_CHECK_CLOSE(standardize1.getVar(), 6.21428585, 1e-6); // Test with data initialization and getters Standardize<float, 3> standardize(data); BOOST_CHECK_CLOSE(standardize.getMean(), 3.25, 1e-6); BOOST_CHECK_CLOSE(standardize.getVar(), 6.21428585, 1e-6); // Test operator Eigen::Tensor<float, 3> data_test = standardize(data); BOOST_CHECK_CLOSE(data_test(0, 0, 0), -1.30373025, 1e-6); BOOST_CHECK_CLOSE(data_test(0, 0, 1), -0.501434684, 1e-6); BOOST_CHECK_CLOSE(data_test(0, 1, 0), 0.300860822, 1e-6); BOOST_CHECK_CLOSE(data_test(0, 1, 1), 1.90545189, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 0, 0), -0.902582467, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 0, 1), -0.100286946, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 1, 0), -0.100286946, 1e-6); BOOST_CHECK_CLOSE(data_test(1, 1, 1), 0.702008605, 1e-6); } BOOST_AUTO_TEST_CASE(P_MakeShuffleMatrix) { const int shuffle_dim_size = 8; std::vector<int> indices = { 0, 1, 2, 3, 4, 5, 6, 7 }; // Test default initialization with setters and getters MakeShuffleMatrix<float> shuffle1; shuffle1.setIndices(8); BOOST_CHECK(shuffle1.getIndices() != indices); for (int i = 0; i < shuffle_dim_size; ++i) { BOOST_CHECK_GE(shuffle1.getIndices().at(i), 0); BOOST_CHECK_LE(shuffle1.getIndices().at(i), 7); } shuffle1.setShuffleMatrix(true); //std::cout << "Shuffle_matrix\n" << shuffle1.getShuffleMatrix() << std::endl; for (int i = 0; i < shuffle_dim_size; ++i) { Eigen::Tensor<float, 0> row_sum = shuffle1.getShuffleMatrix().chip(i, 0).sum(); BOOST_CHECK_EQUAL(row_sum(0), 1); } // Test initialization with dim size MakeShuffleMatrix<float> shuffle2(shuffle_dim_size, true); BOOST_CHECK(shuffle2.getIndices() != indices); for (int i = 0; i < shuffle_dim_size; ++i) { BOOST_CHECK_GE(shuffle2.getIndices().at(i), 0); BOOST_CHECK_LE(shuffle2.getIndices().at(i), 7); } // Test initialization with indices to use MakeShuffleMatrix<float> shuffle3(indices, true); BOOST_CHECK(shuffle3.getIndices() == indices); //std::cout << "Shuffle_matrix\n" << shuffle3.getShuffleMatrix() << std::endl; for (int i = 0; i < shuffle_dim_size; ++i) { BOOST_CHECK_EQUAL(shuffle3.getShuffleMatrix()(i, i), 1); Eigen::Tensor<float, 0> row_sum = shuffle3.getShuffleMatrix().chip(i, 0).sum(); BOOST_CHECK_EQUAL(row_sum(0), 1); } // Test row/column shuffling on toy data Eigen::Tensor<float, 2> data(2, 3); data.setValues({ {1,2,3},{4,5,6} }); MakeShuffleMatrix<float> shuffle_col(std::vector<int>({1,2,0}), true); Eigen::Tensor<float, 2> col_shuffle = data; shuffle_col(col_shuffle, true); BOOST_CHECK_EQUAL(col_shuffle(0, 0), 2); BOOST_CHECK_EQUAL(col_shuffle(0, 1), 3); BOOST_CHECK_EQUAL(col_shuffle(0, 2), 1); BOOST_CHECK_EQUAL(col_shuffle(1, 0), 5); BOOST_CHECK_EQUAL(col_shuffle(1, 1), 6); BOOST_CHECK_EQUAL(col_shuffle(1, 2), 4); MakeShuffleMatrix<float> shuffle_row(std::vector<int>({ 1,0 }), false); Eigen::Tensor<float, 2> row_shuffle = data; shuffle_row(row_shuffle, false); BOOST_CHECK_EQUAL(row_shuffle(0, 0), 4); BOOST_CHECK_EQUAL(row_shuffle(0, 1), 5); BOOST_CHECK_EQUAL(row_shuffle(0, 2), 6); BOOST_CHECK_EQUAL(row_shuffle(1, 0), 1); BOOST_CHECK_EQUAL(row_shuffle(1, 1), 2); BOOST_CHECK_EQUAL(row_shuffle(1, 2), 3); // Test row/column shuffling on toy data Eigen::Tensor<double, 2> data_db(2, 3); data_db.setValues({ {1,2,3},{4,5,6} }); MakeShuffleMatrix<double> shuffle_col_db(std::vector<int>({ 1,2,0 }), true); Eigen::Tensor<double, 2> col_shuffle_db = data_db; shuffle_col_db(col_shuffle_db, true); BOOST_CHECK_EQUAL(col_shuffle_db(0, 0), 2); BOOST_CHECK_EQUAL(col_shuffle_db(0, 1), 3); BOOST_CHECK_EQUAL(col_shuffle_db(0, 2), 1); BOOST_CHECK_EQUAL(col_shuffle_db(1, 0), 5); BOOST_CHECK_EQUAL(col_shuffle_db(1, 1), 6); BOOST_CHECK_EQUAL(col_shuffle_db(1, 2), 4); MakeShuffleMatrix<double> shuffle_row_db(std::vector<int>({ 1,0 }), false); Eigen::Tensor<double, 2> row_shuffle_db = data_db; shuffle_row_db(row_shuffle_db, false); BOOST_CHECK_EQUAL(row_shuffle_db(0, 0), 4); BOOST_CHECK_EQUAL(row_shuffle_db(0, 1), 5); BOOST_CHECK_EQUAL(row_shuffle_db(0, 2), 6); BOOST_CHECK_EQUAL(row_shuffle_db(1, 0), 1); BOOST_CHECK_EQUAL(row_shuffle_db(1, 1), 2); BOOST_CHECK_EQUAL(row_shuffle_db(1, 2), 3); } BOOST_AUTO_TEST_CASE(P_LabelSmoother) { Eigen::Tensor<float, 1> data(2); data.setValues({ 0, 1 }); Eigen::Tensor<float, 1> data_test = data.unaryExpr(LabelSmoother<float>(0.1, 0.2)); BOOST_CHECK_CLOSE(data_test(0), 0.1, 1e-4); BOOST_CHECK_CLOSE(data_test(1), 0.8, 1e-4); } BOOST_AUTO_TEST_CASE(P_OneHotEncoder) { // TODO } BOOST_AUTO_TEST_CASE(SFcheckNan) { Eigen::Tensor<float, 1> values(2); values.setConstant(5.0f); Eigen::Tensor<float, 1> test(2); // control test = values.unaryExpr([](float c) { return checkNan<float>(c); }); BOOST_CHECK_CLOSE(test(0), 5.0, 1e-3); BOOST_CHECK_CLOSE(test(1), 5.0, 1e-3); // test values(0) = NAN; //NaN values(1) = INFINITY; //infinity test = values.unaryExpr([](float c) { return checkNan<float>(c); }); BOOST_CHECK_CLOSE(test(0), NAN, 1e-3); BOOST_CHECK_CLOSE(test(1), INFINITY, 1e-3); } BOOST_AUTO_TEST_CASE(SFsubstituteNanInf) { Eigen::Tensor<float, 1> values(3); values.setConstant(5.0f); Eigen::Tensor<float, 1> test(3); // control test = values.unaryExpr([](float c) { return substituteNanInf<float>(c); }); BOOST_CHECK_CLOSE(test(0), 5.0, 1e-3); BOOST_CHECK_CLOSE(test(1), 5.0, 1e-3); // test values(0) = NAN; //NaN values(1) = INFINITY; //infinity values(2) = -INFINITY; //infinity test = values.unaryExpr([](float c) { return substituteNanInf<float>(c); }); BOOST_CHECK_CLOSE(test(0), 0.0, 1e-3); BOOST_CHECK_CLOSE(test(1), 1e9, 1e-3); BOOST_CHECK_CLOSE(test(2), -1e9, 1e-3); } BOOST_AUTO_TEST_CASE(SFClipOp) { Eigen::Tensor<float, 1> net_input(3); net_input.setValues({ 0.0f, 1.0f, 0.5f }); // test input Eigen::Tensor<float, 1> result = net_input.unaryExpr(ClipOp<float>(0.1f, 0.0f, 1.0f)); BOOST_CHECK_CLOSE(result(0), 0.1, 1e-3); BOOST_CHECK_CLOSE(result(1), 0.9, 1e-3); BOOST_CHECK_CLOSE(result(2), 0.5, 1e-3); } BOOST_AUTO_TEST_CASE(SFRandBinaryOp) { Eigen::Tensor<float, 1> net_input(2); net_input.setValues({ 2.0f, 2.0f }); Eigen::Tensor<float, 1> result; // test input result = net_input.unaryExpr(RandBinaryOp<float>(0.0f)); BOOST_CHECK_CLOSE(result(0), 2.0, 1e-3); BOOST_CHECK_CLOSE(result(1), 2.0, 1e-3); result = net_input.unaryExpr(RandBinaryOp<float>(1.0f)); BOOST_CHECK_CLOSE(result(0), 0.0, 1e-3); BOOST_CHECK_CLOSE(result(1), 0.0, 1e-3); } BOOST_AUTO_TEST_CASE(assertClose) { BOOST_CHECK(!assert_close<float>(1.1, 1.2, 1e-4, 1e-4)); BOOST_CHECK(assert_close<float>(1.1, 1.2, 1, 1)); } BOOST_AUTO_TEST_CASE(P_GaussianMixture) { // TODO } BOOST_AUTO_TEST_CASE(P_SwissRoll) { // TODO } BOOST_AUTO_TEST_CASE(P_GumbelSampler) { Eigen::Tensor<float, 2> gumbel_samples = GumbelSampler<float>(2, 3); BOOST_CHECK_LE(gumbel_samples(0, 0), 10); BOOST_CHECK_GE(gumbel_samples(0, 0), -10); BOOST_CHECK_LE(gumbel_samples(1, 2), 10); BOOST_CHECK_GE(gumbel_samples(1, 2), -10); std::cout << gumbel_samples << std::endl; } BOOST_AUTO_TEST_CASE(P_GaussianSampler) { Eigen::Tensor<float, 2> gaussian_samples = GaussianSampler<float>(2, 3); BOOST_CHECK_LE(gaussian_samples(0, 0), 2); BOOST_CHECK_GE(gaussian_samples(0, 0), -2); BOOST_CHECK_LE(gaussian_samples(1, 2), 2); BOOST_CHECK_GE(gaussian_samples(1, 2), -2); std::cout << gaussian_samples << std::endl; } BOOST_AUTO_TEST_SUITE_END()<file_sep>// Test file to check for stream bug in libc++ #include <sstream> int main() { // create stringstream std::stringstream ss; ss << "-4.9X"; // try to extract double followed by character double d; ss >> d; if(!ss.fail()) return 1; else return 0; } <file_sep>/**TODO: Add copyright*/ #ifndef EVONET_POPULATIONTRAINER_H #define EVONET_POPULATIONTRAINER_H // .h #include <EvoNet/ml/ModelReplicator.h> #include <EvoNet/ml/ModelTrainer.h> #include <EvoNet/ml/PopulationLogger.h> #include <EvoNet/simulator/DataSimulator.h> // .cpp #include <EvoNet/io/WeightFile.h> #include <EvoNet/io/LinkFile.h> #include <EvoNet/io/NodeFile.h> #include <EvoNet/io/ModelFile.h> #include <EvoNet/io/ModelInterpreterFile.h> #include <algorithm> // tokenizing #include <regex> // tokenizing #include <utility> #include <numeric> // accumulate #include <thread> #include <future> #include <mutex> namespace EvoNet { // Concurrency helpers static std::mutex trainModel_mutex; static std::mutex validateModel_mutex; static std::mutex replicateModel_mutex; static std::mutex evalModel_mutex; std::atomic_size_t train_models_iter_{ 0 }; std::atomic_size_t validate_models_iter_{ 0 }; std::atomic_size_t replicate_models_iter_{ 0 }; std::atomic_size_t models_id_iter_{ 0 }; std::atomic_size_t eval_models_iter_{ 0 }; /** @brief Class to train a vector of models */ template<typename TensorT, typename InterpreterT> class PopulationTrainer { public: PopulationTrainer() = default; ///< Default constructor ~PopulationTrainer() = default; ///< Default destructor void setNTop(const int& n_top); ///< n_top setter void setNRandom(const int& n_random); ///< n_random setter void setNReplicatesPerModel(const int& n_replicates_per_model); ///< n_replicates_per_model setter void setNGenerations(const int& n_generations); ///< n_generations setter void setLogging(bool log_training = false); ///< enable_logging setter void setRemoveIsolatedNodes(const bool& remove_isolated_nodes); void setPruneModelNum(const int& prune_model_num); void setCheckCompleteModelInputToOutput(const bool& check_complete_input_to_output); void setSelectModels(const bool& select_models); void setResetModelCopyWeights(const bool& reset_model_copy_weights); void setResetModelTemplateWeights(const bool& reset_model_template_weights); void setPopulationSize(const int& population_size) { population_size_ = population_size; } int getNTop() const; ///< batch_size setter int getNRandom() const; ///< memory_size setter int getNReplicatesPerModel() const; ///< n_epochs setter int getNGenerations() const; ///< n_epochs setter bool getLogTraining() const; ///< log_training getter bool getRemoveIsolatedNodes() const; int getPruneModelNum() const; bool getCheckCompleteModelInputToOutput() const; bool getSelectModels() const; bool getResetModelCopyWeights() const; bool getResetModelTemplateWeights() const; int getPopulationSize() { return population_size_; } /** @brief Remove models with non-unique names from the population of models @param[in, out] models The vector (i.e., population) of models to select from */ void removeDuplicateModels(std::vector<Model<TensorT>>& models); /** @brief Select the top N models with the least error Use cases with different parameters: - Top N selection: set n_top ? 0, set n_random == 0 - Top N random selection: set n_top > 0, set n_random > 0 && n_random <= n_top - Random selection: set n_top == 0, set n_random > 0 - Binary selection: given models.size() == 2, set n_top == 1, set n_random == 0 [TESTS: add thread tests] @param[in, out] models The vector (i.e., population) of models to select from @returns a list of pairs of model_name to average validation error */ std::vector<std::tuple<int, std::string, TensorT>> selectModels( std::vector<Model<TensorT>>& models, ModelTrainer<TensorT, InterpreterT>& model_trainer, std::vector<InterpreterT>& model_interpreters, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes); /** @brief validate all of the models @returns key value pair of model_name and model_error */ static bool validateModels_( std::vector<Model<TensorT>>& model, ModelTrainer<TensorT, InterpreterT>& model_trainer, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes, std::vector<std::tuple<int, std::string, TensorT>>& model_validation_errors); static std::tuple<int, std::string, TensorT> validateModel_( Model<TensorT>& model, ModelTrainer<TensorT, InterpreterT>& model_trainer, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes); /** @brief return the top N models with the lowest error. @returns key value pair of model_name and model_error */ static std::vector<std::tuple<int, std::string, TensorT>> getTopNModels_( std::vector<std::tuple<int, std::string, TensorT>> model_validation_scores, const int& n_top); /** @brief return a random list of model names. @returns key value pair of model_name and model_error */ static std::vector<std::tuple<int, std::string, TensorT>> getRandomNModels_( std::vector<std::tuple<int, std::string, TensorT>> model_validation_scores, const int& n_random); /** @brief Replicates the models in the population. Replicates are modified while the original models are persisted. Example use case: - 2 selected models are replicated 4 times with modifications resulting in a population of 10 models (2 original, and 8 modified) [TESTS: add thread tests] [TESTS: add tests for remove_isolated_nodes, prune_model_num, check_complete_input_to_output, reset_model_copy_weights, and reset_model_template_weights parameters] @param[in, out] models The vector (i.e., population) of models to modify @param[in] model_replicator The replicator to use @returns A vector of models */ void replicateModels( std::vector<Model<TensorT>>& models, ModelReplicator<TensorT>& model_replicator, const std::string& unique_str = "", const int& n_threads = 1); static bool replicateModels_( std::vector<Model<TensorT>>& models, ModelReplicator<TensorT>& model_replicator, const std::string& unique_str, const int& models_to_replicate, const int& n_replicates_per_model, const bool& remove_isolated_nodes, const int& prune_model_num, const bool& check_complete_input_to_output, const bool& reset_model_copy_weights); static std::pair<bool, Model<TensorT>> replicateModel_( const Model<TensorT>& model, ModelReplicator<TensorT>& model_replicator, const std::string& unique_str, const int& cnt, const bool& remove_isolated_nodes, const int& prune_model_num, const bool& check_complete_input_to_output, const bool& reset_model_copy_weights); /** @brief Trains each of the models in the population using the same test data set [TESTS: add thread tests] @param[in, out] models The vector of models to train @param[in] model_trainer The trainer to use */ void trainModels( std::vector<Model<TensorT>>& models, ModelTrainer<TensorT, InterpreterT>& model_trainer, std::vector<InterpreterT>& model_interpreters, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes); static bool trainModels_( std::vector<Model<TensorT>>& models, ModelTrainer<TensorT, InterpreterT>& model_trainer, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes); static bool trainModel_( Model<TensorT>& model, ModelTrainer<TensorT, InterpreterT>& model_trainer, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes); /** @brief Evaluates each of the models in the population using the same test data set [TESTS: add thread tests] @param[in, out] models The vector of models to evaluate @param[in] model_trainer The trainer to use */ void evalModels( std::vector<Model<TensorT>>& models, ModelTrainer<TensorT, InterpreterT>& model_trainer, std::vector<InterpreterT>& model_interpreters, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes); static bool evalModels_( std::vector<Model<TensorT>>& models, ModelTrainer<TensorT, InterpreterT>& model_trainer, InterpreterT& model_interpreters, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes); static bool evalModel_( Model<TensorT>& model, ModelTrainer<TensorT, InterpreterT>& model_trainer, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes); /** @brief Train the population @param[in, out] models The vector of models to evolve @param[in] population_name The name of the population (used for logging) @param[in] model_trainer The trainer to use @param[in] model_interpreters The interpreters to use for model building and training (each interpreter is given its own thread) @param[in] model_replicator The replicator to use @param[in] data_simulator The data simulate/generator to use @param[in] population_logger The population logger to use @param[in] input_nodes Vector of model input nodes */ std::vector<std::vector<std::tuple<int, std::string, TensorT>>> evolveModels( std::vector<Model<TensorT>>& models, const std::string& population_name, ModelTrainer<TensorT, InterpreterT>& model_trainer, std::vector<InterpreterT>& model_interpreters, ModelReplicator<TensorT>& model_replicator, DataSimulator<TensorT>& data_simulator, ModelLogger<TensorT>& model_logger, PopulationLogger<TensorT>& population_logger, const std::vector<std::string>& input_nodes); /** @brief Evaluate the population @param[in, out] models The vector of models to evaluate @param[in] population_name The name of the population (used for logging) @param[in] model_interpreters The interpreters to use for model building and training (each interpreter is given its own thread) @param[in] model_replicator The replicator to use @param[in] data_simulator The data simulate/generator to use @param[in] population_logger The population logger to use @param[in] input_nodes Vector of model input nodes */ void evaluateModels( std::vector<Model<TensorT>>& models, const std::string& population_name, ModelTrainer<TensorT, InterpreterT>& model_trainer, std::vector<InterpreterT>& model_interpreters, ModelReplicator<TensorT>& model_replicator, DataSimulator<TensorT>& data_simulator, ModelLogger<TensorT>& model_logger, const std::vector<std::string>& input_nodes); /** @brief Entry point for users to code their adaptive scheduler to modify models population dynamic parameters based on a given trigger @param[in] n_generations The number of evolution generations @param[in] models The models in the population @param[in] model_errors The trace of models errors from validation at each generation */ virtual void adaptivePopulationScheduler( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations); /** @brief Entry point for users to code their training logger [TODO: add tests] @param[in] n_generations The number of evolution generations @param[in, out] population_logger The population logger @param[in] models_validation_errors_per_generation The model validation errors in the population */ virtual void trainingPopulationLogger( const int& n_generations, std::vector<Model<TensorT>>& models, PopulationLogger<TensorT>& population_logger, const std::vector<std::tuple<int, std::string, TensorT>>& models_validation_errors_per_generation); void updateNEpochsTraining(ModelTrainer<TensorT, InterpreterT>& model_trainer); ///< Update the number of training epochs void setNEpochsTraining(const int& n_epochs); ///< n_epochs setter int getNEpochsTraining() const; ///< n_epochs setter protected: // population dynamics int population_size_ = 128; ///< The total number of models in the population int n_top_ = 0; ///< The number models to select int n_random_ = 0; ///< The number of random models to select from the pool of top models int n_generations_ = 0; ///< The number of generations to evolve the models bool log_training_ = false; // model replicator settings bool remove_isolated_nodes_ = true; int prune_model_num_ = 10; bool check_complete_input_to_output_ = true; bool reset_model_copy_weights_ = false; bool reset_model_template_weights_ = false; private: bool select_models_ = true; ///< Whether to skip the selection step or not (set internally based on the replication scheme) int n_epochs_training_ = -1; ///< The number of epochs to train the models (set internally based on the `ModelInterpreter::n_epochs_training_`) int n_replicates_per_model_ = 0; ///< The number of replications per model (calculated internally based on the desired population size) }; template<typename TensorT, typename InterpreterT> void PopulationTrainer<TensorT, InterpreterT>::setNTop(const int & n_top) { n_top_ = n_top; } template<typename TensorT, typename InterpreterT> void PopulationTrainer<TensorT, InterpreterT>::setNRandom(const int & n_random) { n_random_ = n_random; } template<typename TensorT, typename InterpreterT> void PopulationTrainer<TensorT, InterpreterT>::setNReplicatesPerModel(const int & n_replicates_per_model) { n_replicates_per_model_ = n_replicates_per_model; } template<typename TensorT, typename InterpreterT> void PopulationTrainer<TensorT, InterpreterT>::setNGenerations(const int & n_generations) { n_generations_ = n_generations; } template<typename TensorT, typename InterpreterT> void PopulationTrainer<TensorT, InterpreterT>::setLogging(bool log_training) { log_training_ = log_training; } template<typename TensorT, typename InterpreterT> inline void PopulationTrainer<TensorT, InterpreterT>::setRemoveIsolatedNodes(const bool & remove_isolated_nodes) { remove_isolated_nodes_ = remove_isolated_nodes; } template<typename TensorT, typename InterpreterT> inline void PopulationTrainer<TensorT, InterpreterT>::setPruneModelNum(const int & prune_model_num) { prune_model_num_ = prune_model_num; } template<typename TensorT, typename InterpreterT> inline void PopulationTrainer<TensorT, InterpreterT>::setCheckCompleteModelInputToOutput(const bool & check_complete_input_to_output) { check_complete_input_to_output_ = check_complete_input_to_output; } template<typename TensorT, typename InterpreterT> inline void PopulationTrainer<TensorT, InterpreterT>::setSelectModels(const bool & select_models) { select_models_ = select_models; } template<typename TensorT, typename InterpreterT> inline void PopulationTrainer<TensorT, InterpreterT>::setResetModelCopyWeights(const bool & reset_model_copy_weights) { reset_model_copy_weights_ = reset_model_copy_weights; } template<typename TensorT, typename InterpreterT> inline void PopulationTrainer<TensorT, InterpreterT>::setResetModelTemplateWeights(const bool& reset_model_template_weights) { reset_model_template_weights_ = reset_model_template_weights; } template<typename TensorT, typename InterpreterT> int PopulationTrainer<TensorT, InterpreterT>::getNTop() const { return n_top_; } template<typename TensorT, typename InterpreterT> int PopulationTrainer<TensorT, InterpreterT>::getNRandom() const { return n_random_; } template<typename TensorT, typename InterpreterT> int PopulationTrainer<TensorT, InterpreterT>::getNReplicatesPerModel() const { return n_replicates_per_model_; } template<typename TensorT, typename InterpreterT> int PopulationTrainer<TensorT, InterpreterT>::getNGenerations() const { return n_generations_; } template<typename TensorT, typename InterpreterT> inline bool PopulationTrainer<TensorT, InterpreterT>::getLogTraining() const { return log_training_; } template<typename TensorT, typename InterpreterT> inline bool PopulationTrainer<TensorT, InterpreterT>::getRemoveIsolatedNodes() const { return remove_isolated_nodes_; } template<typename TensorT, typename InterpreterT> inline int PopulationTrainer<TensorT, InterpreterT>::getPruneModelNum() const { return prune_model_num_; } template<typename TensorT, typename InterpreterT> inline bool PopulationTrainer<TensorT, InterpreterT>::getCheckCompleteModelInputToOutput() const { return check_complete_input_to_output_; } template<typename TensorT, typename InterpreterT> inline bool PopulationTrainer<TensorT, InterpreterT>::getSelectModels() const { return select_models_; } template<typename TensorT, typename InterpreterT> inline bool PopulationTrainer<TensorT, InterpreterT>::getResetModelCopyWeights() const { return reset_model_copy_weights_; } template<typename TensorT, typename InterpreterT> inline bool PopulationTrainer<TensorT, InterpreterT>::getResetModelTemplateWeights() const { return reset_model_template_weights_; } template<typename TensorT, typename InterpreterT> void PopulationTrainer<TensorT, InterpreterT>::removeDuplicateModels(std::vector<Model<TensorT>>& models) { std::map<std::string, Model<TensorT>> unique_models; for (const Model<TensorT>& model : models) unique_models.emplace(model.getName(), model); if (unique_models.size() < models.size()) { models.clear(); for (const auto& model : unique_models) { models.push_back(model.second); } } } template<typename TensorT, typename InterpreterT> std::vector<std::tuple<int, std::string, TensorT>> PopulationTrainer<TensorT, InterpreterT>::selectModels( std::vector<Model<TensorT>>& models, ModelTrainer<TensorT, InterpreterT>& model_trainer, std::vector<InterpreterT>& model_interpreters, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes) { // score the models std::vector<std::tuple<int, std::string, TensorT>> models_validation_errors; models_validation_errors.resize(models.size()); // launch the workers asynchronously validate_models_iter_ = 0; std::vector<std::future<bool>> task_results; for (size_t i = 0; i < model_interpreters.size(); ++i) { // make the packaged task and save the future std::packaged_task<bool(std::vector<Model<TensorT>>&, ModelTrainer<TensorT, InterpreterT>&, InterpreterT&, ModelLogger<TensorT>&, const Eigen::Tensor<TensorT, 4>&, const Eigen::Tensor<TensorT, 4>&, const Eigen::Tensor<TensorT, 3>&, const std::vector<std::string>&, std::vector<std::tuple<int, std::string, TensorT>>& )> task(PopulationTrainer<TensorT, InterpreterT>::validateModels_); task_results.push_back(task.get_future()); // create a copy of the model logger ModelLogger<TensorT> model_logger_copy = model_logger; // launch the interpreter std::thread task_thread(std::move(task), std::ref(models), std::ref(model_trainer), std::ref(model_interpreters[i]), std::ref(model_logger_copy), std::ref(input), std::ref(output), std::ref(time_steps), std::ref(input_nodes), std::ref(models_validation_errors)); task_thread.detach(); } // Retrieve the results as they come for (auto& task_result: task_results) { try { const bool result = task_result.get(); } catch (std::exception & e) { printf("Exception: %s", e.what()); } } // sort each model based on their scores in ascending order models_validation_errors = getTopNModels_( models_validation_errors, getNTop() ); // select a random subset of the top N models_validation_errors = getRandomNModels_( models_validation_errors, getNRandom() ); std::vector<int> selected_models; for (const std::tuple<int, std::string, TensorT>& model_error : models_validation_errors) selected_models.push_back(std::get<0>(model_error)); // purge non-selected models if (selected_models.size() != models.size()) { models.erase(std::remove_if(models.begin(), models.end(), [=](const Model<TensorT>& model){return std::count(selected_models.begin(), selected_models.end(), model.getId()) == 0;} ),models.end()); } if (models.size() > getNRandom()) removeDuplicateModels(models); return models_validation_errors; } template<typename TensorT, typename InterpreterT> inline bool PopulationTrainer<TensorT, InterpreterT>::validateModels_(std::vector<Model<TensorT>>& models, ModelTrainer<TensorT, InterpreterT>& model_trainer, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes, std::vector<std::tuple<int, std::string, TensorT>>& model_validation_errors) { bool result = false; while (true) { // get the next available thread const size_t validate_models_iter = validate_models_iter_.fetch_add(1); if (validate_models_iter >= models.size()) { break; } // create the task and the future std::packaged_task<std::tuple<int, std::string, TensorT> (Model<TensorT>&, ModelTrainer<TensorT, InterpreterT>&, InterpreterT&, ModelLogger<TensorT>&, const Eigen::Tensor<TensorT, 4>&, const Eigen::Tensor<TensorT, 4>&, const Eigen::Tensor<TensorT, 3>&, const std::vector<std::string>& )> task(PopulationTrainer<TensorT, InterpreterT>::validateModel_); std::future<std::tuple<int, std::string, TensorT>> task_result = task.get_future(); // launch the thread std::thread task_thread(std::move(task), std::ref(models[validate_models_iter]), std::ref(model_trainer), std::ref(model_interpreter), std::ref(model_logger), std::ref(input), std::ref(output), std::ref(time_steps), std::ref(input_nodes)); task_thread.detach(); // retreive the results try { model_validation_errors.at(validate_models_iter) = task_result.get(); result = true; } catch (std::exception & e) { printf("Exception: %s", e.what()); } // Clear the interpreter cache before moving to the next model // TODO: add test case for this model_interpreter.clear_cache(); } return result; } template<typename TensorT, typename InterpreterT> std::tuple<int, std::string, TensorT> PopulationTrainer<TensorT, InterpreterT>::validateModel_( Model<TensorT>& model, ModelTrainer<TensorT, InterpreterT>& model_trainer, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes) { //std::lock_guard<std::mutex> lock(validateModel_mutex); // score the model try { std::vector<TensorT> model_errors = model_trainer.validateModel( model, input, output, time_steps, input_nodes, model_logger, model_interpreter); TensorT model_ave_error = 1e6; if (model_errors.size() > 0) model_ave_error = std::accumulate(model_errors.begin(), model_errors.end(), 0.0) / model_errors.size(); if (isnan(model_ave_error)) model_ave_error = 1e32; // a large number char cout_char[512]; sprintf(cout_char, "Model%s (Nodes: %d, Links: %d) error: %.6f\n", model.getName().data(), model.getNodes().size(), model.getLinks().size(), model_ave_error); std::cout << cout_char; return std::make_tuple(model.getId(), model.getName(), model_ave_error); } catch (std::exception& e) { printf("The model %s is broken.\n", model.getName().data()); printf("Error: %s.\n", e.what()); return std::make_tuple(model.getId(), model.getName(), TensorT(1e6)); } } template<typename TensorT, typename InterpreterT> std::vector<std::tuple<int, std::string, TensorT>> PopulationTrainer<TensorT, InterpreterT>::getTopNModels_( std::vector<std::tuple<int, std::string, TensorT>> model_validation_scores, const int& n_top) { // sort each model based on their scores in ascending order std::sort( model_validation_scores.begin(), model_validation_scores.end(), [=](std::tuple<int, std::string, TensorT>& a, std::tuple<int, std::string, TensorT>& b) { return std::get<2>(a) < std::get<2>(b); } ); // select the top N from the models int n_ = n_top; if (n_ > model_validation_scores.size()) n_ = model_validation_scores.size(); std::vector<std::tuple<int, std::string, TensorT>> top_n_models; for (int i = 0; i < n_; ++i) { top_n_models.push_back(model_validation_scores[i]); } return top_n_models; } template<typename TensorT, typename InterpreterT> std::vector<std::tuple<int, std::string, TensorT>> PopulationTrainer<TensorT, InterpreterT>::getRandomNModels_( std::vector<std::tuple<int, std::string, TensorT>> model_validation_scores, const int& n_random) { int n_ = n_random; if (n_ > model_validation_scores.size()) n_ = model_validation_scores.size(); // select a random subset of the top N std::random_device seed; std::mt19937 engine(seed()); std::shuffle(model_validation_scores.begin(), model_validation_scores.end(), engine); std::vector<std::tuple<int, std::string, TensorT>> random_n_models; for (int i = 0; i < n_; ++i) { random_n_models.push_back(model_validation_scores[i]); } return random_n_models; } template<typename TensorT, typename InterpreterT> void PopulationTrainer<TensorT, InterpreterT>::replicateModels( std::vector<Model<TensorT>>& models, ModelReplicator<TensorT>& model_replicator, const std::string& unique_str, const int& n_threads) { // resize the models to the expected size const int models_to_replicate = models.size(); models.resize(models.size() + models.size() * n_replicates_per_model_, Model<TensorT>()); // launch the workers asynchronously replicate_models_iter_ = 0; std::vector<std::future<bool>> task_results; for (int i=0;i<n_threads;++i) { // encapsulate in a packaged_task std::packaged_task<bool(std::vector<Model<TensorT>>&, ModelReplicator<TensorT>&, const std::string&, const int&, const int&, const bool&, const int&, const bool&, const bool& )> task(PopulationTrainer<TensorT, InterpreterT>::replicateModels_); // launch the thread task_results.push_back(task.get_future()); std::thread task_thread(std::move(task), std::ref(models), std::ref(model_replicator), std::ref(unique_str), std::ref(models_to_replicate), std::ref(n_replicates_per_model_), std::ref(remove_isolated_nodes_), std::ref(prune_model_num_), std::ref(check_complete_input_to_output_), std::ref(reset_model_copy_weights_)); task_thread.detach(); } // Retrieve the results as they come for (auto& task_result : task_results) { try { const bool result = task_result.get(); } catch (std::exception & e) { printf("Exception: %s", e.what()); } } // reset the template model weights if (reset_model_template_weights_) for (int i=0;i<models_to_replicate;++i) for (auto& weight_map : models.at(i).getWeightsMap()) weight_map.second->setInitWeight(true); // removeDuplicateModels(models); // safer to use, but does hurt performance } template<typename TensorT, typename InterpreterT> inline bool PopulationTrainer<TensorT, InterpreterT>::replicateModels_(std::vector<Model<TensorT>>& models, ModelReplicator<TensorT>& model_replicator, const std::string& unique_str, const int& models_to_replicate, const int& n_replicates_per_model, const bool& remove_isolated_nodes, const int& prune_model_num, const bool& check_complete_input_to_output, const bool& reset_model_copy_weights) { bool status = false; while(true) { const size_t replicate_models_iter = replicate_models_iter_.fetch_add(1); const size_t models_id_iter = models_id_iter_.fetch_add(1); if (replicate_models_iter >= n_replicates_per_model * models_to_replicate) { break; } // determine the model to replicate and modify const int model_index = replicate_models_iter / n_replicates_per_model; // make the task std::packaged_task<std::pair<bool, Model<TensorT>>// encapsulate in a packaged_task (const Model<TensorT>&, ModelReplicator<TensorT>&, const std::string&, const int&, const bool&, const int&, const bool&, const bool& )> task(PopulationTrainer<TensorT, InterpreterT>::replicateModel_); // launch the thread std::future<std::pair<bool, Model<TensorT>>> task_result = task.get_future(); std::thread task_thread(std::move(task), std::ref(models.at(model_index)), std::ref(model_replicator), std::ref(unique_str), std::ref(replicate_models_iter), std::ref(remove_isolated_nodes), std::ref(prune_model_num), std::ref(check_complete_input_to_output), std::ref(reset_model_copy_weights)); task_thread.detach(); // retrieve the results try { std::pair<bool, Model<TensorT>> model_task_result = task_result.get(); if (model_task_result.first) { model_task_result.second.setId(models_id_iter); models.at(models_to_replicate + replicate_models_iter) = model_task_result.second; } else { std::cout << "All models were broken." << std::endl; } } catch (std::exception & e) { printf("Exception: %s", e.what()); } } return status; } template<typename TensorT, typename InterpreterT> std::pair<bool, Model<TensorT>> PopulationTrainer<TensorT, InterpreterT>::replicateModel_( const Model<TensorT>& model, ModelReplicator<TensorT>& model_replicator, const std::string& unique_str, const int& cnt, const bool& remove_isolated_nodes, const int& prune_model_num, const bool& check_complete_input_to_output, const bool& reset_model_copy_weights) { //std::lock_guard<std::mutex> lock(replicateModel_mutex); // rename the model std::regex re("@"); std::vector<std::string> str_tokens; std::string model_name_new = model.getName(); std::copy( std::sregex_token_iterator(model_name_new.begin(), model_name_new.end(), re, -1), std::sregex_token_iterator(), std::back_inserter(str_tokens)); if (str_tokens.size() > 1) model_name_new = str_tokens[0]; // only retain the last timestamp char model_name_char[512]; sprintf(model_name_char, "%s@replicateModel#%s", model_name_new.data(), unique_str.data()); std::string model_name = model_replicator.makeUniqueHash(model_name_char, std::to_string(cnt)); int max_iters = 8; // changed from 32 for (int iter = 0; iter < max_iters; ++iter) { Model<TensorT> model_copy(model); model_copy.setName(model_name); model_replicator.makeRandomModifications(); model_replicator.modifyModel(model_copy, unique_str, prune_model_num); // model checks // TODO: add unit test coverage for these cases if (remove_isolated_nodes) model_copy.removeIsolatedNodes(); if (prune_model_num > 0) model_copy.pruneModel(prune_model_num); bool complete_model = true; if (check_complete_input_to_output) complete_model = model_copy.checkCompleteInputToOutput(); if (complete_model) { // reset the weights if (reset_model_copy_weights) { for (auto& weight_map : model_copy.getWeightsMap()) { weight_map.second->setInitWeight(true); } } return std::make_pair(true, model_copy); } } return std::make_pair(false, Model<TensorT>()); //throw std::runtime_error("All modified models were broken!"); } template<typename TensorT, typename InterpreterT> void PopulationTrainer<TensorT, InterpreterT>::trainModels( std::vector<Model<TensorT>>& models, ModelTrainer<TensorT, InterpreterT>& model_trainer, std::vector<InterpreterT>& model_interpreters, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes) { // Launch the workers asynchronously train_models_iter_ = 0; std::vector<std::future<bool>> task_results; for (size_t i = 0; i < model_interpreters.size(); ++i) { // make the packaged task and save the future std::packaged_task<bool(std::vector<Model<TensorT>>&, ModelTrainer<TensorT, InterpreterT>&, InterpreterT&, ModelLogger<TensorT>&, const Eigen::Tensor<TensorT, 4>&, const Eigen::Tensor<TensorT, 4>&, const Eigen::Tensor<TensorT, 3>&, const std::vector<std::string>& )> task(PopulationTrainer<TensorT, InterpreterT>::trainModels_); task_results.push_back(task.get_future()); // create a copy of the model logger ModelLogger<TensorT> model_logger_copy = model_logger; // launch the interpreter std::thread task_thread(std::move(task), std::ref(models), std::ref(model_trainer), std::ref(model_interpreters[i]), std::ref(model_logger_copy), std::ref(input), std::ref(output), std::ref(time_steps), std::ref(input_nodes)); task_thread.detach(); } // retrieve the results as they come in for (auto& task_result: task_results) { try { const bool result = task_result.get(); } catch (std::exception & e) { printf("Exception: %s", e.what()); } } } template<typename TensorT, typename InterpreterT> inline bool PopulationTrainer<TensorT, InterpreterT>::trainModels_(std::vector<Model<TensorT>>& models, ModelTrainer<TensorT, InterpreterT>& model_trainer, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes) { bool status = false; while (true) { // get the next available thread const size_t train_models_iter = train_models_iter_.fetch_add(1); if (train_models_iter >= models.size()) { break; } // create the task and the future std::packaged_task<bool (Model<TensorT>&, ModelTrainer<TensorT, InterpreterT>&, InterpreterT&, ModelLogger<TensorT>&, const Eigen::Tensor<TensorT, 4>&, const Eigen::Tensor<TensorT, 4>&, const Eigen::Tensor<TensorT, 3>&, const std::vector<std::string>& )> task(PopulationTrainer<TensorT, InterpreterT>::trainModel_); std::future<bool> task_result = task.get_future(); // launch the thread std::thread task_thread(std::move(task), std::ref(models[train_models_iter]), std::ref(model_trainer), std::ref(model_interpreter), std::ref(model_logger), std::ref(input), std::ref(output), std::ref(time_steps), std::ref(input_nodes)); task_thread.detach(); // retreive the results try { status = task_result.get(); } catch (std::exception & e) { printf("Exception: %s", e.what()); } // Clear the interpreter cache before moving to the next model // TODO: add test case for this model_interpreter.clear_cache(); } return status; } template<typename TensorT, typename InterpreterT> bool PopulationTrainer<TensorT, InterpreterT>::trainModel_( Model<TensorT>& model, ModelTrainer<TensorT, InterpreterT>& model_trainer, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes) { //std::lock_guard<std::mutex> lock(trainModel_mutex); try { model_trainer.trainModel( model, input, output, time_steps, input_nodes, model_logger, model_interpreter); return true; } catch (std::exception& e) { printf("The model %s is broken.\n", model.getName().data()); printf("Error: %s.\n", e.what()); ModelInterpreterFile<TensorT, InterpreterT>::storeModelInterpreterCsv(model.getName() + "_interpreterOps.csv", model_interpreter); return false; } } template<typename TensorT, typename InterpreterT> void PopulationTrainer<TensorT, InterpreterT>::evalModels( std::vector<Model<TensorT>>& models, ModelTrainer<TensorT, InterpreterT>& model_trainer, std::vector<InterpreterT>& model_interpreters, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes) { // launch the workers asynchronously eval_models_iter_ = 0; std::vector<std::future<bool>> task_results; for (size_t i = 0; i < model_interpreters.size(); ++i) { std::packaged_task<bool // encapsulate in a packaged_task (std::vector<Model<TensorT>>&, ModelTrainer<TensorT, InterpreterT>&, InterpreterT&, ModelLogger<TensorT>&, const Eigen::Tensor<TensorT, 4>&, const Eigen::Tensor<TensorT, 3>&, const std::vector<std::string>& )> task(PopulationTrainer<TensorT, InterpreterT>::evalModels_); // create a copy of the model trainer and logger ModelLogger<TensorT> model_logger_copy = model_logger; // launch the thread task_results.push_back(task.get_future()); std::thread task_thread(std::move(task), std::ref(models), std::ref(model_trainer), std::ref(model_interpreters.at(i)), std::ref(model_logger_copy), std::ref(input), std::ref(time_steps), std::ref(input_nodes)); task_thread.detach(); } // Retrieve the results as they come for (auto& task_result : task_results) { try { const bool result = task_result.get(); } catch (std::exception & e) { printf("Exception: %s", e.what()); } } } template<typename TensorT, typename InterpreterT> inline bool PopulationTrainer<TensorT, InterpreterT>::evalModels_(std::vector<Model<TensorT>>& models, ModelTrainer<TensorT, InterpreterT>& model_trainer, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes) { bool result = false; while (true) { // get the next available thread const size_t eval_models_iter = eval_models_iter_.fetch_add(1); if (eval_models_iter >= models.size()) { break; } // create the task and the future std::packaged_task<bool(Model<TensorT>&, ModelTrainer<TensorT, InterpreterT>&, InterpreterT&, ModelLogger<TensorT>&, const Eigen::Tensor<TensorT, 4>&, const Eigen::Tensor<TensorT, 3>&, const std::vector<std::string>& )> task(PopulationTrainer<TensorT, InterpreterT>::evalModel_); std::future<bool> task_result = task.get_future(); // launch the thread std::thread task_thread(std::move(task), std::ref(models.at(eval_models_iter)), std::ref(model_trainer), std::ref(model_interpreter), std::ref(model_logger), std::ref(input), std::ref(time_steps), std::ref(input_nodes)); task_thread.detach(); // retreive the results try { result = task_result.get(); } catch (std::exception & e) { printf("Exception: %s", e.what()); } // Clear the interpreter cache before moving to the next model // TODO: add test case for this model_interpreter.clear_cache(); } return result; } template<typename TensorT, typename InterpreterT> bool PopulationTrainer<TensorT, InterpreterT>::evalModel_( Model<TensorT>& model, ModelTrainer<TensorT, InterpreterT>& model_trainer, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes) { //std::lock_guard<std::mutex> lock(evalModel_mutex); try { model_trainer.evaluateModel( model, input, time_steps, input_nodes, model_logger, model_interpreter); return true; } catch (std::exception& e) { printf("The model %s is broken.\n", model.getName().data()); printf("Error: %s.\n", e.what()); return false; } } template<typename TensorT, typename InterpreterT> std::vector<std::vector<std::tuple<int, std::string, TensorT>>> PopulationTrainer<TensorT, InterpreterT>::evolveModels( std::vector<Model<TensorT>>& models, const std::string& population_name, ModelTrainer<TensorT, InterpreterT>& model_trainer, std::vector<InterpreterT>& model_interpreters, ModelReplicator<TensorT>& model_replicator, DataSimulator<TensorT> &data_simulator, ModelLogger<TensorT>& model_logger, PopulationLogger<TensorT>& population_logger, const std::vector<std::string>& input_nodes) { std::vector<std::vector<std::tuple<int, std::string, TensorT>>> models_validation_errors_per_generation; std::vector<std::string> output_nodes = model_trainer.getLossOutputNodesLinearized(); // generate the input/output data for validation std::cout << "Generating the input/output data for validation..." << std::endl; Eigen::Tensor<TensorT, 4> input_data_validation(model_trainer.getBatchSize(), model_trainer.getMemorySize(), (int)input_nodes.size(), model_trainer.getNEpochsValidation()); Eigen::Tensor<TensorT, 4> output_data_validation(model_trainer.getBatchSize(), model_trainer.getMemorySize(), (int)output_nodes.size(), model_trainer.getNEpochsValidation()); Eigen::Tensor<TensorT, 3> time_steps_validation(model_trainer.getBatchSize(), model_trainer.getMemorySize(), model_trainer.getNEpochsValidation()); data_simulator.simulateValidationData(input_data_validation, output_data_validation, time_steps_validation); // Population initial conditions models_id_iter_ = models.size(); // Initialize the logger if (this->getLogTraining()) population_logger.initLogs(population_name); // Evolve the population for (int iter = 0; iter < getNGenerations(); ++iter) { std::cout << "Iteration #: " + std::to_string(iter) << std::endl; // update the population dynamics adaptivePopulationScheduler(iter, models, models_validation_errors_per_generation); updateNEpochsTraining(model_trainer); // Generate the input and output data for training [BUG FREE] std::cout << "Generating the input/output data for training..." << std::endl; Eigen::Tensor<TensorT, 4> input_data_training(model_trainer.getBatchSize(), model_trainer.getMemorySize(), (int)input_nodes.size(), model_trainer.getNEpochsTraining()); Eigen::Tensor<TensorT, 4> output_data_training(model_trainer.getBatchSize(), model_trainer.getMemorySize(), (int)output_nodes.size(), model_trainer.getNEpochsTraining()); Eigen::Tensor<TensorT, 3> time_steps_training(model_trainer.getBatchSize(), model_trainer.getMemorySize(), model_trainer.getNEpochsTraining()); data_simulator.simulateTrainingData(input_data_training, output_data_training, time_steps_training); // train the population std::cout << "Training the models..." << std::endl; if (model_trainer.getNEpochsTraining() > 0) { trainModels(models, model_trainer, model_interpreters, model_logger, input_data_training, output_data_training, time_steps_training, input_nodes); } // select the top N from the population std::cout << "Selecting the models..." << std::endl; std::vector<std::tuple<int, std::string, TensorT>> models_validation_errors; if (select_models_) { models_validation_errors = selectModels( models, model_trainer, model_interpreters, model_logger, input_data_validation, output_data_validation, time_steps_validation, input_nodes); } else { // TODO: add unit test coverage for this case for (Model<TensorT>& model : models) { models_validation_errors.push_back(std::make_tuple(model.getId(), model.getName(), TensorT(-1))); } } models_validation_errors_per_generation.push_back(models_validation_errors); // update the model replication attributes model_replicator.adaptiveReplicatorScheduler(iter, models, models_validation_errors_per_generation); // log generation if (this->getLogTraining()) { //if (this->getVerbosityLevel() >= 2) // std::cout << "Logging..." << std::endl; this->trainingPopulationLogger(iter, models, population_logger, models_validation_errors); } if (iter < getNGenerations() - 1) { // replicate and modify models // [TODO: add options for verbosity] std::cout << "Replicating and modifying the models..." << std::endl; replicateModels(models, model_replicator, std::to_string(iter), model_interpreters.size()); std::cout << "Population size of " << models.size() << std::endl; } } return models_validation_errors_per_generation; } template<typename TensorT, typename InterpreterT> void PopulationTrainer<TensorT, InterpreterT>::evaluateModels( std::vector<Model<TensorT>>& models, const std::string& population_name, ModelTrainer<TensorT, InterpreterT>& model_trainer, std::vector<InterpreterT>& model_interpreters, ModelReplicator<TensorT>& model_replicator, DataSimulator<TensorT>& data_simulator, ModelLogger<TensorT>& model_logger, const std::vector<std::string>& input_nodes) { // generate the input/output data for evaluation std::cout << "Generating the input/output data for evaluation..." << std::endl; Eigen::Tensor<TensorT, 4> input_data_evaluation(model_trainer.getBatchSize(), model_trainer.getMemorySize(), (int)input_nodes.size(), model_trainer.getNEpochsEvaluation()); Eigen::Tensor<TensorT, 3> time_steps_evaluation(model_trainer.getBatchSize(), model_trainer.getMemorySize(), model_trainer.getNEpochsEvaluation()); data_simulator.simulateEvaluationData(input_data_evaluation, time_steps_evaluation); // Evaluate the population std::cout << "Evaluating the model..." << std::endl; evalModels(models, model_trainer, model_interpreters, model_logger, input_data_evaluation, time_steps_evaluation, input_nodes); } template<typename TensorT, typename InterpreterT> inline void PopulationTrainer<TensorT, InterpreterT>::adaptivePopulationScheduler(const int & n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) { // TODO user } template<typename TensorT, typename InterpreterT> inline void PopulationTrainer<TensorT, InterpreterT>::trainingPopulationLogger(const int & n_generations, std::vector<Model<TensorT>>& models, PopulationLogger<TensorT>& population_logger, const std::vector<std::tuple<int, std::string, TensorT>>& models_validation_errors_per_generation) { // TODO user } template<typename TensorT, typename InterpreterT> inline void PopulationTrainer<TensorT, InterpreterT>::setNEpochsTraining(const int & n_epochs) { n_epochs_training_ = n_epochs; } template<typename TensorT, typename InterpreterT> inline int PopulationTrainer<TensorT, InterpreterT>::getNEpochsTraining() const { return n_epochs_training_; } template<typename TensorT, typename InterpreterT> inline void PopulationTrainer<TensorT, InterpreterT>::updateNEpochsTraining(ModelTrainer<TensorT, InterpreterT>& model_trainer) { if (n_epochs_training_ >= 0) model_trainer.setNEpochsTraining(n_epochs_training_); } // TensorT PopulationTrainer<TensorT, InterpreterT>::calculateMean(std::vector<TensorT> values) // { // if (values.empty()) // return 0; // return std::accumulate(values.begin(), values.end(), 0.0) / values.size(); // } // TensorT PopulationTrainer<TensorT, InterpreterT>::calculateStdDev(std::vector<TensorT> values) // { // if (numbers.size() <= 1u) // return 0; // auto const add_square = [mean](TensorT sum, TensorT i) // { // auto d = i - mean; // return sum + d*d; // }; // TensorT total = std::accumulate(numbers.begin(), numbers.end(), 0.0, add_square); // return total / (numbers.size() - 1); // } } #endif //EVONET_POPULATIONTRAINER_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_WEIGHTFILE_H #define EVONET_WEIGHTFILE_H // .h #include <EvoNet/ml/Weight.h> #include <unsupported/Eigen/CXX11/Tensor> #include <iostream> #include <fstream> #include <vector> #include <map> #include <string> // .cpp #include <EvoNet/io/csv.h> #include <EvoNet/io/CSVWriter.h> #include <regex> #include <EvoNet/core/StringParsing.h> #include <cereal/archives/binary.hpp> namespace EvoNet { /** @brief WeightFile */ template<typename TensorT> class WeightFile { public: WeightFile() = default; ///< Default constructor ~WeightFile() = default; ///< Default destructor /** @brief Load weights from binary file @param filename The name of the weights file @param weights The weights to load data into @returns Status True on success, False if not */ bool loadWeightsBinary(const std::string& filename, std::map<std::string, std::shared_ptr<Weight<TensorT>>>& weights); /** @brief Load weights from csv file @param filename The name of the weights file @param weights The weights to load data into @returns Status True on success, False if not */ bool loadWeightsCsv(const std::string& filename, std::map<std::string, std::shared_ptr<Weight<TensorT>>>& weights); bool loadWeightValuesCsv(const std::string& filename, std::map<std::string, std::shared_ptr<Weight<TensorT>>>& weights); /** @brief Stores weights from binary file @param filename The name of the weights file @param weights The weights to sore @returns Status True on success, False if not */ bool storeWeightsBinary(const std::string& filename, const std::map<std::string, std::shared_ptr<Weight<TensorT>>>& weights); /** @brief Stores weights from binary file @param filename The name of the weights file @param weights The weights to sore @returns Status True on success, False if not */ bool storeWeightsCsv(const std::string& filename, const std::map<std::string, std::shared_ptr<Weight<TensorT>>>& weights); bool storeWeightValuesCsv(const std::string& filename, const std::map<std::string, std::shared_ptr<Weight<TensorT>>>& weights); std::map<std::string, TensorT> parseParameters(const std::string& parameters); }; template<typename TensorT> bool WeightFile<TensorT>::loadWeightsBinary(const std::string& filename, std::map<std::string, std::shared_ptr<Weight<TensorT>>>& weights) { std::ifstream ifs(filename, std::ios::binary); if (ifs.is_open()) { cereal::BinaryInputArchive iarchive(ifs); iarchive(weights); ifs.close(); } return true; } template<typename TensorT> bool WeightFile<TensorT>::loadWeightsCsv(const std::string& filename, std::map<std::string, std::shared_ptr<Weight<TensorT>>>& weights) { weights.clear(); io::CSVReader<9> weights_in(filename); weights_in.read_header(io::ignore_extra_column, "weight_name", "weight_init_op", "weight_init_params", "solver_op", "solver_params", "weight_value", "module_name", "layer_name", "tensor_index"); std::string weight_name, weight_init_op_str, weight_init_params_str, solver_op_str, solver_params_str, weight_value_str, module_name_str, layer_name_str, tensor_index_str; while (weights_in.read_row(weight_name, weight_init_op_str, weight_init_params_str, solver_op_str, solver_params_str, weight_value_str, module_name_str, layer_name_str, tensor_index_str)) { // parse the weight_init_params std::map<std::string, TensorT> weight_init_params = parseParameters(weight_init_params_str); // parse the weight_init_op std::shared_ptr<WeightInitOp<TensorT>> weight_init; if (weight_init_op_str == "ConstWeightInitOp") { ConstWeightInitOp<TensorT>* ptr = nullptr; if (weight_init_params.count("n")) ptr = new ConstWeightInitOp<TensorT>(weight_init_params.at("n")); else ptr = new ConstWeightInitOp<TensorT>(1.0); weight_init.reset(ptr); } else if (weight_init_op_str == "RandWeightInitOp") { RandWeightInitOp<TensorT>* ptr = nullptr; if (weight_init_params.count("n")) ptr = new RandWeightInitOp<TensorT>(weight_init_params.at("n")); else ptr = new RandWeightInitOp<TensorT>(1.0); weight_init.reset(ptr); } else if (weight_init_op_str == "RangeWeightInitOp") { RangeWeightInitOp<TensorT>* ptr = nullptr; if (weight_init_params.count("lb") && weight_init_params.count("ub")) ptr = new RangeWeightInitOp<TensorT>(weight_init_params.at("lb"), weight_init_params.at("ub")); else ptr = new RangeWeightInitOp<TensorT>(0.0, 1.0); weight_init.reset(ptr); } else std::cout << "WeightInitOp " << weight_init_op_str << " for weight_name " << weight_name << " was not recognized." << std::endl; // parse the solver_params_str std::map<std::string, TensorT> solver_params; if (!solver_params_str.empty()) solver_params = parseParameters(solver_params_str); // parse the solver_op std::shared_ptr<SolverOp<TensorT>> solver; if (solver_op_str == "SGDOp") { SGDOp<TensorT>* ptr = new SGDOp<TensorT>(); ptr->setLearningRate(0.01); if (solver_params.count("learning_rate")) ptr->setLearningRate(solver_params.at("learning_rate")); ptr->setMomentum(0.9); if (solver_params.count("momentum")) ptr->setMomentum(solver_params.at("momentum")); ptr->setGradientThreshold(1e6); if (solver_params.count("gradient_threshold")) ptr->setGradientThreshold(solver_params.at("gradient_threshold")); ptr->setGradientNoiseSigma(0.0); if (solver_params.count("gradient_noise_sigma")) ptr->setGradientNoiseSigma(solver_params.at("gradient_noise_sigma")); ptr->setGradientNoiseGamma(0.0); if (solver_params.count("gradient_noise_gamma")) ptr->setGradientNoiseGamma(solver_params.at("gradient_noise_gamma")); solver.reset(ptr); } else if (solver_op_str == "AdamOp") { AdamOp<TensorT>* ptr = new AdamOp<TensorT>(); if (solver_params.count("learning_rate")) ptr->setLearningRate(solver_params.at("learning_rate")); ptr->setMomentum(0.9); if (solver_params.count("momentum")) ptr->setMomentum(solver_params.at("momentum")); ptr->setMomentum2(0.999); if (solver_params.count("momentum2")) ptr->setMomentum2(solver_params.at("momentum2")); ptr->setDelta(1e-8); if (solver_params.count("delta")) ptr->setDelta(solver_params.at("delta")); ptr->setGradientThreshold(1e6); if (solver_params.count("gradient_threshold")) ptr->setGradientThreshold(solver_params.at("gradient_threshold")); ptr->setGradientNoiseSigma(0.0); if (solver_params.count("gradient_noise_sigma")) ptr->setGradientNoiseSigma(solver_params.at("gradient_noise_sigma")); ptr->setGradientNoiseGamma(0.0); if (solver_params.count("gradient_noise_gamma")) ptr->setGradientNoiseGamma(solver_params.at("gradient_noise_gamma")); solver.reset(ptr); } else if (solver_op_str == "DummySolverOp") { DummySolverOp<TensorT>* ptr = new DummySolverOp<TensorT>(); solver.reset(ptr); } else std::cout << "SolverOp " << solver_op_str << " for weight_name " << weight_name << " was not recognized." << std::endl; std::shared_ptr<Weight<TensorT>> weight(new Weight<TensorT>(weight_name, weight_init, solver)); // parse the weight value TensorT weight_value = 0; try { weight_value = std::stof(weight_value_str); } catch (std::exception& e) { printf("Exception: %s", e.what()); } weight->setWeight(weight_value); weight->setInitWeight(false); // parse the tensor indexing weight->setModuleName(module_name_str); weight->setLayerName(layer_name_str); std::vector<std::string> tensor_indices = SplitString(tensor_index_str, "|"); for (std::string& tensor_index : tensor_indices) { if (!tensor_index.empty()) { std::vector<std::string> tensor_indexes = SplitString(ReplaceTokens(tensor_index, { "[\{\}]", "\\s+" }, ""), ";"); assert(tensor_indexes.size() == 3); int ind1 = -1, ind2 = -1, ind3 = -1; ind1 = std::stoi(tensor_indexes[0]); ind2 = std::stoi(tensor_indexes[1]); ind3 = std::stoi(tensor_indexes[2]); weight->addTensorIndex(std::make_tuple(ind1, ind2, ind3)); } } weights.emplace(weight_name, weight); } return true; } template<typename TensorT> inline bool WeightFile<TensorT>::loadWeightValuesCsv(const std::string & filename, std::map<std::string, std::shared_ptr<Weight<TensorT>>>& weights) { io::CSVReader<2> weights_in(filename); weights_in.read_header(io::ignore_extra_column, "weight_name", "weight_value"); std::string weight_name, weight_value_str = ""; while (weights_in.read_row(weight_name, weight_value_str)) { // parse the weight value TensorT weight_value = 0; try { weight_value = std::stof(weight_value_str); } catch (std::exception& e) { printf("Exception: %s", e.what()); } if (weights.count(weight_name)) { weights.at(weight_name)->setWeight(weight_value); weights.at(weight_name)->setInitWeight(false); } else { //std::cout << "Weight " << weight_name << " was not found in the model and will be skipped." << std::endl; } } return true; } template<typename TensorT> std::map<std::string, TensorT> WeightFile<TensorT>::parseParameters(const std::string& parameters) { // parse the parameters std::regex re(";"); std::vector<std::string> str_tokens; std::copy( std::sregex_token_iterator(parameters.begin(), parameters.end(), re, -1), std::sregex_token_iterator(), std::back_inserter(str_tokens)); // break into parameter name and value std::map<std::string, TensorT> parameters_map; for (std::string str : str_tokens) { str.erase(remove_if(str.begin(), str.end(), isspace), str.end()); std::regex re1(":"); std::vector<std::string> params; std::copy( std::sregex_token_iterator(str.begin(), str.end(), re1, -1), std::sregex_token_iterator(), std::back_inserter(params)); std::string param_name = params[0]; TensorT param_value = 0.0; try { param_value = std::stof(params[1]); } catch (std::exception& e) { printf("Exception: %s", e.what()); } parameters_map.emplace(param_name, param_value); } return parameters_map; } template<typename TensorT> bool WeightFile<TensorT>::storeWeightsBinary(const std::string& filename, const std::map<std::string, std::shared_ptr<Weight<TensorT>>>& weights) { std::ofstream ofs(filename, std::ios::binary); //if (ofs.is_open() == false) { // Lines check to make sure the file is not already created cereal::BinaryOutputArchive oarchive(ofs); oarchive(weights); ofs.close(); //} // Lines check to make sure the file is not already created return true; } template<typename TensorT> bool WeightFile<TensorT>::storeWeightsCsv(const std::string& filename, const std::map<std::string, std::shared_ptr<Weight<TensorT>>>& weights) { CSVWriter csvwriter(filename); // write the headers to the first line std::vector<std::string> headers = { "weight_name", "weight_init_op", "weight_init_params", "solver_op", "solver_params", "weight_value", "module_name", "layer_name", "tensor_index"}; csvwriter.writeDataInRow(headers.begin(), headers.end()); for (const auto& weight : weights) { std::vector<std::string> row; row.push_back(weight.second->getName()); // parse the weight_init_op const std::string weight_init_op_name = weight.second->getWeightInitOp()->getName(); row.push_back(weight_init_op_name); // parse the weight_init_params row.push_back(weight.second->getWeightInitOp()->getParamsAsStr()); // parse the solver_op const std::string solver_op_name = weight.second->getSolverOp()->getName(); row.push_back(solver_op_name); // parse the solver_op_params row.push_back(weight.second->getSolverOp()->getParamsAsStr()); // parse the weight value row.push_back(std::to_string(weight.second->getWeight())); // parse the module name row.push_back(weight.second->getModuleName()); // parse the tensor indexing row.push_back(weight.second->getLayerName()); std::string tensor_index = ""; for (int i = 0; i < weight.second->getTensorIndex().size(); ++i) { tensor_index += "{" + std::to_string(std::get<0>(weight.second->getTensorIndex()[i])) + ";" + std::to_string(std::get<1>(weight.second->getTensorIndex()[i])) + ";" + std::to_string(std::get<2>(weight.second->getTensorIndex()[i])) + "}"; if (i < weight.second->getTensorIndex().size() - 1) tensor_index += "|"; } row.push_back(tensor_index); // write to file csvwriter.writeDataInRow(row.begin(), row.end()); } return true; } template<typename TensorT> inline bool WeightFile<TensorT>::storeWeightValuesCsv(const std::string & filename, const std::map<std::string, std::shared_ptr<Weight<TensorT>>>& weights) { CSVWriter csvwriter(filename); // write the headers to the first line const std::vector<std::string> headers = { "weight_name", "weight_value" }; csvwriter.writeDataInRow(headers.begin(), headers.end()); for (const auto& weight : weights) { std::vector<std::string> row; row.push_back(weight.second->getName()); // parse the weight value row.push_back(std::to_string(weight.second->getWeight())); // write to file csvwriter.writeDataInRow(row.begin(), row.end()); } return true; } } #endif //EVONET_WEIGHTFILE_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE MetabolomicsLatentTraversalDataSimulator test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/simulator/MetabolomicsLatentTraversalDataSimulator.h> #include <EvoNet/test_config.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(metabolomicsLatentTraversalDataSimulator) BOOST_AUTO_TEST_CASE(constructor) { MetabolomicsLatentTraversalDataSimulator<float>* ptr = nullptr; MetabolomicsLatentTraversalDataSimulator<float>* nullPointer = nullptr; ptr = new MetabolomicsLatentTraversalDataSimulator<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { MetabolomicsLatentTraversalDataSimulator<float>* ptr = nullptr; ptr = new MetabolomicsLatentTraversalDataSimulator<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(readAndProcessMetabolomicsTrainingAndValidationData) { // parameters for testing std::string biochem_rxns_filename = EVONET_GET_TEST_DATA_PATH("RBCGlycolysis.csv"); std::string metabo_data_filename_train = EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_Metabolomics_train.csv"); std::string meta_data_filename_train = EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_MetaData_train.csv"); std::string metabo_data_filename_test = EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_Metabolomics_test.csv"); std::string meta_data_filename_test = EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_MetaData_test.csv"); const int n_continuous = 4; const int n_discrete = 2; const int n_labels = 1; const int n_continuous_steps = 16; const int n_epochs = n_continuous_steps * n_continuous * n_discrete * n_labels; const int batch_size = 64; const int memory_size = 1; int n_reps_per_sample = -1; // data structures needed for testing Eigen::Tensor<float, 1> latent_data_expected; Eigen::Tensor<float, 1> input_test; Eigen::Tensor<float, 1> loss_output_test; Eigen::Tensor<float, 1> metric_output_test; // define the data simulator MetabolomicsLatentTraversalDataSimulator<float> metabolomics_data; metabolomics_data.n_encodings_continuous_ = n_continuous; metabolomics_data.n_encodings_discrete_ = n_discrete; metabolomics_data.n_continuous_steps_ = n_continuous_steps; int n_reaction_ids_training, n_labels_training, n_component_group_names_training; int n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation; // Test with use_concentrations, sample_values, fill_zero, w/o fold change, w/o offline transformation, w/o online transformation metabolomics_data.readAndProcessMetabolomicsTrainingAndValidationData( n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, true, false, true, false, false, false, false, false, "S01_D01_PLT_25C_0hr", 10, false, false, false, false, false, false, n_reps_per_sample, false, false, n_epochs, batch_size, memory_size); BOOST_CHECK_EQUAL(n_reps_per_sample, int(n_epochs * batch_size / 4)); BOOST_CHECK_EQUAL(n_reaction_ids_training, 0); BOOST_CHECK_EQUAL(n_labels_training, 1); BOOST_CHECK_EQUAL(n_component_group_names_training, 81); BOOST_CHECK_EQUAL(n_reaction_ids_validation, 0); BOOST_CHECK_EQUAL(n_labels_validation, 1); BOOST_CHECK_EQUAL(n_component_group_names_validation, 81); BOOST_CHECK_EQUAL(metabolomics_data.labels_training_.at(0), "D01"); BOOST_CHECK_EQUAL(metabolomics_data.labels_validation_.at(0), "D01"); // Test the head of the training data latent_data_expected.resize(n_continuous + n_discrete); latent_data_expected.setValues({ -1.64485, 0, 0, 0, 1, 0}); input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_continuous + n_discrete, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_continuous + n_discrete })); loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); for (int i = 0; i < n_continuous + n_discrete; ++i) { BOOST_CHECK_CLOSE(input_test(i), latent_data_expected(i), 1e-3); } for (int i = 0; i < n_component_group_names_training; ++i) { BOOST_CHECK_GE(loss_output_test(i), 0.00054280, 1e-6); BOOST_CHECK_LE(loss_output_test(i), 508.3080903, 1e-3); BOOST_CHECK_GE(metric_output_test(i), 0.00054280, 1e-6); BOOST_CHECK_LE(metric_output_test(i), 508.3080903, 1e-3); } // Test the tail of the training data latent_data_expected.resize(n_continuous + n_discrete); latent_data_expected.setValues({ 0, 0, 0, 1.64486, 0, 1 }); input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_continuous + n_discrete, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_continuous + n_discrete })); loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); for (int i = 0; i < n_continuous + n_discrete; ++i) { BOOST_CHECK_CLOSE(input_test(i), latent_data_expected(i), 1e-3); } for (int i = 0; i < n_component_group_names_training; ++i) { BOOST_CHECK_GE(loss_output_test(i), 0.0, 1e-6); BOOST_CHECK_LE(loss_output_test(i), 508.3080903, 1e-3); BOOST_CHECK_GE(metric_output_test(i), 0.0, 1e-6); BOOST_CHECK_LE(metric_output_test(i), 508.3080903, 1e-3); } // Test the head of the validation data latent_data_expected.resize(n_continuous + n_discrete); latent_data_expected.setValues({ -1.64485, 0, 0, 0, 1, 0 }); input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1,n_continuous + n_discrete, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_continuous + n_discrete })); loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); for (int i = 0; i < n_continuous + n_discrete; ++i) { BOOST_CHECK_CLOSE(input_test(i), latent_data_expected(i), 1e-3); } for (int i = 0; i < n_component_group_names_validation; ++i) { BOOST_CHECK_GE(loss_output_test(i), 0.00054280, 1e-6); BOOST_CHECK_LE(loss_output_test(i), 508.3080903, 1e-3); BOOST_CHECK_GE(metric_output_test(i), 0.00054280, 1e-6); BOOST_CHECK_LE(metric_output_test(i), 508.3080903, 1e-3); } // Test the tail of the validation data latent_data_expected.resize(n_continuous + n_discrete); latent_data_expected.setValues({ 0, 0, 0, 1.64486, 0, 1 }); input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_continuous + n_discrete, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_continuous + n_discrete })); loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); for (int i = 0; i < n_continuous + n_discrete; ++i) { BOOST_CHECK_CLOSE(input_test(i), latent_data_expected(i), 1e-3); } for (int i = 0; i < n_component_group_names_validation; ++i) { BOOST_CHECK_GE(loss_output_test(i), 0.0, 1e-6); BOOST_CHECK_LE(loss_output_test(i), 508.3080903, 1e-3); BOOST_CHECK_GE(metric_output_test(i), 0.0, 1e-6); BOOST_CHECK_LE(metric_output_test(i), 508.3080903, 1e-3); } } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELREPLICATOREXPERIMENTAL_H #define EVONET_MODELREPLICATOREXPERIMENTAL_H // .h #include <EvoNet/ml/ModelReplicator.h> namespace EvoNet { /** @brief Experimental methods for `ModelReplicator` */ template<typename TensorT> class ModelReplicatorExperimental: public ModelReplicator<TensorT> { public: ModelReplicatorExperimental() = default; ///< Default constructor ~ModelReplicatorExperimental() = default; ///< Default destructor /// Overrides and members used in all examples bool set_modification_rate_by_prev_error_ = false; bool set_modification_rate_fixed_ = false; /* @brief Implementation of the `adaptiveReplicatorScheduler` */ void adaptiveReplicatorScheduler(const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) override; /* @brief Adjust the model replicator modification rate based on a fixed population size error rates @param[in] n_generations The number of generations @param[in] models A vector of models representing the population @param[in] models_errors_per_generations A record of model errors per generation */ void setModificationRateByPrevError(const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations); /* @brief Set the modification rate @param[in] n_generations The number of generations @param[in] models A vector of models representing the population @param[in] models_errors_per_generations A record of model errors per generation */ void setModificationRateFixed(const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations); }; template<typename TensorT> inline void ModelReplicatorExperimental<TensorT>::adaptiveReplicatorScheduler(const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) { // Adjust the models modifications rates if (set_modification_rate_by_prev_error_) this->setModificationRateByPrevError(n_generations, models, models_errors_per_generations); if (set_modification_rate_fixed_) this->setModificationRateFixed(n_generations, models, models_errors_per_generations); } template<typename TensorT> void ModelReplicatorExperimental<TensorT>::setModificationRateByPrevError(const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) { if (n_generations > 2) { // Calculate the mean of the previous and current model errors TensorT mean_errors_per_generation_prev = 0, mean_errors_per_generation_cur = 0; for (const std::tuple<int, std::string, TensorT>& models_errors : models_errors_per_generations[n_generations - 1]) mean_errors_per_generation_prev += std::get<2>(models_errors); mean_errors_per_generation_prev = mean_errors_per_generation_prev / models_errors_per_generations[n_generations - 1].size(); for (const std::tuple<int, std::string, TensorT>& models_errors : models_errors_per_generations[n_generations]) mean_errors_per_generation_cur += std::get<2>(models_errors); mean_errors_per_generation_cur = mean_errors_per_generation_cur / models_errors_per_generations[n_generations].size(); // Lambdas to ensure the lb/ub of random modifications stay within certain limits auto clipLinkMod = [](const std::pair<int, int>& value) { std::pair<int, int> value_copy = value; if (value.second > 32) value_copy.second = 32; if (value.first > 16) value_copy.first = 16; if (value.second < 4) value_copy.second = 4; if (value.first < 0) value_copy.first = 0; return value_copy; }; auto clipNodeMod = [](const std::pair<int, int>& value) { std::pair<int, int> value_copy = value; if (value.second > 16) value_copy.second = 16; if (value.first > 8) value_copy.first = 8; if (value.second < 2) value_copy.second = 2; if (value.first < 0) value_copy.first = 0; return value_copy; }; // update the # of random modifications TensorT abs_percent_diff = abs(mean_errors_per_generation_prev - mean_errors_per_generation_cur) / mean_errors_per_generation_prev; if (abs_percent_diff < 0.1) { this->setRandomModifications( clipNodeMod(std::make_pair(this->getRandomModifications()[0].first * 2, this->getRandomModifications()[0].second * 2)), clipNodeMod(std::make_pair(this->getRandomModifications()[1].first * 2, this->getRandomModifications()[1].second * 2)), std::make_pair(this->getRandomModifications()[2].first * 2, this->getRandomModifications()[2].second * 2), std::make_pair(this->getRandomModifications()[3].first * 2, this->getRandomModifications()[3].second * 2), clipLinkMod(std::make_pair(this->getRandomModifications()[4].first * 2, this->getRandomModifications()[4].second * 2)), std::make_pair(this->getRandomModifications()[5].first * 2, this->getRandomModifications()[5].second * 2), clipNodeMod(std::make_pair(this->getRandomModifications()[6].first * 2, this->getRandomModifications()[6].second * 2)), clipLinkMod(std::make_pair(this->getRandomModifications()[7].first * 2, this->getRandomModifications()[7].second * 2)), clipNodeMod(std::make_pair(this->getRandomModifications()[8].first * 2, this->getRandomModifications()[8].second * 2)), clipNodeMod(std::make_pair(this->getRandomModifications()[9].first * 2, this->getRandomModifications()[9].second * 2)), std::make_pair(this->getRandomModifications()[10].first * 2, this->getRandomModifications()[10].second * 2), std::make_pair(this->getRandomModifications()[11].first * 2, this->getRandomModifications()[11].second * 2), std::make_pair(this->getRandomModifications()[12].first * 2, this->getRandomModifications()[12].second * 2)); } else if (abs_percent_diff >= 0.1 && abs_percent_diff < 0.5) { // Keep the same parameters } else { this->setRandomModifications( clipNodeMod(std::make_pair(this->getRandomModifications()[0].first / 2, this->getRandomModifications()[0].second / 2)), clipNodeMod(std::make_pair(this->getRandomModifications()[1].first / 2, this->getRandomModifications()[1].second / 2)), std::make_pair(this->getRandomModifications()[2].first / 2, this->getRandomModifications()[2].second / 2), std::make_pair(this->getRandomModifications()[3].first / 2, this->getRandomModifications()[3].second / 2), clipLinkMod(std::make_pair(this->getRandomModifications()[4].first / 2, this->getRandomModifications()[4].second / 2)), std::make_pair(this->getRandomModifications()[5].first / 2, this->getRandomModifications()[5].second / 2), clipNodeMod(std::make_pair(this->getRandomModifications()[6].first / 2, this->getRandomModifications()[6].second / 2)), clipLinkMod(std::make_pair(this->getRandomModifications()[7].first / 2, this->getRandomModifications()[7].second / 2)), clipNodeMod(std::make_pair(this->getRandomModifications()[8].first / 2, this->getRandomModifications()[8].second / 2)), clipNodeMod(std::make_pair(this->getRandomModifications()[9].first / 2, this->getRandomModifications()[9].second / 2)), std::make_pair(this->getRandomModifications()[10].first / 2, this->getRandomModifications()[10].second / 2), std::make_pair(this->getRandomModifications()[11].first / 2, this->getRandomModifications()[11].second / 2), std::make_pair(this->getRandomModifications()[12].first / 2, this->getRandomModifications()[12].second / 2)); } } else { this->setRandomModifications( std::make_pair(0, 2), std::make_pair(0, 2), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 4), std::make_pair(0, 0), std::make_pair(0, 2), std::make_pair(0, 4), std::make_pair(0, 2), std::make_pair(0, 2), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); } } template<typename TensorT> void ModelReplicatorExperimental<TensorT>::setModificationRateFixed(const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) { this->setRandomModifications( std::make_pair(0, 4), std::make_pair(0, 4), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 8), std::make_pair(0, 0), std::make_pair(0, 2), std::make_pair(0, 4), std::make_pair(0, 4), std::make_pair(0, 4), std::make_pair(0, 0), std::make_pair(0, 0), std::make_pair(0, 0)); } } #endif //EVONET_MODELREPLICATOREXPERIMENTAL_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ModelTrainer test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/ModelTrainerDefaultDevice.h> #include <EvoNet/ml/Model.h> #include <EvoNet/ml/ModelInterpreterDefaultDevice.h> using namespace EvoNet; using namespace std; template<typename TensorT> class ModelTrainerExt : public ModelTrainerDefaultDevice<TensorT>{}; template<typename TensorT> class DataSimulatorDAGToy : public DataSimulator<TensorT> { public: void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { // Make the input data input_data.setValues({ {{1, 5, 1, 1}}, {{2, 6, 1, 1}}, {{3, 7, 1, 1}}, {{4, 8, 1, 1}} }); // Make the output data loss_output_data.setValues({ {{0, 1}}, {{0, 1}}, {{0, 1}}, {{0, 1}} }); metric_output_data.setValues({ {{0, 1}}, {{0, 1}}, {{0, 1}}, {{0, 1}} }); // Make the simulation time_steps time_steps.setConstant(1); }; void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { // Make the input data input_data.setValues({ {{1, 1, 5, 1}}, {{1, 1, 2, 6}}, {{1, 1, 3, 7}}, {{1, 1, 4, 8 }} }); // Make the output data loss_output_data.setValues({ {{1, 0}}, {{1, 0}}, {{1, 0}}, {{1, 0}} }); metric_output_data.setValues({ {{0, 1}}, {{0, 1}}, {{0, 1}}, {{0, 1}} }); // Make the simulation time_steps time_steps.setConstant(1); }; }; template<typename TensorT> class DataSimulatorDCGToy : public DataSimulator<TensorT> { public: void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { // Make the input data input_data.setValues( { {{8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}, {1, 0, 0}}, {{9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}}, {{10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}}, {{11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}}, {{12, 0, 0}, {11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}} } ); // Make the output data loss_output_data.setValues( { { { 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 },{ 1 } }, { { 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 } }, { { 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 } }, { { 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 } }, { { 6 },{ 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 } } }); metric_output_data.setValues( { { { 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 },{ 1 } }, { { 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 } }, { { 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 } }, { { 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 } }, { { 6 },{ 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 } } }); // Make the simulation time_steps time_steps.setValues({ {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1} } ); } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { simulateTrainingData(input_data, loss_output_data, metric_output_data, time_steps); } }; BOOST_AUTO_TEST_SUITE(trainer) BOOST_AUTO_TEST_CASE(constructor) { ModelTrainerExt<float>* ptr = nullptr; ModelTrainerExt<float>* nullPointer = nullptr; ptr = new ModelTrainerExt<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { ModelTrainerExt<float>* ptr = nullptr; ptr = new ModelTrainerExt<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(gettersAndSetters) { ModelTrainerExt<float> trainer; // Test defaults BOOST_CHECK_EQUAL(trainer.getBatchSize(), 1); BOOST_CHECK_EQUAL(trainer.getMemorySize(), 1); BOOST_CHECK_EQUAL(trainer.getNEpochsTraining(), 0); BOOST_CHECK_EQUAL(trainer.getNEpochsValidation(), 0); BOOST_CHECK_EQUAL(trainer.getNEpochsEvaluation(), 0); BOOST_CHECK_EQUAL(trainer.getVerbosityLevel(), 0); BOOST_CHECK_EQUAL(trainer.getNTBPTTSteps(), -1); BOOST_CHECK_EQUAL(trainer.getNTETTSteps(), -1); BOOST_CHECK_EQUAL(trainer.getLogTraining(), false); BOOST_CHECK_EQUAL(trainer.getLogValidation(), false); BOOST_CHECK_EQUAL(trainer.getLogEvaluation(), false); BOOST_CHECK_EQUAL(trainer.getFindCycles(), true); BOOST_CHECK_EQUAL(trainer.getFastInterpreter(), false); BOOST_CHECK_EQUAL(trainer.getPreserveOoO(), true); BOOST_CHECK_EQUAL(trainer.getLossFunctionHelpers().size(), 0); BOOST_CHECK_EQUAL(trainer.getMetricFunctionHelpers().size(), 0); BOOST_CHECK_EQUAL(trainer.getInterpretModel(), true); BOOST_CHECK_EQUAL(trainer.getResetModel(), true); BOOST_CHECK_EQUAL(trainer.getResetInterpreter(), true); // Test setters/getters trainer.setBatchSize(4); trainer.setMemorySize(1); trainer.setNEpochsTraining(100); trainer.setNEpochsValidation(10); trainer.setNEpochsEvaluation(2); trainer.setVerbosityLevel(1); trainer.setLogging(true, true, true); trainer.setNTBPTTSteps(1); trainer.setNTETTSteps(2); trainer.setFindCycles(false); trainer.setFastInterpreter(true); trainer.setPreserveOoO(false); trainer.setInterpretModel(false); trainer.setResetModel(false); trainer.setResetInterpreter(false); BOOST_CHECK_EQUAL(trainer.getBatchSize(), 4); BOOST_CHECK_EQUAL(trainer.getMemorySize(), 1); BOOST_CHECK_EQUAL(trainer.getNEpochsTraining(), 100); BOOST_CHECK_EQUAL(trainer.getNEpochsValidation(), 10); BOOST_CHECK_EQUAL(trainer.getNEpochsEvaluation(), 2); BOOST_CHECK_EQUAL(trainer.getVerbosityLevel(), 1); BOOST_CHECK_EQUAL(trainer.getNTBPTTSteps(), 1); BOOST_CHECK_EQUAL(trainer.getNTETTSteps(), 2); BOOST_CHECK_EQUAL(trainer.getLogTraining(), true); BOOST_CHECK_EQUAL(trainer.getLogValidation(), true); BOOST_CHECK_EQUAL(trainer.getLogEvaluation(), true); BOOST_CHECK_EQUAL(trainer.getFindCycles(), false); BOOST_CHECK_EQUAL(trainer.getFastInterpreter(), true); BOOST_CHECK_EQUAL(trainer.getPreserveOoO(), false); BOOST_CHECK_EQUAL(trainer.getInterpretModel(), false); BOOST_CHECK_EQUAL(trainer.getResetModel(), false); BOOST_CHECK_EQUAL(trainer.getResetInterpreter(), false); // Test loss and metric function getters and setters std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper1, loss_function_helper2, loss_function_helper3; loss_function_helper1.output_nodes_ = { "Output000000000000", "Output00000000001", "Output000000000002" }; loss_function_helper1.loss_functions_ = { std::make_shared<BCEWithLogitsLossOp<float>>(BCEWithLogitsLossOp<float>(1e-6, 1.0)) }; loss_function_helper1.loss_function_grads_ = { std::make_shared<BCEWithLogitsLossGradOp<float>>(BCEWithLogitsLossGradOp<float>(1e-6, 1.0)) }; loss_function_helpers.push_back(loss_function_helper1); loss_function_helper2.output_nodes_ = { "Mu000000000000", "Mu00000000001" }; loss_function_helper2.loss_functions_ = { std::make_shared<KLDivergenceMuLossOp<float>>(KLDivergenceMuLossOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helper2.loss_function_grads_ = { std::make_shared<KLDivergenceMuLossGradOp<float>>(KLDivergenceMuLossGradOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helpers.push_back(loss_function_helper2); loss_function_helper3.output_nodes_ = { "Var000000000000", "Var00000000001" }; loss_function_helper3.loss_functions_ = { std::make_shared<KLDivergenceLogVarLossOp<float>>(KLDivergenceLogVarLossOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helper3.loss_function_grads_ = { std::make_shared<KLDivergenceLogVarLossGradOp<float>>(KLDivergenceLogVarLossGradOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helpers.push_back(loss_function_helper3); std::vector<MetricFunctionHelper<float>> metric_function_helpers; MetricFunctionHelper<float> metric_function_helper1; metric_function_helper1.output_nodes_ = { "Output000000000000", "Output00000000001", "Output000000000002" }; metric_function_helper1.metric_functions_ = { std::make_shared<CosineSimilarityOp<float>>(CosineSimilarityOp<float>("Mean")), std::make_shared<CosineSimilarityOp<float>>(CosineSimilarityOp<float>("Var")), std::make_shared<PearsonROp<float>>(PearsonROp<float>("Mean")), std::make_shared<PearsonROp<float>>(PearsonROp<float>("Var")) }; metric_function_helper1.metric_names_ = { "CosineSimilarity-Mean", "CosineSimilarity-Var", "PearsonR-Mean", "PearsonR-Var" }; metric_function_helpers.push_back(metric_function_helper1); trainer.setLossFunctionHelpers(loss_function_helpers); trainer.setMetricFunctionHelpers(metric_function_helpers); BOOST_CHECK(trainer.getLossOutputNodesLinearized() == std::vector<std::string>({ "Output000000000000", "Output00000000001", "Output000000000002", "Mu000000000000", "Mu00000000001", "Var000000000000", "Var00000000001" })); BOOST_CHECK(trainer.getMetricOutputNodesLinearized() == std::vector<std::string>({ "Output000000000000", "Output00000000001", "Output000000000002" })); BOOST_CHECK(trainer.getMetricNamesLinearized() == std::vector<std::string>({ "CosineSimilarity-Mean", "CosineSimilarity-Var", "PearsonR-Mean", "PearsonR-Var" })); BOOST_CHECK_EQUAL(trainer.getNLossFunctions(), 3); BOOST_CHECK_EQUAL(trainer.getNMetricFunctions(), 4); } BOOST_AUTO_TEST_CASE(checkInputData) { ModelTrainerExt<float> trainer; trainer.setBatchSize(4); trainer.setMemorySize(1); trainer.setNEpochsTraining(100); trainer.setNEpochsValidation(100); const std::vector<std::string> input_nodes = {"0", "1", "6", "7"}; Eigen::Tensor<float, 4> input_data(trainer.getBatchSize(), trainer.getMemorySize(), (int)input_nodes.size(), trainer.getNEpochsTraining()); BOOST_CHECK(trainer.checkInputData(trainer.getNEpochsTraining(), input_data, trainer.getBatchSize(), trainer.getMemorySize(), input_nodes)); BOOST_CHECK(!trainer.checkInputData(90, input_data, trainer.getBatchSize(), trainer.getMemorySize(), input_nodes)); const std::vector<std::string> input_nodes2 = {"0", "1"}; BOOST_CHECK(!trainer.checkInputData(trainer.getNEpochsTraining(), input_data, trainer.getBatchSize(), trainer.getMemorySize(), input_nodes2)); BOOST_CHECK(!trainer.checkInputData(trainer.getNEpochsTraining(), input_data, trainer.getBatchSize(), 3, input_nodes)); BOOST_CHECK(!trainer.checkInputData(trainer.getNEpochsTraining(), input_data, 3, trainer.getMemorySize(), input_nodes)); } BOOST_AUTO_TEST_CASE(checkOutputData) { ModelTrainerExt<float> trainer; trainer.setBatchSize(4); trainer.setMemorySize(1); trainer.setNEpochsTraining(100); const std::vector<std::string> output_nodes = {"4", "5"}; Eigen::Tensor<float, 4> output_data(trainer.getBatchSize(), trainer.getMemorySize(), (int)output_nodes.size(), trainer.getNEpochsTraining()); BOOST_CHECK(trainer.checkOutputData(trainer.getNEpochsTraining(), output_data, trainer.getBatchSize(), trainer.getMemorySize(), output_nodes)); BOOST_CHECK(!trainer.checkOutputData(90, output_data, trainer.getBatchSize(), trainer.getMemorySize(), output_nodes)); const std::vector<std::string> output_nodes2 = {"0"}; BOOST_CHECK(!trainer.checkOutputData(trainer.getNEpochsTraining(), output_data, trainer.getBatchSize(), trainer.getMemorySize(), output_nodes2)); BOOST_CHECK(!trainer.checkOutputData(trainer.getNEpochsTraining(), output_data, 3, trainer.getMemorySize(), output_nodes)); BOOST_CHECK(!trainer.checkOutputData(trainer.getNEpochsTraining(), output_data, trainer.getBatchSize(), 0, output_nodes)); } BOOST_AUTO_TEST_CASE(checkLossFunctions) { ModelTrainerExt<float> model_trainer; BOOST_CHECK(!model_trainer.checkLossFunctions()); std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper1, loss_function_helper2, loss_function_helper3; loss_function_helper1.output_nodes_ = { "Output000000000000", "Output00000000001", "Output000000000002" }; loss_function_helper1.loss_functions_ = { std::make_shared<BCEWithLogitsLossOp<float>>(BCEWithLogitsLossOp<float>(1e-6, 1.0)) }; loss_function_helper1.loss_function_grads_ = { std::make_shared<BCEWithLogitsLossGradOp<float>>(BCEWithLogitsLossGradOp<float>(1e-6, 1.0)) }; loss_function_helpers.push_back(loss_function_helper1); loss_function_helper2.output_nodes_ = { "Mu000000000000", "Mu00000000001" }; loss_function_helper2.loss_functions_ = { std::make_shared<KLDivergenceMuLossOp<float>>(KLDivergenceMuLossOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helper2.loss_function_grads_ = { std::make_shared<KLDivergenceMuLossGradOp<float>>(KLDivergenceMuLossGradOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helpers.push_back(loss_function_helper2); loss_function_helper3.output_nodes_ = { "Var000000000000", "Var00000000001" }; loss_function_helper3.loss_functions_ = { std::make_shared<KLDivergenceLogVarLossOp<float>>(KLDivergenceLogVarLossOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helper3.loss_function_grads_ = { std::make_shared<KLDivergenceLogVarLossGradOp<float>>(KLDivergenceLogVarLossGradOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helpers.push_back(loss_function_helper3); model_trainer.setLossFunctionHelpers(loss_function_helpers); BOOST_CHECK(model_trainer.checkLossFunctions()); loss_function_helpers.clear(); loss_function_helper1.output_nodes_ = { "Output000000000000", "Output00000000001", "Output000000000002" }; loss_function_helper1.loss_functions_ = { std::make_shared<BCEWithLogitsLossOp<float>>(BCEWithLogitsLossOp<float>(1e-6, 1.0)) }; loss_function_helper1.loss_function_grads_.clear(); loss_function_helpers.push_back(loss_function_helper1); loss_function_helper2.output_nodes_ = { "Mu000000000000", "Mu00000000001" }; loss_function_helper2.loss_functions_ = { std::make_shared<KLDivergenceMuLossOp<float>>(KLDivergenceMuLossOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helper2.loss_function_grads_ = { std::make_shared<KLDivergenceMuLossGradOp<float>>(KLDivergenceMuLossGradOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helpers.push_back(loss_function_helper2); loss_function_helper3.output_nodes_ = { "Var000000000000", "Var00000000001" }; loss_function_helper3.loss_functions_ = { std::make_shared<KLDivergenceLogVarLossOp<float>>(KLDivergenceLogVarLossOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helper3.loss_function_grads_ = { std::make_shared<KLDivergenceLogVarLossGradOp<float>>(KLDivergenceLogVarLossGradOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helpers.push_back(loss_function_helper3); model_trainer.setLossFunctionHelpers(loss_function_helpers); BOOST_CHECK(!model_trainer.checkLossFunctions()); } BOOST_AUTO_TEST_CASE(checkMetricFunctions) { ModelTrainerExt<float> model_trainer; BOOST_CHECK(model_trainer.checkMetricFunctions()); // NOTE: changed to not fail std::vector<MetricFunctionHelper<float>> metric_function_helpers; MetricFunctionHelper<float> metric_function_helper1; metric_function_helper1.output_nodes_ = { "Output000000000000", "Output00000000001", "Output000000000002" }; metric_function_helper1.metric_functions_ = { std::make_shared<CosineSimilarityOp<float>>(CosineSimilarityOp<float>("Mean")), std::make_shared<CosineSimilarityOp<float>>(CosineSimilarityOp<float>("Var")), std::make_shared<PearsonROp<float>>(PearsonROp<float>("Mean")), std::make_shared<PearsonROp<float>>(PearsonROp<float>("Var")) }; metric_function_helper1.metric_names_ = { "CosineSimilarity-Mean", "CosineSimilarity-Var", "PearsonR-Mean", "PearsonR-Var" }; metric_function_helpers.push_back(metric_function_helper1); model_trainer.setMetricFunctionHelpers(metric_function_helpers); BOOST_CHECK(model_trainer.checkMetricFunctions()); metric_function_helpers.clear(); metric_function_helper1.output_nodes_ = { "Output000000000000", "Output00000000001", "Output000000000002" }; metric_function_helper1.metric_functions_ = { std::make_shared<CosineSimilarityOp<float>>(CosineSimilarityOp<float>("Mean")), std::make_shared<CosineSimilarityOp<float>>(CosineSimilarityOp<float>("Var")), std::make_shared<PearsonROp<float>>(PearsonROp<float>("Mean")), std::make_shared<PearsonROp<float>>(PearsonROp<float>("Var")) }; metric_function_helper1.metric_names_.clear(); metric_function_helpers.push_back(metric_function_helper1); model_trainer.setMetricFunctionHelpers(metric_function_helpers); BOOST_CHECK(!model_trainer.checkMetricFunctions()); } BOOST_AUTO_TEST_CASE(reduceLROnPlateau) { ModelTrainerDefaultDevice<float> trainer; std::vector<float> model_errors1 = { 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }; float lr_new1 = trainer.reduceLROnPlateau(model_errors1, 0.1, 10, 3, 0.1); BOOST_CHECK_CLOSE(lr_new1, 1.0, 1e-3); std::vector<float> model_errors2 = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }; float lr_new2a = trainer.reduceLROnPlateau(model_errors2, 0.1, 11, 3, 0.1); BOOST_CHECK_CLOSE(lr_new2a, 1.0, 1e-3); // Too large of `n_epoch_avg` param float lr_new2 = trainer.reduceLROnPlateau(model_errors2, 0.1, 10, 3, 0.1); BOOST_CHECK_CLOSE(lr_new2, 0.1, 1e-3); } template<typename TensorT> class DAGToyModelTrainer : public ModelTrainerDefaultDevice<TensorT> { public: Model<TensorT> makeModel() { // CopyNPasted from Model_DAG_Test Node<TensorT> i1, i2, h1, h2, o1, o2, b1, b2; Link l1, l2, l3, l4, lb1, lb2, l5, l6, l7, l8, lb3, lb4; Weight<TensorT> w1, w2, w3, w4, wb1, wb2, w5, w6, w7, w8, wb3, wb4; Model<TensorT> model1; // Toy network: 1 hidden layer, fully connected, DAG i1 = Node<TensorT>("0", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); i2 = Node<TensorT>("1", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h1 = Node<TensorT>("2", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h2 = Node<TensorT>("3", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o1 = Node<TensorT>("4", NodeType::output, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o2 = Node<TensorT>("5", NodeType::output, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b1 = Node<TensorT>("6", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b2 = Node<TensorT>("7", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); // weights std::shared_ptr<WeightInitOp<TensorT>> weight_init; std::shared_ptr<SolverOp<TensorT>> solver; // weight_init.reset(new RandWeightInitOp(1.0)); // No random init for testing weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); w1 = Weight<TensorT>("0", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); w2 = Weight<TensorT>("1", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); w3 = Weight<TensorT>("2", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); w4 = Weight<TensorT>("3", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); wb1 = Weight<TensorT>("4", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); wb2 = Weight<TensorT>("5", weight_init, solver); // input layer + bias l1 = Link("0", "0", "2", "0"); l2 = Link("1", "0", "3", "1"); l3 = Link("2", "1", "2", "2"); l4 = Link("3", "1", "3", "3"); lb1 = Link("4", "6", "2", "4"); lb2 = Link("5", "6", "3", "5"); // weights weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); w5 = Weight<TensorT>("6", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); w6 = Weight<TensorT>("7", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); w7 = Weight<TensorT>("8", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); w8 = Weight<TensorT>("9", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); wb3 = Weight<TensorT>("10", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); wb4 = Weight<TensorT>("11", weight_init, solver); // hidden layer + bias l5 = Link("6", "2", "4", "6"); l6 = Link("7", "2", "5", "7"); l7 = Link("8", "3", "4", "8"); l8 = Link("9", "3", "5", "9"); lb3 = Link("10", "7", "4", "10"); lb4 = Link("11", "7", "5", "11"); model1.setId(1); model1.addNodes({ i1, i2, h1, h2, o1, o2, b1, b2 }); model1.addWeights({ w1, w2, w3, w4, wb1, wb2, w5, w6, w7, w8, wb3, wb4 }); model1.addLinks({ l1, l2, l3, l4, lb1, lb2, l5, l6, l7, l8, lb3, lb4 }); model1.setInputAndOutputNodes(); return model1; } }; BOOST_AUTO_TEST_CASE(DAGToy1) { // Define the makeModel and trainModel scripts DAGToyModelTrainer<float> trainer; // Define the model resources ModelResources model_resources = { ModelDevice(0, 1) }; // Test parameters trainer.setBatchSize(4); trainer.setMemorySize(1); trainer.setNEpochsTraining(20); trainer.setNEpochsValidation(20); trainer.setLogging(false, false); const std::vector<std::string> input_nodes = {"0", "1", "6", "7"}; // true inputs + biases const std::vector<std::string> output_nodes = {"4", "5"}; std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper1; loss_function_helper1.output_nodes_ = output_nodes; loss_function_helper1.loss_functions_ = { std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-6, 1.0)) }; loss_function_helper1.loss_function_grads_ = { std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-6, 1.0)) }; loss_function_helpers.push_back(loss_function_helper1); trainer.setLossFunctionHelpers(loss_function_helpers); // Make the input data Eigen::Tensor<float, 4> input_data(trainer.getBatchSize(), trainer.getMemorySize(), (int)input_nodes.size(), trainer.getNEpochsTraining()); Eigen::Tensor<float, 3> input_tmp(trainer.getBatchSize(), trainer.getMemorySize(), (int)input_nodes.size()); input_tmp.setValues({{{1, 5, 1, 1}}, {{2, 6, 1, 1}}, {{3, 7, 1, 1}}, {{4, 8, 1, 1}}}); for (int batch_iter=0; batch_iter<trainer.getBatchSize(); ++batch_iter) for (int memory_iter=0; memory_iter<trainer.getMemorySize(); ++memory_iter) for (int nodes_iter=0; nodes_iter<(int)input_nodes.size(); ++nodes_iter) for (int epochs_iter=0; epochs_iter<trainer.getNEpochsTraining(); ++epochs_iter) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = input_tmp(batch_iter, memory_iter, nodes_iter); // Make the output data Eigen::Tensor<float, 4> output_data(trainer.getBatchSize(), trainer.getMemorySize(), (int)output_nodes.size(), trainer.getNEpochsTraining()); Eigen::Tensor<float, 2> output_tmp(trainer.getBatchSize(), (int)output_nodes.size()); output_tmp.setValues({{0, 1}, {0, 1}, {0, 1}, {0, 1}}); for (int batch_iter=0; batch_iter<trainer.getBatchSize(); ++batch_iter){ for (int memory_iter = 0; memory_iter<trainer.getMemorySize(); ++memory_iter){ for (int nodes_iter=0; nodes_iter<(int)output_nodes.size(); ++nodes_iter){ for (int epochs_iter=0; epochs_iter<trainer.getNEpochsTraining(); ++epochs_iter){ if (memory_iter == 0) output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = output_tmp(batch_iter, nodes_iter); else output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 0.0; } } } } // Make the simulation time_steps Eigen::Tensor<float, 3> time_steps(trainer.getBatchSize(), trainer.getMemorySize(), trainer.getNEpochsTraining()); Eigen::Tensor<float, 2> time_steps_tmp(trainer.getBatchSize(), trainer.getMemorySize()); time_steps_tmp.setValues({ { 1 }, { 1 }, { 1 }, { 1 }} ); for (int batch_iter = 0; batch_iter<trainer.getBatchSize(); ++batch_iter) for (int memory_iter = 0; memory_iter<trainer.getMemorySize(); ++memory_iter) for (int epochs_iter = 0; epochs_iter<trainer.getNEpochsTraining(); ++epochs_iter) time_steps(batch_iter, memory_iter, epochs_iter) = time_steps_tmp(batch_iter, memory_iter); Model<float> model1 = trainer.makeModel(); trainer.trainModel(model1, input_data, output_data, time_steps, input_nodes, ModelLogger<float>(), ModelInterpreterDefaultDevice<float>(model_resources)); const Eigen::Tensor<float, 0> total_error = model1.getError().sum(); BOOST_CHECK(total_error(0) <= 757.0); std::vector<float> validation_errors = trainer.validateModel(model1, input_data, output_data, time_steps, input_nodes, ModelLogger<float>(), ModelInterpreterDefaultDevice<float>(model_resources)); const Eigen::Tensor<float, 0> total_error2 = model1.getError().sum(); BOOST_CHECK(total_error2(0) <= 757.0); BOOST_CHECK(validation_errors[0] <= 757.0); // TODO evaluateModel } BOOST_AUTO_TEST_CASE(DAGToy2) { // Define the makeModel and trainModel scripts DAGToyModelTrainer<float> trainer; // Define the model resources ModelResources model_resources = { ModelDevice(0, 1) }; // Test parameters trainer.setBatchSize(4); trainer.setMemorySize(1); trainer.setNEpochsTraining(20); trainer.setNEpochsValidation(20); trainer.setLogging(false, false); const std::vector<std::string> input_nodes = { "0", "1", "6", "7" }; // true inputs + biases const std::vector<std::string> output_nodes = { "4", "5" }; std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper1; loss_function_helper1.output_nodes_ = output_nodes; loss_function_helper1.loss_functions_ = { std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-6, 1.0)) }; loss_function_helper1.loss_function_grads_ = { std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-6, 1.0)) }; loss_function_helpers.push_back(loss_function_helper1); trainer.setLossFunctionHelpers(loss_function_helpers); std::vector<MetricFunctionHelper<float>> metric_function_helpers; MetricFunctionHelper<float> metric_function_helper1; metric_function_helper1.output_nodes_ = output_nodes; metric_function_helper1.metric_functions_ = { std::make_shared<MAEOp<float>>(MAEOp<float>("Sum")) }; metric_function_helper1.metric_names_ = { "MAE"}; metric_function_helpers.push_back(metric_function_helper1); trainer.setMetricFunctionHelpers(metric_function_helpers); DataSimulatorDAGToy<float> data_simulator; Model<float> model1 = trainer.makeModel(); std::pair<std::vector<float>, std::vector<float>> errors = trainer.trainModel(model1, data_simulator, input_nodes, ModelLogger<float>(), ModelInterpreterDefaultDevice<float>(model_resources)); const Eigen::Tensor<float, 0> total_error = model1.getError().sum(); BOOST_CHECK_LE(total_error(0), 757.0); BOOST_CHECK_LE(errors.first.back(), 757.0); BOOST_CHECK_LE(errors.second.back(), 486.0); std::pair<std::vector<float>, std::vector<float>> validation_errors = trainer.validateModel(model1, data_simulator, input_nodes, ModelLogger<float>(), ModelInterpreterDefaultDevice<float>(model_resources)); const Eigen::Tensor<float, 0> total_error_validation = model1.getError().sum(); BOOST_CHECK_LE(total_error_validation(0), 749.853395); BOOST_CHECK_LE(validation_errors.first.back(), 749.853395); BOOST_CHECK_LE(validation_errors.second.back(), 455.849305); // TODO evaluateModel } template<typename TensorT> class DCGToyModelTrainer : public ModelTrainerDefaultDevice<TensorT> { public: Model<TensorT> makeModel() { // CopyNPasted from Model_DCG_Test Node<TensorT> i1, h1, o1, b1, b2; Link l1, l2, l3, lb1, lb2; Weight<TensorT> w1, w2, w3, wb1, wb2; Model<TensorT> model2; // Toy network: 1 hidden layer, fully connected, DCG i1 = Node<TensorT>("0", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h1 = Node<TensorT>("1", NodeType::hidden, NodeStatus::initialized, std::make_shared<ELUOp<float>>(ELUOp<float>()), std::make_shared<ELUGradOp<float>>(ELUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o1 = Node<TensorT>("2", NodeType::output, NodeStatus::initialized, std::make_shared<ELUOp<float>>(ELUOp<float>()), std::make_shared<ELUGradOp<float>>(ELUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b1 = Node<TensorT>("3", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b2 = Node<TensorT>("4", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); // weights std::shared_ptr<WeightInitOp<TensorT>> weight_init; std::shared_ptr<SolverOp<TensorT>> solver; // weight_init.reset(new RandWeightInitOp(1.0)); // No random init for testing weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<TensorT>(0.01, 0.9, 0.999, 1e-8)); w1 = Weight<TensorT>("0", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<TensorT>(0.01, 0.9, 0.999, 1e-8)); w2 = Weight<TensorT>("1", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<TensorT>(0.01, 0.9, 0.999, 1e-8)); w3 = Weight<TensorT>("2", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<TensorT>(0.01, 0.9, 0.999, 1e-8)); wb1 = Weight<TensorT>("3", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); solver.reset(new AdamOp<TensorT>(0.01, 0.9, 0.999, 1e-8)); wb2 = Weight<TensorT>("4", weight_init, solver); weight_init.reset(); solver.reset(); // links l1 = Link("0", "0", "1", "0"); l2 = Link("1", "1", "2", "1"); l3 = Link("2", "1", "1", "2"); lb1 = Link("3", "3", "1", "3"); lb2 = Link("4", "4", "2", "4"); model2.setId(2); model2.addNodes({ i1, h1, o1, b1, b2 }); model2.addWeights({ w1, w2, w3, wb1, wb2 }); model2.addLinks({ l1, l2, l3, lb1, lb2 }); model2.setInputAndOutputNodes(); return model2; } }; BOOST_AUTO_TEST_CASE(DCGToy1) { // Define the makeModel and trainModel scripts DCGToyModelTrainer<float> trainer; // Define the model resources ModelResources model_resources = { ModelDevice(0, 1) }; // Test parameters trainer.setBatchSize(5); trainer.setMemorySize(8); trainer.setNEpochsTraining(50); trainer.setNEpochsValidation(50); const std::vector<std::string> input_nodes = {"0", "3", "4"}; // true inputs + biases const std::vector<std::string> output_nodes = {"2"}; std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper1; loss_function_helper1.output_nodes_ = output_nodes; loss_function_helper1.loss_functions_ = { std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-6, 1.0)) }; loss_function_helper1.loss_function_grads_ = { std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-6, 1.0)) }; loss_function_helpers.push_back(loss_function_helper1); trainer.setLossFunctionHelpers(loss_function_helpers); // Make the input data Eigen::Tensor<float, 4> input_data(trainer.getBatchSize(), trainer.getMemorySize(), (int)input_nodes.size(), trainer.getNEpochsTraining()); Eigen::Tensor<float, 3> input_tmp(trainer.getBatchSize(), trainer.getMemorySize(), (int)input_nodes.size()); input_tmp.setValues( { {{8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}, {1, 0, 0}}, {{9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}, {2, 0, 0}}, {{10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}, {3, 0, 0}}, {{11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}, {4, 0, 0}}, {{12, 0, 0}, {11, 0, 0}, {10, 0, 0}, {9, 0, 0}, {8, 0, 0}, {7, 0, 0}, {6, 0, 0}, {5, 0, 0}} } ); for (int batch_iter=0; batch_iter<trainer.getBatchSize(); ++batch_iter) for (int memory_iter=0; memory_iter<trainer.getMemorySize(); ++memory_iter) for (int nodes_iter=0; nodes_iter<(int)input_nodes.size(); ++nodes_iter) for (int epochs_iter=0; epochs_iter<trainer.getNEpochsTraining(); ++epochs_iter) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = input_tmp(batch_iter, memory_iter, nodes_iter); // Make the output data Eigen::Tensor<float, 4> output_data(trainer.getBatchSize(), trainer.getMemorySize(), (int)output_nodes.size(), trainer.getNEpochsTraining()); Eigen::Tensor<float, 3> output_tmp(trainer.getBatchSize(), trainer.getMemorySize(), (int)output_nodes.size()); output_tmp.setValues( { { { 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 },{ 1 } }, { { 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 },{ 1 } }, { { 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 },{ 2 } }, { { 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 },{ 2 } }, { { 6 },{ 6 },{ 5 },{ 5 },{ 4 },{ 4 },{ 3 },{ 3 } } }); for (int batch_iter=0; batch_iter<trainer.getBatchSize(); ++batch_iter) for (int memory_iter = 0; memory_iter<trainer.getMemorySize(); ++memory_iter) for (int nodes_iter=0; nodes_iter<(int)output_nodes.size(); ++nodes_iter) for (int epochs_iter=0; epochs_iter<trainer.getNEpochsTraining(); ++epochs_iter) output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = output_tmp(batch_iter, memory_iter, nodes_iter); // Make the simulation time_steps Eigen::Tensor<float, 3> time_steps(trainer.getBatchSize(), trainer.getMemorySize(), trainer.getNEpochsTraining()); Eigen::Tensor<float, 2> time_steps_tmp(trainer.getBatchSize(), trainer.getMemorySize()); time_steps_tmp.setValues({ {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1}} ); for (int batch_iter=0; batch_iter<trainer.getBatchSize(); ++batch_iter) for (int memory_iter=0; memory_iter<trainer.getMemorySize(); ++memory_iter) for (int epochs_iter=0; epochs_iter<trainer.getNEpochsTraining(); ++epochs_iter) time_steps(batch_iter, memory_iter, epochs_iter) = time_steps_tmp(batch_iter, memory_iter); Model<float> model1 = trainer.makeModel(); trainer.trainModel(model1, input_data, output_data, time_steps, input_nodes, ModelLogger<float>(), ModelInterpreterDefaultDevice<float>(model_resources)); const Eigen::Tensor<float, 0> total_error = model1.getError().sum(); BOOST_CHECK(total_error(0) <= 1492.6); std::vector<float> validation_errors = trainer.validateModel(model1, input_data, output_data, time_steps, input_nodes, ModelLogger<float>(), ModelInterpreterDefaultDevice<float>(model_resources)); const Eigen::Tensor<float, 0> total_error2 = model1.getError().sum(); BOOST_CHECK(total_error2(0) <= 1492.6); BOOST_CHECK(validation_errors[0] <= 1492.6); // TODO evaluateModel } BOOST_AUTO_TEST_CASE(DCGToy2) { // Define the makeModel and trainModel scripts DCGToyModelTrainer<float> trainer; // Define the model resources ModelResources model_resources = { ModelDevice(0, 1) }; // Test parameters trainer.setBatchSize(5); trainer.setMemorySize(8); trainer.setNEpochsTraining(50); trainer.setNEpochsValidation(50); const std::vector<std::string> input_nodes = { "0", "3", "4" }; // true inputs + biases const std::vector<std::string> output_nodes = { "2" }; std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper1; loss_function_helper1.output_nodes_ = output_nodes; loss_function_helper1.loss_functions_ = { std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-6, 1.0)) }; loss_function_helper1.loss_function_grads_ = { std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-6, 1.0)) }; loss_function_helpers.push_back(loss_function_helper1); trainer.setLossFunctionHelpers(loss_function_helpers); std::vector<MetricFunctionHelper<float>> metric_function_helpers; MetricFunctionHelper<float> metric_function_helper1; metric_function_helper1.output_nodes_ = output_nodes; metric_function_helper1.metric_functions_ = { std::make_shared<MAEOp<float>>(MAEOp<float>("Sum")) }; metric_function_helper1.metric_names_ = { "MAE" }; metric_function_helpers.push_back(metric_function_helper1); trainer.setMetricFunctionHelpers(metric_function_helpers); // Make data simulator DataSimulatorDCGToy<float> data_simulator; Model<float> model1 = trainer.makeModel(); std::pair<std::vector<float>, std::vector<float>> errors = trainer.trainModel(model1, data_simulator, input_nodes, ModelLogger<float>(), ModelInterpreterDefaultDevice<float>(model_resources)); const Eigen::Tensor<float, 0> total_error2 = model1.getError().sum(); BOOST_CHECK(total_error2(0) <= 1492.6); BOOST_CHECK(errors.first.back() <= 1492.6); BOOST_CHECK(errors.second.back() <= 1492.6); // TODO evaluateModel } BOOST_AUTO_TEST_SUITE_END()<file_sep>Indices and tables ================== <file_sep>/**TODO: Add copyright*/ #ifndef EVONET_DATASIMULATOR_H #define EVONET_DATASIMULATOR_H #include <unsupported/Eigen/CXX11/Tensor> namespace EvoNet { /** @brief Base class to implement a data generator or simulator */ template<typename TensorT> class DataSimulator { public: DataSimulator() = default; ///< Default constructor ~DataSimulator() = default; ///< Default destructor /** @brief Entry point to define the simulated data for training Overload creates the input and output data for the entire epoch @param[in, out] input Input Tensor for the model @param[in, out] output Output Tensor for the model @param[in, out] time_steps Time step tensor for the model */ virtual void simulateTrainingData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) {}; /** @brief Entry point to define the simulated data for training Overload creates the input and output data for a single epoch @param[in, out] input Input Tensor for the model @param[in, out] output Output Tensor for the model @param[in, out] time_steps Time step tensor for the model */ virtual void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 2>& time_steps) {}; /** @brief Entry point to define the simulated data for training Overload creates the input and output data for a single epoch @param[in, out] input_data Input Tensor for the model @param[in, out] loss_output_data Output Tensor for the model used to compute the loss function @param[in, out] metric_output_data Output Tensor for the model used to compute the model metrics @param[in, out] time_steps Time step tensor for the model */ virtual void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) {}; /** @brief Entry point to define the simulated data for testing/validation Overload creates the input and output data for the entire epoch @param[in, out] input Input Tensor for the model @param[in, out] output Output Tensor for the model @param[in, out] time_steps Time step tensor for the model */ virtual void simulateValidationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) {}; /** @brief Entry point to define the simulated data for testing/validation Overload creates the input and output data for a single epoch @param[in, out] input Input Tensor for the model @param[in, out] output Output Tensor for the model @param[in, out] time_steps Time step tensor for the model */ virtual void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 2>& time_steps) {}; /** @brief Entry point to define the simulated data for validation Overload creates the input and output data for a single epoch @param[in, out] input_data Input Tensor for the model @param[in, out] loss_output_data Output Tensor for the model used to compute the loss function @param[in, out] metric_output_data Output Tensor for the model used to compute the model metrics @param[in, out] time_steps Time step tensor for the model */ virtual void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) {}; /** @brief Entry point to define the simulation data for evaluation Overload creates the input and output data for the entire epoch @param[in, out] input Input Tensor for the model @param[in, out] output Output Tensor for the model @param[in, out] time_steps Time step tensor for the model */ virtual void simulateEvaluationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 3>& time_steps) {}; /** @brief Entry point to define the simulation data for evaluation Overload creates the input and output data for the entire epoch @param[in, out] input Input Tensor for the model @param[in, out] output Output Tensor for the model @param[in, out] time_steps Time step tensor for the model */ virtual void simulateEvaluationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) {}; }; } #endif //EVONET_DATASIMULATOR_H<file_sep>cpack_add_install_type(recommended DISPLAY_NAME "Recommended") cpack_add_install_type(full DISPLAY_NAME "Full") cpack_add_install_type(minimal DISPLAY_NAME "Minimal") cpack_add_component(share DISPLAY_NAME "EvoNet shared files" DESCRIPTION "EvoNet shared files" INSTALL_TYPES recommended full minimal ) cpack_add_component(library DISPLAY_NAME "Libraries" DESCRIPTION "Libraries" INSTALL_TYPES recommended full minimal ) cpack_add_component(applications DISPLAY_NAME "EvoNet binaries" DESCRIPTION "EvoNet binaries." INSTALL_TYPES recommended full minimal ) cpack_add_component(doc DISPLAY_NAME "Documentation" DESCRIPTION "Class and tool documentation. With tutorials." INSTALL_TYPES recommended full )<file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/PopulationTrainerExperimentalDefaultDevice.h> #include <EvoNet/ml/ModelTrainerExperimentalDefaultDevice.h> #include <EvoNet/ml/ModelReplicatorExperimental.h> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/ml/Model.h> #include <EvoNet/simulator/AddProbSimulator.h> #include <EvoNet/io/Parameters.h> #include <unsupported/Eigen/CXX11/Tensor> using namespace EvoNet; template<typename TensorT> class DataSimulatorExt : public AddProbSimulator<TensorT> { public: void simulateData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); // sequence length const int sequence_length = n_input_nodes / 2; assert(sequence_length == this->sequence_length_); //// generate a new sequence //Eigen::Tensor<TensorT, 1> random_sequence(this->sequence_length_); //Eigen::Tensor<TensorT, 1> mask_sequence(this->sequence_length_); //float result = this->AddProb(random_sequence, mask_sequence, this->n_mask_); // Generate the input and output data for training [BUG FREE] for (int batch_iter = 0; batch_iter<batch_size; ++batch_iter) { for (int epochs_iter = 0; epochs_iter<n_epochs; ++epochs_iter) { // generate a new sequence Eigen::Tensor<TensorT, 1> random_sequence(this->sequence_length_); Eigen::Tensor<TensorT, 1> mask_sequence(this->sequence_length_); float result = this->AddProb(random_sequence, mask_sequence, this->n_mask_); for (int memory_iter = 0; memory_iter<memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes/2; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = random_sequence(nodes_iter); // random sequence input_data(batch_iter, memory_iter, nodes_iter + n_input_nodes/2, epochs_iter) = mask_sequence(nodes_iter); // mask sequence //std::cout << "Node: " << nodes_iter << ";Rand: " << input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) << ";Mask: " << input_data(batch_iter, memory_iter, nodes_iter + n_input_nodes / 2, epochs_iter) << std::endl; } for (int nodes_iter = 0; nodes_iter < n_output_nodes; ++nodes_iter) { output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = result; } } } } //std::cout << "Input data: " << input_data << std::endl; // [TESTS: convert to a test!] //std::cout << "Output data: " << output_data << std::endl; // [TESTS: convert to a test!] time_steps.setConstant(1.0f); } void simulateTrainingData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps)override { simulateData(input_data, output_data, time_steps); } void simulateValidationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps)override { simulateData(input_data, output_data, time_steps); } void simulateEvaluationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 3>& time_steps) override {}; void simulateData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 3>& metric_data, Eigen::Tensor<TensorT, 2>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); // sequence length const int sequence_length = n_input_nodes / 2; assert(sequence_length == this->sequence_length_); // Generate the input and output data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { // generate a new sequence Eigen::Tensor<TensorT, 1> random_sequence(this->sequence_length_); Eigen::Tensor<TensorT, 1> mask_sequence(this->sequence_length_); float result = this->AddProb(random_sequence, mask_sequence, this->n_mask_); for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes / 2; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = random_sequence(nodes_iter); // random sequence input_data(batch_iter, memory_iter, nodes_iter + n_input_nodes / 2) = mask_sequence(nodes_iter); // mask sequence } for (int nodes_iter = 0; nodes_iter < n_output_nodes; ++nodes_iter) { output_data(batch_iter, memory_iter, nodes_iter) = result; metric_data(batch_iter, memory_iter, nodes_iter) = result; } } } time_steps.setConstant(1.0f); } void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 3>& metric_data, Eigen::Tensor<TensorT, 2>& time_steps)override { simulateData(input_data, output_data, metric_data, time_steps); } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 3>& metric_data, Eigen::Tensor<TensorT, 2>& time_steps)override { simulateData(input_data, output_data, metric_data, time_steps); } void simulateEvaluationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& metric_data, Eigen::Tensor<TensorT, 2>& time_steps)override { simulateData(input_data, metric_data, Eigen::Tensor<TensorT, 3>(), time_steps); } }; // Extended classes template<typename TensorT> class ModelTrainerExt : public ModelTrainerExperimentalDefaultDevice<TensorT> { public: /* @brief Minimal network */ void makeModelMinimal(Model<TensorT>& model, const int& n_inputs, const int& n_outputs, int n_hidden_0 = 1, bool specify_layers = false) { model.setId(0); model.setName("AddProbAtt-Min"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_random = model_builder.addInputNodes(model, "Random", "Random", n_inputs, specify_layers); std::vector<std::string> node_names_mask = model_builder.addInputNodes(model, "Mask", "Mask", n_inputs, specify_layers); // Define the activation std::shared_ptr<ActivationOp<TensorT>> activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_output = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_output_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<SGDOp<TensorT>>(SGDOp<TensorT>(1e-3, 0.9, 10)); // Add the hidden layer std::vector<std::string> node_names = model_builder.addFullyConnected(model, "HiddenR", "HiddenR", node_names_random, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names_random.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); model_builder.addFullyConnected(model, "HiddenR", node_names_mask, node_names, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names_mask.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, specify_layers); // Add the output layer node_names = model_builder.addFullyConnected(model, "FC-Out", "FC-Out", node_names, n_outputs, activation_output, activation_output_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 2)), solver_op, 0.0f, 0.0f, false, true); for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::unmodifiable); node_names = model_builder.addSinglyConnected(model, "Output", "Output", node_names, n_outputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } /* @brief Minimal network required to solve the addition problem */ void makeModelSolution(Model<TensorT>& model, const int& n_inputs, const int& n_outputs, bool init_weight_soln = true, bool specify_layers = false) { model.setId(0); model.setName("AddProbAtt-Solution"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_random = model_builder.addInputNodes(model, "Random", "Random", n_inputs, specify_layers); std::vector<std::string> node_names_mask = model_builder.addInputNodes(model, "Mask", "Mask", n_inputs, specify_layers); // Define the activation std::shared_ptr<ActivationOp<TensorT>> activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_output = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_output_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver and weight init ops std::shared_ptr<SolverOp<TensorT>> solver_op; std::shared_ptr<WeightInitOp<TensorT>> weight_init_1, weight_init_2; if (init_weight_soln) { solver_op = std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()); weight_init_1 = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)); weight_init_2 = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)); } else { solver_op = std::make_shared<SGDOp<TensorT>>(SGDOp<TensorT>(1e-3, 0.9, 10)); weight_init_1 = std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names_random.size() + n_inputs) / 2, 1)); weight_init_2 = std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(n_inputs, 2)); } // Add the hidden layer std::vector<std::string> node_names = model_builder.addSinglyConnected(model, "HiddenR", "HiddenR", node_names_random, n_inputs, activation, activation_grad, std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()), std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>()), weight_init_1, solver_op, 0.0f, 0.0f, false, specify_layers); model_builder.addSinglyConnected(model, "HiddenR", node_names_mask, node_names, weight_init_1, solver_op, 0.0f, specify_layers); // Add the output layer node_names = model_builder.addFullyConnected(model, "FC-Out", "FC-Out", node_names, n_outputs, activation_output, activation_output_grad, integration_op, integration_error_op, integration_weight_grad_op, weight_init_2, solver_op, 0.0f, 0.0f, false, specify_layers); for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::unmodifiable); node_names = model_builder.addSinglyConnected(model, "Output", "Output", node_names, n_outputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); for (const std::string& node_name : node_names) model.nodes_.at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } }; template<typename TensorT> class ModelReplicatorExt : public ModelReplicatorExperimental<TensorT> {}; template<typename TensorT> class PopulationTrainerExt : public PopulationTrainerExperimentalDefaultDevice<TensorT> {}; template<class ...ParameterTypes> void main_(const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); // define the population trainer parameters PopulationTrainerExt<float> population_trainer; setPopulationTrainerParameters(population_trainer, args...); // define the population logger PopulationLogger<float> population_logger(true, true); // define the data simulator DataSimulatorExt<float> data_simulator; data_simulator.n_mask_ = std::get<EvoNetParameters::Examples::NMask>(parameters).get(); data_simulator.sequence_length_ = std::get<EvoNetParameters::Examples::SequenceLength>(parameters).get(); // define the input/output nodes std::vector<std::string> input_nodes; for (int i = 0; i < data_simulator.sequence_length_; ++i) { char name_char[512]; sprintf(name_char, "Random_%012d", i); std::string name(name_char); input_nodes.push_back(name); } for (int i = 0; i < data_simulator.sequence_length_; ++i) { char name_char[512]; sprintf(name_char, "Mask_%012d", i); std::string name(name_char); input_nodes.push_back(name); } std::vector<std::string> output_nodes = { "Output_000000000000" }; // define the model interpreters std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; setModelInterpreterParameters(model_interpreters, args...); // define the model trainer ModelTrainerExt<float> model_trainer; setModelTrainerParameters(model_trainer, args...); std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper2; loss_function_helper2.output_nodes_ = output_nodes; loss_function_helper2.loss_functions_ = { std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-24, 1.0)) }; loss_function_helper2.loss_function_grads_ = { std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-24, 1.0)) }; loss_function_helpers.push_back(loss_function_helper2); model_trainer.setLossFunctionHelpers(loss_function_helpers); std::vector<MetricFunctionHelper<float>> metric_function_helpers; MetricFunctionHelper<float> metric_function_helper1; metric_function_helper1.output_nodes_ = output_nodes; metric_function_helper1.metric_functions_ = { std::make_shared<EuclideanDistOp<float>>(EuclideanDistOp<float>("Mean")), std::make_shared<EuclideanDistOp<float>>(EuclideanDistOp<float>("Var")) }; metric_function_helper1.metric_names_ = { "EuclideanDist-Mean", "EuclideanDist-Var" }; metric_function_helpers.push_back(metric_function_helper1); model_trainer.setMetricFunctionHelpers(metric_function_helpers); // define the model logger ModelLogger<float> model_logger(true, true, false, false, false, false, false); // define the model replicator for growth mode ModelReplicatorExt<float> model_replicator; setModelReplicatorParameters(model_replicator, args...); // define the initial population Model<float> model; if (std::get<EvoNetParameters::Main::MakeModel>(parameters).get()) { std::cout << "Making the model..." << std::endl; if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "Minimal") model_trainer.makeModelMinimal(model, input_nodes.size() / 2, output_nodes.size()); else if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "Solution") model_trainer.makeModelSolution(model, input_nodes.size() / 2, output_nodes.size(), false, false); //else if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "DotProdAtt") model_trainer.makeModelDotProdAtt(model, (int)(input_nodes.size() / 2), output_nodes.size(), { 4 }, { 8 }, { 16 }, false, false, false, true); model.setId(0); } else { ModelFile<float> model_file; ModelInterpreterFileDefaultDevice<float> model_interpreter_file; loadModelFromParameters(model, model_interpreters.at(0), model_file, model_interpreter_file, args...); } model.setName(std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get()); //So that all output will be written to a specific directory // Run the training, evaluation, or evolution runTrainEvalEvoFromParameters<float>(model, model_interpreters, model_trainer, population_trainer, model_replicator, data_simulator, model_logger, population_logger, input_nodes, args...); } // Main int main(int argc, char** argv) { // Parse the user commands int id_int = -1; std::string parameters_filename = ""; parseCommandLineArguments(argc, argv, id_int, parameters_filename); // Set the parameter names and defaults EvoNetParameters::General::ID id("id", -1); EvoNetParameters::General::DataDir data_dir("data_dir", std::string("")); EvoNetParameters::Main::DeviceId device_id("device_id", 0); EvoNetParameters::Main::ModelName model_name("model_name", ""); EvoNetParameters::Main::MakeModel make_model("make_model", true); EvoNetParameters::Main::LoadModelCsv load_model_csv("load_model_csv", false); EvoNetParameters::Main::LoadModelBinary load_model_binary("load_model_binary", false); EvoNetParameters::Main::TrainModel train_model("train_model", true); EvoNetParameters::Main::EvolveModel evolve_model("evolve_model", false); EvoNetParameters::Main::EvaluateModel evaluate_model("evaluate_model", false); EvoNetParameters::Main::EvaluateModels evaluate_models("evaluate_models", false); EvoNetParameters::Examples::NMask n_mask("n_mask", 2); EvoNetParameters::Examples::SequenceLength sequence_length("sequence_length", 25); EvoNetParameters::Examples::ModelType model_type("model_type", "Solution"); EvoNetParameters::Examples::SimulationType simulation_type("simulation_type", ""); EvoNetParameters::Examples::BiochemicalRxnsFilename biochemical_rxns_filename("biochemical_rxns_filename", "iJO1366.csv"); EvoNetParameters::PopulationTrainer::PopulationName population_name("population_name", ""); EvoNetParameters::PopulationTrainer::NGenerations n_generations("n_generations", 1); EvoNetParameters::PopulationTrainer::NInterpreters n_interpreters("n_interpreters", 1); EvoNetParameters::PopulationTrainer::PruneModelNum prune_model_num("prune_model_num", 10); EvoNetParameters::PopulationTrainer::RemoveIsolatedNodes remove_isolated_nodes("remove_isolated_nodes", true); EvoNetParameters::PopulationTrainer::CheckCompleteModelInputToOutput check_complete_model_input_to_output("check_complete_model_input_to_output", true); EvoNetParameters::PopulationTrainer::PopulationSize population_size("population_size", 128); EvoNetParameters::PopulationTrainer::NTop n_top("n_top", 8); EvoNetParameters::PopulationTrainer::NRandom n_random("n_random", 8); EvoNetParameters::PopulationTrainer::NReplicatesPerModel n_replicates_per_model("n_replicates_per_model", 1); EvoNetParameters::PopulationTrainer::ResetModelCopyWeights reset_model_copy_weights("reset_model_copy_weights", true); EvoNetParameters::PopulationTrainer::ResetModelTemplateWeights reset_model_template_weights("reset_model_template_weights", true); EvoNetParameters::PopulationTrainer::Logging population_logging("population_logging", true); EvoNetParameters::PopulationTrainer::SetPopulationSizeFixed set_population_size_fixed("set_population_size_fixed", false); EvoNetParameters::PopulationTrainer::SetPopulationSizeDoubling set_population_size_doubling("set_population_size_doubling", true); EvoNetParameters::PopulationTrainer::SetTrainingStepsByModelSize set_training_steps_by_model_size("set_training_steps_by_model_size", false); EvoNetParameters::ModelTrainer::BatchSize batch_size("batch_size", 32); EvoNetParameters::ModelTrainer::MemorySize memory_size("memory_size", 64); EvoNetParameters::ModelTrainer::NEpochsTraining n_epochs_training("n_epochs_training", 1000); EvoNetParameters::ModelTrainer::NEpochsValidation n_epochs_validation("n_epochs_validation", 25); EvoNetParameters::ModelTrainer::NEpochsEvaluation n_epochs_evaluation("n_epochs_evaluation", 10); EvoNetParameters::ModelTrainer::NTBTTSteps n_tbtt_steps("n_tbtt_steps", 64); EvoNetParameters::ModelTrainer::NTETTSteps n_tett_steps("n_tett_steps", 64); EvoNetParameters::ModelTrainer::Verbosity verbosity("verbosity", 1); EvoNetParameters::ModelTrainer::LoggingTraining logging_training("logging_training", true); EvoNetParameters::ModelTrainer::LoggingValidation logging_validation("logging_validation", false); EvoNetParameters::ModelTrainer::LoggingEvaluation logging_evaluation("logging_evaluation", true); EvoNetParameters::ModelTrainer::FindCycles find_cycles("find_cycles", true); EvoNetParameters::ModelTrainer::FastInterpreter fast_interpreter("fast_interpreter", true); EvoNetParameters::ModelTrainer::PreserveOoO preserve_ooo("preserve_ooo", true); EvoNetParameters::ModelTrainer::InterpretModel interpret_model("interpret_model", true); EvoNetParameters::ModelTrainer::ResetModel reset_model("reset_model", false); EvoNetParameters::ModelTrainer::ResetInterpreter reset_interpreter("reset_interpreter", true); EvoNetParameters::ModelReplicator::NNodeDownAdditionsLB n_node_down_additions_lb("n_node_down_additions_lb", 0); EvoNetParameters::ModelReplicator::NNodeRightAdditionsLB n_node_right_additions_lb("n_node_right_additions_lb", 0); EvoNetParameters::ModelReplicator::NNodeDownCopiesLB n_node_down_copies_lb("n_node_down_copies_lb", 0); EvoNetParameters::ModelReplicator::NNodeRightCopiesLB n_node_right_copies_lb("n_node_right_copies_lb", 0); EvoNetParameters::ModelReplicator::NLinkAdditionsLB n_link_additons_lb("n_link_additons_lb", 0); EvoNetParameters::ModelReplicator::NLinkCopiesLB n_link_copies_lb("n_link_copies_lb", 0); EvoNetParameters::ModelReplicator::NNodeDeletionsLB n_node_deletions_lb("n_node_deletions_lb", 0); EvoNetParameters::ModelReplicator::NLinkDeletionsLB n_link_deletions_lb("n_link_deletions_lb", 0); EvoNetParameters::ModelReplicator::NNodeActivationChangesLB n_node_activation_changes_lb("n_node_activation_changes_lb", 0); EvoNetParameters::ModelReplicator::NNodeIntegrationChangesLB n_node_integration_changes_lb("n_node_integration_changes_lb", 0); EvoNetParameters::ModelReplicator::NModuleAdditionsLB n_module_additions_lb("n_module_additions_lb", 0); EvoNetParameters::ModelReplicator::NModuleCopiesLB n_module_copies_lb("n_module_copies_lb", 0); EvoNetParameters::ModelReplicator::NModuleDeletionsLB n_module_deletions_lb("n_module_deletions_lb", 0); EvoNetParameters::ModelReplicator::NNodeDownAdditionsUB n_node_down_additions_ub("n_node_down_additions_ub", 0); EvoNetParameters::ModelReplicator::NNodeRightAdditionsUB n_node_right_additions_ub("n_node_right_additions_ub", 0); EvoNetParameters::ModelReplicator::NNodeDownCopiesUB n_node_down_copies_ub("n_node_down_copies_ub", 0); EvoNetParameters::ModelReplicator::NNodeRightCopiesUB n_node_right_copies_ub("n_node_right_copies_ub", 0); EvoNetParameters::ModelReplicator::NLinkAdditionsUB n_link_additons_ub("n_link_additons_ub", 0); EvoNetParameters::ModelReplicator::NLinkCopiesUB n_link_copies_ub("n_link_copies_ub", 0); EvoNetParameters::ModelReplicator::NNodeDeletionsUB n_node_deletions_ub("n_node_deletions_ub", 0); EvoNetParameters::ModelReplicator::NLinkDeletionsUB n_link_deletions_ub("n_link_deletions_ub", 0); EvoNetParameters::ModelReplicator::NNodeActivationChangesUB n_node_activation_changes_ub("n_node_activation_changes_ub", 0); EvoNetParameters::ModelReplicator::NNodeIntegrationChangesUB n_node_integration_changes_ub("n_node_integration_changes_ub", 0); EvoNetParameters::ModelReplicator::NModuleAdditionsUB n_module_additions_ub("n_module_additions_ub", 0); EvoNetParameters::ModelReplicator::NModuleCopiesUB n_module_copies_ub("n_module_copies_ub", 0); EvoNetParameters::ModelReplicator::NModuleDeletionsUB n_module_deletions_ub("n_module_deletions_ub", 0); EvoNetParameters::ModelReplicator::SetModificationRateFixed set_modification_rate_fixed("set_modification_rate_fixed", false); EvoNetParameters::ModelReplicator::SetModificationRateByPrevError set_modification_rate_by_prev_error("set_modification_rate_by_prev_error", false); auto parameters = std::make_tuple(id, data_dir, device_id, model_name, make_model, load_model_csv, load_model_binary, train_model, evolve_model, evaluate_model, evaluate_models, n_mask, sequence_length, model_type, simulation_type, biochemical_rxns_filename, population_name, n_generations, n_interpreters, prune_model_num, remove_isolated_nodes, check_complete_model_input_to_output, population_size, n_top, n_random, n_replicates_per_model, reset_model_copy_weights, reset_model_template_weights, population_logging, set_population_size_fixed, set_population_size_doubling, set_training_steps_by_model_size, batch_size, memory_size, n_epochs_training, n_epochs_validation, n_epochs_evaluation, n_tbtt_steps, n_tett_steps, verbosity, logging_training, logging_validation, logging_evaluation, find_cycles, fast_interpreter, preserve_ooo, interpret_model, reset_model, reset_interpreter, n_node_down_additions_lb, n_node_right_additions_lb, n_node_down_copies_lb, n_node_right_copies_lb, n_link_additons_lb, n_link_copies_lb, n_node_deletions_lb, n_link_deletions_lb, n_node_activation_changes_lb, n_node_integration_changes_lb, n_module_additions_lb, n_module_copies_lb, n_module_deletions_lb, n_node_down_additions_ub, n_node_right_additions_ub, n_node_down_copies_ub, n_node_right_copies_ub, n_link_additons_ub, n_link_copies_ub, n_node_deletions_ub, n_link_deletions_ub, n_node_activation_changes_ub, n_node_integration_changes_ub, n_module_additions_ub, n_module_copies_ub, n_module_deletions_ub, set_modification_rate_fixed, set_modification_rate_by_prev_error); // Read in the parameters LoadParametersFromCsv loadParametersFromCsv(id_int, parameters_filename); parameters = EvoNet::apply([&loadParametersFromCsv](auto&& ...args) { return loadParametersFromCsv(args...); }, parameters); // Run the application EvoNet::apply([](auto&& ...args) { main_(args ...); }, parameters); return 0; }<file_sep>/**TODO: Add copyright*/ #include "Metabolomics_example.h" using namespace SmartPeak; /* @brief Example using blood metabolomics data from three different blood fractions including Platelet (PLT), Red blood cells (RBC), and Plasma (P) fractions from two experiments including an in vivo pre/post drug response and ex vivo drug response */ // Scripts to run void main_statistics_timecourseSummary(std::string blood_fraction = "PLT", bool run_timeCourse_S01D01 = false, bool run_timeCourse_S01D02 = false, bool run_timeCourse_S01D03 = false, bool run_timeCourse_S01D04 = false, bool run_timeCourse_S01D05 = false, bool run_timeCourse_S02D01 = false, bool run_timeCourse_S02D02 = false, bool run_timeCourse_S02D03 = false, bool run_timeCourse_S02D04 = false, bool run_timeCourse_S02D05 = false, bool run_timeCourse_S01D01vsS01D02 = false, bool run_timeCourse_S01D01vsS01D03 = false, bool run_timeCourse_S01D01vsS01D04 = false, bool run_timeCourse_S01D01vsS01D05 = false, bool run_timeCourse_S02D01vsS02D02 = false, bool run_timeCourse_S02D01vsS02D03 = false, bool run_timeCourse_S02D01vsS02D04 = false, bool run_timeCourse_S02D01vsS02D05 = false) { // define the data simulator BiochemicalReactionModel<float> metabolomics_data; // data dirs //std::string data_dir = "C:/Users/dmccloskey/Dropbox (UCSD SBRG)/Metabolomics_RBC_Platelet/"; //std::string data_dir = "C:/Users/domccl/Dropbox (UCSD SBRG)/Metabolomics_RBC_Platelet/"; std::string data_dir = "/home/user/Data/"; std::string timeCourse_S01D01_filename, timeCourse_S01D02_filename, timeCourse_S01D03_filename, timeCourse_S01D04_filename, timeCourse_S01D05_filename, timeCourse_S02D01_filename, timeCourse_S02D02_filename, timeCourse_S02D03_filename, timeCourse_S02D04_filename, timeCourse_S02D05_filename, timeCourse_S01D01vsS01D02_filename, timeCourse_S01D01vsS01D03_filename, timeCourse_S01D01vsS01D04_filename, timeCourse_S01D01vsS01D05_filename, timeCourse_S02D01vsS02D02_filename, timeCourse_S02D01vsS02D03_filename, timeCourse_S02D01vsS02D04_filename, timeCourse_S02D01vsS02D05_filename, timeCourseSampleSummary_S01D01_filename, timeCourseSampleSummary_S01D02_filename, timeCourseSampleSummary_S01D03_filename, timeCourseSampleSummary_S01D04_filename, timeCourseSampleSummary_S01D05_filename, timeCourseSampleSummary_S02D01_filename, timeCourseSampleSummary_S02D02_filename, timeCourseSampleSummary_S02D03_filename, timeCourseSampleSummary_S02D04_filename, timeCourseSampleSummary_S02D05_filename, timeCourseSampleSummary_S01D01vsS01D02_filename, timeCourseSampleSummary_S01D01vsS01D03_filename, timeCourseSampleSummary_S01D01vsS01D04_filename, timeCourseSampleSummary_S01D01vsS01D05_filename, timeCourseSampleSummary_S02D01vsS02D02_filename, timeCourseSampleSummary_S02D01vsS02D03_filename, timeCourseSampleSummary_S02D01vsS02D04_filename, timeCourseSampleSummary_S02D01vsS02D05_filename, timeCourseFeatureSummary_S01D01_filename, timeCourseFeatureSummary_S01D02_filename, timeCourseFeatureSummary_S01D03_filename, timeCourseFeatureSummary_S01D04_filename, timeCourseFeatureSummary_S01D05_filename, timeCourseFeatureSummary_S02D01_filename, timeCourseFeatureSummary_S02D02_filename, timeCourseFeatureSummary_S02D03_filename, timeCourseFeatureSummary_S02D04_filename, timeCourseFeatureSummary_S02D05_filename, timeCourseFeatureSummary_S01D01vsS01D02_filename, timeCourseFeatureSummary_S01D01vsS01D03_filename, timeCourseFeatureSummary_S01D01vsS01D04_filename, timeCourseFeatureSummary_S01D01vsS01D05_filename, timeCourseFeatureSummary_S02D01vsS02D02_filename, timeCourseFeatureSummary_S02D01vsS02D03_filename, timeCourseFeatureSummary_S02D01vsS02D04_filename, timeCourseFeatureSummary_S02D01vsS02D05_filename; if (blood_fraction == "RBC") { // RBC filenames timeCourse_S01D01_filename = data_dir + "RBC_timeCourse_S01D01.csv"; timeCourse_S01D02_filename = data_dir + "RBC_timeCourse_S01D02.csv"; timeCourse_S01D03_filename = data_dir + "RBC_timeCourse_S01D03.csv"; timeCourse_S01D04_filename = data_dir + "RBC_timeCourse_S01D04.csv"; timeCourse_S01D05_filename = data_dir + "RBC_timeCourse_S01D05.csv"; timeCourse_S02D01_filename = data_dir + "RBC_timeCourse_S02D01.csv"; timeCourse_S02D02_filename = data_dir + "RBC_timeCourse_S02D02.csv"; timeCourse_S02D03_filename = data_dir + "RBC_timeCourse_S02D03.csv"; timeCourse_S02D04_filename = data_dir + "RBC_timeCourse_S02D04.csv"; timeCourse_S02D05_filename = data_dir + "RBC_timeCourse_S02D05.csv"; timeCourse_S01D01vsS01D02_filename = data_dir + "RBC_timeCourse_S01D01vsS01D02.csv"; timeCourse_S01D01vsS01D03_filename = data_dir + "RBC_timeCourse_S01D01vsS01D03.csv"; timeCourse_S01D01vsS01D04_filename = data_dir + "RBC_timeCourse_S01D01vsS01D04.csv"; timeCourse_S01D01vsS01D05_filename = data_dir + "RBC_timeCourse_S01D01vsS01D05.csv"; timeCourse_S02D01vsS02D02_filename = data_dir + "RBC_timeCourse_S02D01vsS02D02.csv"; timeCourse_S02D01vsS02D03_filename = data_dir + "RBC_timeCourse_S02D01vsS02D03.csv"; timeCourse_S02D01vsS02D04_filename = data_dir + "RBC_timeCourse_S02D01vsS02D04.csv"; timeCourse_S02D01vsS02D05_filename = data_dir + "RBC_timeCourse_S02D01vsS02D05.csv"; timeCourseSampleSummary_S01D01_filename = data_dir + "RBC_timeCourseSampleSummary_S01D01.csv"; timeCourseSampleSummary_S01D02_filename = data_dir + "RBC_timeCourseSampleSummary_S01D02.csv"; timeCourseSampleSummary_S01D03_filename = data_dir + "RBC_timeCourseSampleSummary_S01D03.csv"; timeCourseSampleSummary_S01D04_filename = data_dir + "RBC_timeCourseSampleSummary_S01D04.csv"; timeCourseSampleSummary_S01D05_filename = data_dir + "RBC_timeCourseSampleSummary_S01D05.csv"; timeCourseSampleSummary_S02D01_filename = data_dir + "RBC_timeCourseSampleSummary_S02D01.csv"; timeCourseSampleSummary_S02D02_filename = data_dir + "RBC_timeCourseSampleSummary_S02D02.csv"; timeCourseSampleSummary_S02D03_filename = data_dir + "RBC_timeCourseSampleSummary_S02D03.csv"; timeCourseSampleSummary_S02D04_filename = data_dir + "RBC_timeCourseSampleSummary_S02D04.csv"; timeCourseSampleSummary_S02D05_filename = data_dir + "RBC_timeCourseSampleSummary_S02D05.csv"; timeCourseSampleSummary_S01D01vsS01D02_filename = data_dir + "RBC_timeCourseSampleSummary_S01D01vsS01D02.csv"; timeCourseSampleSummary_S01D01vsS01D03_filename = data_dir + "RBC_timeCourseSampleSummary_S01D01vsS01D03.csv"; timeCourseSampleSummary_S01D01vsS01D04_filename = data_dir + "RBC_timeCourseSampleSummary_S01D01vsS01D04.csv"; timeCourseSampleSummary_S01D01vsS01D05_filename = data_dir + "RBC_timeCourseSampleSummary_S01D01vsS01D05.csv"; timeCourseSampleSummary_S02D01vsS02D02_filename = data_dir + "RBC_timeCourseSampleSummary_S02D01vsS02D02.csv"; timeCourseSampleSummary_S02D01vsS02D03_filename = data_dir + "RBC_timeCourseSampleSummary_S02D01vsS02D03.csv"; timeCourseSampleSummary_S02D01vsS02D04_filename = data_dir + "RBC_timeCourseSampleSummary_S02D01vsS02D04.csv"; timeCourseSampleSummary_S02D01vsS02D05_filename = data_dir + "RBC_timeCourseSampleSummary_S02D01vsS02D05.csv"; timeCourseFeatureSummary_S01D01_filename = data_dir + "RBC_timeCourseFeatureSummary_S01D01.csv"; timeCourseFeatureSummary_S01D02_filename = data_dir + "RBC_timeCourseFeatureSummary_S01D02.csv"; timeCourseFeatureSummary_S01D03_filename = data_dir + "RBC_timeCourseFeatureSummary_S01D03.csv"; timeCourseFeatureSummary_S01D04_filename = data_dir + "RBC_timeCourseFeatureSummary_S01D04.csv"; timeCourseFeatureSummary_S01D05_filename = data_dir + "RBC_timeCourseFeatureSummary_S01D05.csv"; timeCourseFeatureSummary_S02D01_filename = data_dir + "RBC_timeCourseFeatureSummary_S02D01.csv"; timeCourseFeatureSummary_S02D02_filename = data_dir + "RBC_timeCourseFeatureSummary_S02D02.csv"; timeCourseFeatureSummary_S02D03_filename = data_dir + "RBC_timeCourseFeatureSummary_S02D03.csv"; timeCourseFeatureSummary_S02D04_filename = data_dir + "RBC_timeCourseFeatureSummary_S02D04.csv"; timeCourseFeatureSummary_S02D05_filename = data_dir + "RBC_timeCourseFeatureSummary_S02D05.csv"; timeCourseFeatureSummary_S01D01vsS01D02_filename = data_dir + "RBC_timeCourseFeatureSummary_S01D01vsS01D02.csv"; timeCourseFeatureSummary_S01D01vsS01D03_filename = data_dir + "RBC_timeCourseFeatureSummary_S01D01vsS01D03.csv"; timeCourseFeatureSummary_S01D01vsS01D04_filename = data_dir + "RBC_timeCourseFeatureSummary_S01D01vsS01D04.csv"; timeCourseFeatureSummary_S01D01vsS01D05_filename = data_dir + "RBC_timeCourseFeatureSummary_S01D01vsS01D05.csv"; timeCourseFeatureSummary_S02D01vsS02D02_filename = data_dir + "RBC_timeCourseFeatureSummary_S02D01vsS02D02.csv"; timeCourseFeatureSummary_S02D01vsS02D03_filename = data_dir + "RBC_timeCourseFeatureSummary_S02D01vsS02D03.csv"; timeCourseFeatureSummary_S02D01vsS02D04_filename = data_dir + "RBC_timeCourseFeatureSummary_S02D01vsS02D04.csv"; timeCourseFeatureSummary_S02D01vsS02D05_filename = data_dir + "RBC_timeCourseFeatureSummary_S02D01vsS02D05.csv"; } else if (blood_fraction == "PLT") { // PLT filenames timeCourse_S01D01_filename = data_dir + "PLT_timeCourse_S01D01.csv"; timeCourse_S01D02_filename = data_dir + "PLT_timeCourse_S01D02.csv"; timeCourse_S01D03_filename = data_dir + "PLT_timeCourse_S01D03.csv"; timeCourse_S01D04_filename = data_dir + "PLT_timeCourse_S01D04.csv"; timeCourse_S01D05_filename = data_dir + "PLT_timeCourse_S01D05.csv"; timeCourse_S02D01_filename = data_dir + "PLT_timeCourse_S02D01.csv"; timeCourse_S02D02_filename = data_dir + "PLT_timeCourse_S02D02.csv"; timeCourse_S02D03_filename = data_dir + "PLT_timeCourse_S02D03.csv"; timeCourse_S02D04_filename = data_dir + "PLT_timeCourse_S02D04.csv"; timeCourse_S02D05_filename = data_dir + "PLT_timeCourse_S02D05.csv"; timeCourse_S01D01vsS01D02_filename = data_dir + "PLT_timeCourse_S01D01vsS01D02.csv"; timeCourse_S01D01vsS01D03_filename = data_dir + "PLT_timeCourse_S01D01vsS01D03.csv"; timeCourse_S01D01vsS01D04_filename = data_dir + "PLT_timeCourse_S01D01vsS01D04.csv"; timeCourse_S01D01vsS01D05_filename = data_dir + "PLT_timeCourse_S01D01vsS01D05.csv"; timeCourse_S02D01vsS02D02_filename = data_dir + "PLT_timeCourse_S02D01vsS02D02.csv"; timeCourse_S02D01vsS02D03_filename = data_dir + "PLT_timeCourse_S02D01vsS02D03.csv"; timeCourse_S02D01vsS02D04_filename = data_dir + "PLT_timeCourse_S02D01vsS02D04.csv"; timeCourse_S02D01vsS02D05_filename = data_dir + "PLT_timeCourse_S02D01vsS02D05.csv"; timeCourseSampleSummary_S01D01_filename = data_dir + "PLT_timeCourseSampleSummary_S01D01.csv"; timeCourseSampleSummary_S01D02_filename = data_dir + "PLT_timeCourseSampleSummary_S01D02.csv"; timeCourseSampleSummary_S01D03_filename = data_dir + "PLT_timeCourseSampleSummary_S01D03.csv"; timeCourseSampleSummary_S01D04_filename = data_dir + "PLT_timeCourseSampleSummary_S01D04.csv"; timeCourseSampleSummary_S01D05_filename = data_dir + "PLT_timeCourseSampleSummary_S01D05.csv"; timeCourseSampleSummary_S02D01_filename = data_dir + "PLT_timeCourseSampleSummary_S02D01.csv"; timeCourseSampleSummary_S02D02_filename = data_dir + "PLT_timeCourseSampleSummary_S02D02.csv"; timeCourseSampleSummary_S02D03_filename = data_dir + "PLT_timeCourseSampleSummary_S02D03.csv"; timeCourseSampleSummary_S02D04_filename = data_dir + "PLT_timeCourseSampleSummary_S02D04.csv"; timeCourseSampleSummary_S02D05_filename = data_dir + "PLT_timeCourseSampleSummary_S02D05.csv"; timeCourseSampleSummary_S01D01vsS01D02_filename = data_dir + "PLT_timeCourseSampleSummary_S01D01vsS01D02.csv"; timeCourseSampleSummary_S01D01vsS01D03_filename = data_dir + "PLT_timeCourseSampleSummary_S01D01vsS01D03.csv"; timeCourseSampleSummary_S01D01vsS01D04_filename = data_dir + "PLT_timeCourseSampleSummary_S01D01vsS01D04.csv"; timeCourseSampleSummary_S01D01vsS01D05_filename = data_dir + "PLT_timeCourseSampleSummary_S01D01vsS01D05.csv"; timeCourseSampleSummary_S02D01vsS02D02_filename = data_dir + "PLT_timeCourseSampleSummary_S02D01vsS02D02.csv"; timeCourseSampleSummary_S02D01vsS02D03_filename = data_dir + "PLT_timeCourseSampleSummary_S02D01vsS02D03.csv"; timeCourseSampleSummary_S02D01vsS02D04_filename = data_dir + "PLT_timeCourseSampleSummary_S02D01vsS02D04.csv"; timeCourseSampleSummary_S02D01vsS02D05_filename = data_dir + "PLT_timeCourseSampleSummary_S02D01vsS02D05.csv"; timeCourseFeatureSummary_S01D01_filename = data_dir + "PLT_timeCourseFeatureSummary_S01D01.csv"; timeCourseFeatureSummary_S01D02_filename = data_dir + "PLT_timeCourseFeatureSummary_S01D02.csv"; timeCourseFeatureSummary_S01D03_filename = data_dir + "PLT_timeCourseFeatureSummary_S01D03.csv"; timeCourseFeatureSummary_S01D04_filename = data_dir + "PLT_timeCourseFeatureSummary_S01D04.csv"; timeCourseFeatureSummary_S01D05_filename = data_dir + "PLT_timeCourseFeatureSummary_S01D05.csv"; timeCourseFeatureSummary_S02D01_filename = data_dir + "PLT_timeCourseFeatureSummary_S02D01.csv"; timeCourseFeatureSummary_S02D02_filename = data_dir + "PLT_timeCourseFeatureSummary_S02D02.csv"; timeCourseFeatureSummary_S02D03_filename = data_dir + "PLT_timeCourseFeatureSummary_S02D03.csv"; timeCourseFeatureSummary_S02D04_filename = data_dir + "PLT_timeCourseFeatureSummary_S02D04.csv"; timeCourseFeatureSummary_S02D05_filename = data_dir + "PLT_timeCourseFeatureSummary_S02D05.csv"; timeCourseFeatureSummary_S01D01vsS01D02_filename = data_dir + "PLT_timeCourseFeatureSummary_S01D01vsS01D02.csv"; timeCourseFeatureSummary_S01D01vsS01D03_filename = data_dir + "PLT_timeCourseFeatureSummary_S01D01vsS01D03.csv"; timeCourseFeatureSummary_S01D01vsS01D04_filename = data_dir + "PLT_timeCourseFeatureSummary_S01D01vsS01D04.csv"; timeCourseFeatureSummary_S01D01vsS01D05_filename = data_dir + "PLT_timeCourseFeatureSummary_S01D01vsS01D05.csv"; timeCourseFeatureSummary_S02D01vsS02D02_filename = data_dir + "PLT_timeCourseFeatureSummary_S02D01vsS02D02.csv"; timeCourseFeatureSummary_S02D01vsS02D03_filename = data_dir + "PLT_timeCourseFeatureSummary_S02D01vsS02D03.csv"; timeCourseFeatureSummary_S02D01vsS02D04_filename = data_dir + "PLT_timeCourseFeatureSummary_S02D01vsS02D04.csv"; timeCourseFeatureSummary_S02D01vsS02D05_filename = data_dir + "PLT_timeCourseFeatureSummary_S02D01vsS02D05.csv"; } else if (blood_fraction == "P") { // P filenames timeCourse_S01D01_filename = data_dir + "P_timeCourse_S01D01.csv"; timeCourse_S01D02_filename = data_dir + "P_timeCourse_S01D02.csv"; timeCourse_S01D03_filename = data_dir + "P_timeCourse_S01D03.csv"; timeCourse_S01D04_filename = data_dir + "P_timeCourse_S01D04.csv"; timeCourse_S01D05_filename = data_dir + "P_timeCourse_S01D05.csv"; timeCourse_S02D01_filename = data_dir + "P_timeCourse_S02D01.csv"; timeCourse_S02D02_filename = data_dir + "P_timeCourse_S02D02.csv"; timeCourse_S02D03_filename = data_dir + "P_timeCourse_S02D03.csv"; timeCourse_S02D04_filename = data_dir + "P_timeCourse_S02D04.csv"; timeCourse_S02D05_filename = data_dir + "P_timeCourse_S02D05.csv"; timeCourse_S01D01vsS01D02_filename = data_dir + "P_timeCourse_S01D01vsS01D02.csv"; timeCourse_S01D01vsS01D03_filename = data_dir + "P_timeCourse_S01D01vsS01D03.csv"; timeCourse_S01D01vsS01D04_filename = data_dir + "P_timeCourse_S01D01vsS01D04.csv"; timeCourse_S01D01vsS01D05_filename = data_dir + "P_timeCourse_S01D01vsS01D05.csv"; timeCourse_S02D01vsS02D02_filename = data_dir + "P_timeCourse_S02D01vsS02D02.csv"; timeCourse_S02D01vsS02D03_filename = data_dir + "P_timeCourse_S02D01vsS02D03.csv"; timeCourse_S02D01vsS02D04_filename = data_dir + "P_timeCourse_S02D01vsS02D04.csv"; timeCourse_S02D01vsS02D05_filename = data_dir + "P_timeCourse_S02D01vsS02D05.csv"; timeCourseSampleSummary_S01D01_filename = data_dir + "P_timeCourseSampleSummary_S01D01.csv"; timeCourseSampleSummary_S01D02_filename = data_dir + "P_timeCourseSampleSummary_S01D02.csv"; timeCourseSampleSummary_S01D03_filename = data_dir + "P_timeCourseSampleSummary_S01D03.csv"; timeCourseSampleSummary_S01D04_filename = data_dir + "P_timeCourseSampleSummary_S01D04.csv"; timeCourseSampleSummary_S01D05_filename = data_dir + "P_timeCourseSampleSummary_S01D05.csv"; timeCourseSampleSummary_S02D01_filename = data_dir + "P_timeCourseSampleSummary_S02D01.csv"; timeCourseSampleSummary_S02D02_filename = data_dir + "P_timeCourseSampleSummary_S02D02.csv"; timeCourseSampleSummary_S02D03_filename = data_dir + "P_timeCourseSampleSummary_S02D03.csv"; timeCourseSampleSummary_S02D04_filename = data_dir + "P_timeCourseSampleSummary_S02D04.csv"; timeCourseSampleSummary_S02D05_filename = data_dir + "P_timeCourseSampleSummary_S02D05.csv"; timeCourseSampleSummary_S01D01vsS01D02_filename = data_dir + "P_timeCourseSampleSummary_S01D01vsS01D02.csv"; timeCourseSampleSummary_S01D01vsS01D03_filename = data_dir + "P_timeCourseSampleSummary_S01D01vsS01D03.csv"; timeCourseSampleSummary_S01D01vsS01D04_filename = data_dir + "P_timeCourseSampleSummary_S01D01vsS01D04.csv"; timeCourseSampleSummary_S01D01vsS01D05_filename = data_dir + "P_timeCourseSampleSummary_S01D01vsS01D05.csv"; timeCourseSampleSummary_S02D01vsS02D02_filename = data_dir + "P_timeCourseSampleSummary_S02D01vsS02D02.csv"; timeCourseSampleSummary_S02D01vsS02D03_filename = data_dir + "P_timeCourseSampleSummary_S02D01vsS02D03.csv"; timeCourseSampleSummary_S02D01vsS02D04_filename = data_dir + "P_timeCourseSampleSummary_S02D01vsS02D04.csv"; timeCourseSampleSummary_S02D01vsS02D05_filename = data_dir + "P_timeCourseSampleSummary_S02D01vsS02D05.csv"; timeCourseFeatureSummary_S01D01_filename = data_dir + "P_timeCourseFeatureSummary_S01D01.csv"; timeCourseFeatureSummary_S01D02_filename = data_dir + "P_timeCourseFeatureSummary_S01D02.csv"; timeCourseFeatureSummary_S01D03_filename = data_dir + "P_timeCourseFeatureSummary_S01D03.csv"; timeCourseFeatureSummary_S01D04_filename = data_dir + "P_timeCourseFeatureSummary_S01D04.csv"; timeCourseFeatureSummary_S01D05_filename = data_dir + "P_timeCourseFeatureSummary_S01D05.csv"; timeCourseFeatureSummary_S02D01_filename = data_dir + "P_timeCourseFeatureSummary_S02D01.csv"; timeCourseFeatureSummary_S02D02_filename = data_dir + "P_timeCourseFeatureSummary_S02D02.csv"; timeCourseFeatureSummary_S02D03_filename = data_dir + "P_timeCourseFeatureSummary_S02D03.csv"; timeCourseFeatureSummary_S02D04_filename = data_dir + "P_timeCourseFeatureSummary_S02D04.csv"; timeCourseFeatureSummary_S02D05_filename = data_dir + "P_timeCourseFeatureSummary_S02D05.csv"; timeCourseFeatureSummary_S01D01vsS01D02_filename = data_dir + "P_timeCourseFeatureSummary_S01D01vsS01D02.csv"; timeCourseFeatureSummary_S01D01vsS01D03_filename = data_dir + "P_timeCourseFeatureSummary_S01D01vsS01D03.csv"; timeCourseFeatureSummary_S01D01vsS01D04_filename = data_dir + "P_timeCourseFeatureSummary_S01D01vsS01D04.csv"; timeCourseFeatureSummary_S01D01vsS01D05_filename = data_dir + "P_timeCourseFeatureSummary_S01D01vsS01D05.csv"; timeCourseFeatureSummary_S02D01vsS02D02_filename = data_dir + "P_timeCourseFeatureSummary_S02D01vsS02D02.csv"; timeCourseFeatureSummary_S02D01vsS02D03_filename = data_dir + "P_timeCourseFeatureSummary_S02D01vsS02D03.csv"; timeCourseFeatureSummary_S02D01vsS02D04_filename = data_dir + "P_timeCourseFeatureSummary_S02D01vsS02D04.csv"; timeCourseFeatureSummary_S02D01vsS02D05_filename = data_dir + "P_timeCourseFeatureSummary_S02D01vsS02D05.csv"; } if (run_timeCourse_S01D01) { // Read in the data PWData timeCourseS01D01; ReadPWData(timeCourse_S01D01_filename, timeCourseS01D01); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS01D01, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S01D01_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S01D01_filename, pw_feature_summaries); } if (run_timeCourse_S01D02) { // Read in the data PWData timeCourseS01D02; ReadPWData(timeCourse_S01D02_filename, timeCourseS01D02); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS01D02, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S01D02_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S01D02_filename, pw_feature_summaries); } if (run_timeCourse_S01D03) { // Read in the data PWData timeCourseS01D03; ReadPWData(timeCourse_S01D03_filename, timeCourseS01D03); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS01D03, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S01D03_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S01D03_filename, pw_feature_summaries); } if (run_timeCourse_S01D04) { // Read in the data PWData timeCourseS01D04; ReadPWData(timeCourse_S01D04_filename, timeCourseS01D04); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS01D04, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S01D04_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S01D04_filename, pw_feature_summaries); } if (run_timeCourse_S01D05) { // Read in the data PWData timeCourseS01D05; ReadPWData(timeCourse_S01D05_filename, timeCourseS01D05); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS01D05, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S01D05_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S01D05_filename, pw_feature_summaries); } if (run_timeCourse_S02D01) { // Read in the data PWData timeCourseS02D01; ReadPWData(timeCourse_S02D01_filename, timeCourseS02D01); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS02D01, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S02D01_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S02D01_filename, pw_feature_summaries); } if (run_timeCourse_S02D02) { // Read in the data PWData timeCourseS02D02; ReadPWData(timeCourse_S02D02_filename, timeCourseS02D02); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS02D02, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S02D02_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S02D02_filename, pw_feature_summaries); } if (run_timeCourse_S02D03) { // Read in the data PWData timeCourseS02D03; ReadPWData(timeCourse_S02D03_filename, timeCourseS02D03); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS02D03, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S02D03_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S02D03_filename, pw_feature_summaries); } if (run_timeCourse_S02D04) { // Read in the data PWData timeCourseS02D04; ReadPWData(timeCourse_S02D04_filename, timeCourseS02D04); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS02D04, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S02D04_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S02D04_filename, pw_feature_summaries); } if (run_timeCourse_S02D05) { // Read in the data PWData timeCourseS02D05; ReadPWData(timeCourse_S02D05_filename, timeCourseS02D05); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS02D05, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S02D05_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S02D05_filename, pw_feature_summaries); } if (run_timeCourse_S01D01vsS01D02) { // Read in the data PWData timeCourseS01D01vsS01D02; ReadPWData(timeCourse_S01D01vsS01D02_filename, timeCourseS01D01vsS01D02); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS01D01vsS01D02, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S01D01vsS01D02_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S01D01vsS01D02_filename, pw_feature_summaries); } if (run_timeCourse_S01D01vsS01D03) { // Read in the data PWData timeCourseS01D01vsS01D03; ReadPWData(timeCourse_S01D01vsS01D03_filename, timeCourseS01D01vsS01D03); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS01D01vsS01D03, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S01D01vsS01D03_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S01D01vsS01D03_filename, pw_feature_summaries); } if (run_timeCourse_S01D01vsS01D04) { // Read in the data PWData timeCourseS01D01vsS01D04; ReadPWData(timeCourse_S01D01vsS01D04_filename, timeCourseS01D01vsS01D04); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS01D01vsS01D04, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S01D01vsS01D04_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S01D01vsS01D04_filename, pw_feature_summaries); } if (run_timeCourse_S01D01vsS01D05) { // Read in the data PWData timeCourseS01D01vsS01D05; ReadPWData(timeCourse_S01D01vsS01D05_filename, timeCourseS01D01vsS01D05); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS01D01vsS01D05, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S01D01vsS01D05_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S01D01vsS01D05_filename, pw_feature_summaries); } if (run_timeCourse_S02D01vsS02D02) { // Read in the data PWData timeCourseS02D01vsS02D02; ReadPWData(timeCourse_S02D01vsS02D02_filename, timeCourseS02D01vsS02D02); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS02D01vsS02D02, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S02D01vsS02D02_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S02D01vsS02D02_filename, pw_feature_summaries); } if (run_timeCourse_S02D01vsS02D03) { // Read in the data PWData timeCourseS02D01vsS02D03; ReadPWData(timeCourse_S02D01vsS02D03_filename, timeCourseS02D01vsS02D03); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS02D01vsS02D03, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S02D01vsS02D03_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S02D01vsS02D03_filename, pw_feature_summaries); } if (run_timeCourse_S02D01vsS02D04) { // Read in the data PWData timeCourseS02D01vsS02D04; ReadPWData(timeCourse_S02D01vsS02D04_filename, timeCourseS02D01vsS02D04); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS02D01vsS02D04, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S02D01vsS02D04_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S02D01vsS02D04_filename, pw_feature_summaries); } if (run_timeCourse_S02D01vsS02D05) { // Read in the data PWData timeCourseS02D01vsS02D05; ReadPWData(timeCourse_S02D01vsS02D05_filename, timeCourseS02D01vsS02D05); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseS02D01vsS02D05, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_S02D01vsS02D05_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_S02D01vsS02D05_filename, pw_feature_summaries); } } void main_statistics_timecourse(std::string blood_fraction = "PLT", bool run_timeCourse_S01D01 = false, bool run_timeCourse_S01D02 = false, bool run_timeCourse_S01D03 = false, bool run_timeCourse_S01D04 = false, bool run_timeCourse_S01D05 = false, bool run_timeCourse_S02D01 = false, bool run_timeCourse_S02D02 = false, bool run_timeCourse_S02D03 = false, bool run_timeCourse_S02D04 = false, bool run_timeCourse_S02D05 = false, bool run_timeCourse_S01D01vsS01D02 = false, bool run_timeCourse_S01D01vsS01D03 = false, bool run_timeCourse_S01D01vsS01D04 = false, bool run_timeCourse_S01D01vsS01D05 = false, bool run_timeCourse_S02D01vsS02D02 = false, bool run_timeCourse_S02D01vsS02D03 = false, bool run_timeCourse_S02D01vsS02D04 = false, bool run_timeCourse_S02D01vsS02D05 = false) { // define the data simulator BiochemicalReactionModel<float> metabolomics_data; // data dirs //std::string data_dir = "C:/Users/dmccloskey/Dropbox (UCSD SBRG)/Metabolomics_RBC_Platelet/"; //std::string data_dir = "C:/Users/domccl/Dropbox (UCSD SBRG)/Metabolomics_RBC_Platelet/"; std::string data_dir = "/home/user/Data/"; std::string biochem_rxns_filename, metabo_data_filename, meta_data_filename, timeCourse_S01D01_filename, timeCourse_S01D02_filename, timeCourse_S01D03_filename, timeCourse_S01D04_filename, timeCourse_S01D05_filename, timeCourse_S02D01_filename, timeCourse_S02D02_filename, timeCourse_S02D03_filename, timeCourse_S02D04_filename, timeCourse_S02D05_filename, timeCourse_S01D01vsS01D02_filename, timeCourse_S01D01vsS01D03_filename, timeCourse_S01D01vsS01D04_filename, timeCourse_S01D01vsS01D05_filename, timeCourse_S02D01vsS02D02_filename, timeCourse_S02D01vsS02D03_filename, timeCourse_S02D01vsS02D04_filename, timeCourse_S02D01vsS02D05_filename; std::vector<std::string> pre_samples, timeCourse_S01D01_samples, timeCourse_S01D02_samples, timeCourse_S01D03_samples, timeCourse_S01D04_samples, timeCourse_S01D05_samples, timeCourse_S02D01_samples, timeCourse_S02D02_samples, timeCourse_S02D03_samples, timeCourse_S02D04_samples, timeCourse_S02D05_samples; if (blood_fraction == "RBC") { // RBC filenames biochem_rxns_filename = data_dir + "iAB_RBC_283.csv"; metabo_data_filename = data_dir + "MetabolomicsData_RBC.csv"; meta_data_filename = data_dir + "MetaData_prePost_RBC.csv"; timeCourse_S01D01_filename = data_dir + "RBC_timeCourse_S01D01.csv"; timeCourse_S01D02_filename = data_dir + "RBC_timeCourse_S01D02.csv"; timeCourse_S01D03_filename = data_dir + "RBC_timeCourse_S01D03.csv"; timeCourse_S01D04_filename = data_dir + "RBC_timeCourse_S01D04.csv"; timeCourse_S01D05_filename = data_dir + "RBC_timeCourse_S01D05.csv"; timeCourse_S02D01_filename = data_dir + "RBC_timeCourse_S02D01.csv"; timeCourse_S02D02_filename = data_dir + "RBC_timeCourse_S02D02.csv"; timeCourse_S02D03_filename = data_dir + "RBC_timeCourse_S02D03.csv"; timeCourse_S02D04_filename = data_dir + "RBC_timeCourse_S02D04.csv"; timeCourse_S02D05_filename = data_dir + "RBC_timeCourse_S02D05.csv"; timeCourse_S01D01vsS01D02_filename = data_dir + "RBC_timeCourse_S01D01vsS01D02.csv"; timeCourse_S01D01vsS01D03_filename = data_dir + "RBC_timeCourse_S01D01vsS01D03.csv"; timeCourse_S01D01vsS01D04_filename = data_dir + "RBC_timeCourse_S01D01vsS01D04.csv"; timeCourse_S01D01vsS01D05_filename = data_dir + "RBC_timeCourse_S01D01vsS01D05.csv"; timeCourse_S02D01vsS02D02_filename = data_dir + "RBC_timeCourse_S02D01vsS02D02.csv"; timeCourse_S02D01vsS02D03_filename = data_dir + "RBC_timeCourse_S02D01vsS02D03.csv"; timeCourse_S02D01vsS02D04_filename = data_dir + "RBC_timeCourse_S02D01vsS02D04.csv"; timeCourse_S02D01vsS02D05_filename = data_dir + "RBC_timeCourse_S02D01vsS02D05.csv"; pre_samples = { "RBC_36","RBC_142","RBC_140","RBC_34","RBC_154","RBC_143","RBC_30","RBC_31","RBC_33","RBC_35","RBC_141" }; timeCourse_S01D01_samples = { "S01_D01_RBC_25C_0hr","S01_D01_RBC_25C_2hr","S01_D01_RBC_25C_6.5hr","S01_D01_RBC_25C_22hr","S01_D01_RBC_37C_22hr" }; timeCourse_S01D02_samples = { "S01_D02_RBC_25C_0hr","S01_D02_RBC_25C_2hr","S01_D02_RBC_25C_6.5hr","S01_D02_RBC_25C_22hr","S01_D02_RBC_37C_22hr" }; timeCourse_S01D03_samples = { "S01_D03_RBC_25C_0hr","S01_D03_RBC_25C_2hr","S01_D03_RBC_25C_6.5hr","S01_D03_RBC_25C_22hr","S01_D03_RBC_37C_22hr" }; timeCourse_S01D04_samples = { "S01_D04_RBC_25C_0hr","S01_D04_RBC_25C_2hr","S01_D04_RBC_25C_6.5hr","S01_D04_RBC_25C_22hr","S01_D04_RBC_37C_22hr" }; timeCourse_S01D05_samples = { "S01_D05_RBC_25C_0hr","S01_D05_RBC_25C_2hr","S01_D05_RBC_25C_6.5hr","S01_D05_RBC_25C_22hr","S01_D05_RBC_37C_22hr" }; timeCourse_S02D01_samples = { "S02_D01_RBC_25C_0hr","S02_D01_RBC_25C_2hr","S02_D01_RBC_25C_6.5hr","S02_D01_RBC_25C_22hr","S02_D01_RBC_37C_22hr" }; timeCourse_S02D02_samples = { "S02_D02_RBC_25C_0hr","S02_D02_RBC_25C_2hr","S02_D02_RBC_25C_6.5hr","S02_D02_RBC_25C_22hr","S02_D02_RBC_37C_22hr" }; timeCourse_S02D03_samples = { "S02_D03_RBC_25C_0hr","S02_D03_RBC_25C_2hr","S02_D03_RBC_25C_6.5hr","S02_D03_RBC_25C_22hr","S02_D03_RBC_37C_22hr" }; timeCourse_S02D04_samples = { "S02_D04_RBC_25C_0hr","S02_D04_RBC_25C_2hr","S02_D04_RBC_25C_6.5hr","S02_D04_RBC_25C_22hr","S02_D04_RBC_37C_22hr" }; timeCourse_S02D05_samples = { "S02_D05_RBC_25C_0hr","S02_D05_RBC_25C_2hr","S02_D05_RBC_25C_6.5hr","S02_D05_RBC_25C_22hr","S02_D05_RBC_37C_22hr" }; } else if (blood_fraction == "PLT") { // PLT filenames biochem_rxns_filename = data_dir + "iAT_PLT_636.csv"; metabo_data_filename = data_dir + "MetabolomicsData_PLT.csv"; meta_data_filename = data_dir + "MetaData_prePost_PLT.csv"; timeCourse_S01D01_filename = data_dir + "PLT_timeCourse_S01D01.csv"; timeCourse_S01D02_filename = data_dir + "PLT_timeCourse_S01D02.csv"; timeCourse_S01D03_filename = data_dir + "PLT_timeCourse_S01D03.csv"; timeCourse_S01D04_filename = data_dir + "PLT_timeCourse_S01D04.csv"; timeCourse_S01D05_filename = data_dir + "PLT_timeCourse_S01D05.csv"; timeCourse_S02D01_filename = data_dir + "PLT_timeCourse_S02D01.csv"; timeCourse_S02D02_filename = data_dir + "PLT_timeCourse_S02D02.csv"; timeCourse_S02D03_filename = data_dir + "PLT_timeCourse_S02D03.csv"; timeCourse_S02D04_filename = data_dir + "PLT_timeCourse_S02D04.csv"; timeCourse_S02D05_filename = data_dir + "PLT_timeCourse_S02D05.csv"; timeCourse_S01D01vsS01D02_filename = data_dir + "PLT_timeCourse_S01D01vsS01D02.csv"; timeCourse_S01D01vsS01D03_filename = data_dir + "PLT_timeCourse_S01D01vsS01D03.csv"; timeCourse_S01D01vsS01D04_filename = data_dir + "PLT_timeCourse_S01D01vsS01D04.csv"; timeCourse_S01D01vsS01D05_filename = data_dir + "PLT_timeCourse_S01D01vsS01D05.csv"; timeCourse_S02D01vsS02D02_filename = data_dir + "PLT_timeCourse_S02D01vsS02D02.csv"; timeCourse_S02D01vsS02D03_filename = data_dir + "PLT_timeCourse_S02D01vsS02D03.csv"; timeCourse_S02D01vsS02D04_filename = data_dir + "PLT_timeCourse_S02D01vsS02D04.csv"; timeCourse_S02D01vsS02D05_filename = data_dir + "PLT_timeCourse_S02D01vsS02D05.csv"; pre_samples = { "PLT_36","PLT_142","PLT_140","PLT_34","PLT_154","PLT_143","PLT_30","PLT_31","PLT_33","PLT_35","PLT_141" }; timeCourse_S01D01_samples = { "S01_D01_PLT_25C_0hr","S01_D01_PLT_25C_2hr","S01_D01_PLT_25C_6.5hr","S01_D01_PLT_25C_22hr","S01_D01_PLT_37C_22hr" }; timeCourse_S01D02_samples = { "S01_D02_PLT_25C_0hr","S01_D02_PLT_25C_2hr","S01_D02_PLT_25C_6.5hr","S01_D02_PLT_25C_22hr","S01_D02_PLT_37C_22hr" }; timeCourse_S01D03_samples = { "S01_D03_PLT_25C_0hr","S01_D03_PLT_25C_2hr","S01_D03_PLT_25C_6.5hr","S01_D03_PLT_25C_22hr","S01_D03_PLT_37C_22hr" }; timeCourse_S01D04_samples = { "S01_D04_PLT_25C_0hr","S01_D04_PLT_25C_2hr","S01_D04_PLT_25C_6.5hr","S01_D04_PLT_25C_22hr","S01_D04_PLT_37C_22hr" }; timeCourse_S01D05_samples = { "S01_D05_PLT_25C_0hr","S01_D05_PLT_25C_2hr","S01_D05_PLT_25C_6.5hr","S01_D05_PLT_25C_22hr","S01_D05_PLT_37C_22hr" }; timeCourse_S02D01_samples = { "S02_D01_PLT_25C_0hr","S02_D01_PLT_25C_2hr","S02_D01_PLT_25C_6.5hr","S02_D01_PLT_25C_22hr","S02_D01_PLT_37C_22hr" }; timeCourse_S02D02_samples = { "S02_D02_PLT_25C_0hr","S02_D02_PLT_25C_2hr","S02_D02_PLT_25C_6.5hr","S02_D02_PLT_25C_22hr","S02_D02_PLT_37C_22hr" }; timeCourse_S02D03_samples = { "S02_D03_PLT_25C_0hr","S02_D03_PLT_25C_2hr","S02_D03_PLT_25C_6.5hr","S02_D03_PLT_25C_22hr","S02_D03_PLT_37C_22hr" }; timeCourse_S02D04_samples = { "S02_D04_PLT_25C_0hr","S02_D04_PLT_25C_2hr","S02_D04_PLT_25C_6.5hr","S02_D04_PLT_25C_22hr","S02_D04_PLT_37C_22hr" }; timeCourse_S02D05_samples = { "S02_D05_PLT_25C_0hr","S02_D05_PLT_25C_2hr","S02_D05_PLT_25C_6.5hr","S02_D05_PLT_25C_22hr","S02_D05_PLT_37C_22hr" }; } else if (blood_fraction == "P") { // P filenames biochem_rxns_filename = data_dir + "iAT_PLT_636.csv"; metabo_data_filename = data_dir + "MetabolomicsData_P.csv"; meta_data_filename = data_dir + "MetaData_prePost_P.csv"; timeCourse_S01D01_filename = data_dir + "P_timeCourse_S01D01.csv"; timeCourse_S01D02_filename = data_dir + "P_timeCourse_S01D02.csv"; timeCourse_S01D03_filename = data_dir + "P_timeCourse_S01D03.csv"; timeCourse_S01D04_filename = data_dir + "P_timeCourse_S01D04.csv"; timeCourse_S01D05_filename = data_dir + "P_timeCourse_S01D05.csv"; timeCourse_S02D01_filename = data_dir + "P_timeCourse_S02D01.csv"; timeCourse_S02D02_filename = data_dir + "P_timeCourse_S02D02.csv"; timeCourse_S02D03_filename = data_dir + "P_timeCourse_S02D03.csv"; timeCourse_S02D04_filename = data_dir + "P_timeCourse_S02D04.csv"; timeCourse_S02D05_filename = data_dir + "P_timeCourse_S02D05.csv"; timeCourse_S01D01vsS01D02_filename = data_dir + "P_timeCourse_S01D01vsS01D02.csv"; timeCourse_S01D01vsS01D03_filename = data_dir + "P_timeCourse_S01D01vsS01D03.csv"; timeCourse_S01D01vsS01D04_filename = data_dir + "P_timeCourse_S01D01vsS01D04.csv"; timeCourse_S01D01vsS01D05_filename = data_dir + "P_timeCourse_S01D01vsS01D05.csv"; timeCourse_S02D01vsS02D02_filename = data_dir + "P_timeCourse_S02D01vsS02D02.csv"; timeCourse_S02D01vsS02D03_filename = data_dir + "P_timeCourse_S02D01vsS02D03.csv"; timeCourse_S02D01vsS02D04_filename = data_dir + "P_timeCourse_S02D01vsS02D04.csv"; timeCourse_S02D01vsS02D05_filename = data_dir + "P_timeCourse_S02D01vsS02D05.csv"; pre_samples = { "P_36","P_142","P_140","P_34","P_154","P_143","P_30","P_31","P_33","P_35","P_141" }; timeCourse_S01D01_samples = { "S01_D01_P_25C_0hr","S01_D01_P_25C_2hr","S01_D01_P_25C_6.5hr","S01_D01_P_25C_22hr","S01_D01_P_37C_22hr" }; timeCourse_S01D02_samples = { "S01_D02_P_25C_0hr","S01_D02_P_25C_2hr","S01_D02_P_25C_6.5hr","S01_D02_P_25C_22hr","S01_D02_P_37C_22hr" }; timeCourse_S01D03_samples = { "S01_D03_P_25C_0hr","S01_D03_P_25C_2hr","S01_D03_P_25C_6.5hr","S01_D03_P_25C_22hr","S01_D03_P_37C_22hr" }; timeCourse_S01D04_samples = { "S01_D04_P_25C_0hr","S01_D04_P_25C_2hr","S01_D04_P_25C_6.5hr","S01_D04_P_25C_22hr","S01_D04_P_37C_22hr" }; timeCourse_S01D05_samples = { "S01_D05_P_25C_0hr","S01_D05_P_25C_2hr","S01_D05_P_25C_6.5hr","S01_D05_P_25C_22hr","S01_D05_P_37C_22hr" }; timeCourse_S02D01_samples = { "S02_D01_P_25C_0hr","S02_D01_P_25C_2hr","S02_D01_P_25C_6.5hr","S02_D01_P_25C_22hr","S02_D01_P_37C_22hr" }; timeCourse_S02D02_samples = { "S02_D02_P_25C_0hr","S02_D02_P_25C_2hr","S02_D02_P_25C_6.5hr","S02_D02_P_25C_22hr","S02_D02_P_37C_22hr" }; timeCourse_S02D03_samples = { "S02_D03_P_25C_0hr","S02_D03_P_25C_2hr","S02_D03_P_25C_6.5hr","S02_D03_P_25C_22hr","S02_D03_P_37C_22hr" }; timeCourse_S02D04_samples = { "S02_D04_P_25C_0hr","S02_D04_P_25C_2hr","S02_D04_P_25C_6.5hr","S02_D04_P_25C_22hr","S02_D04_P_37C_22hr" }; timeCourse_S02D05_samples = { "S02_D05_P_25C_0hr","S02_D05_P_25C_2hr","S02_D05_P_25C_6.5hr","S02_D05_P_25C_22hr","S02_D05_P_37C_22hr" }; } // read in the data metabolomics_data.readBiochemicalReactions(biochem_rxns_filename); metabolomics_data.readMetabolomicsData(metabo_data_filename); metabolomics_data.readMetaData(meta_data_filename); metabolomics_data.findComponentGroupNames(); metabolomics_data.findMARs(); metabolomics_data.findLabels(); if (run_timeCourse_S01D01) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS01D01 = PWComparison(metabolomics_data, timeCourse_S01D01_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S01D01_filename, timeCourseS01D01); } if (run_timeCourse_S01D02) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS01D02 = PWComparison(metabolomics_data, timeCourse_S01D02_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S01D02_filename, timeCourseS01D02); } if (run_timeCourse_S01D03) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS01D03 = PWComparison(metabolomics_data, timeCourse_S01D03_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S01D03_filename, timeCourseS01D03); } if (run_timeCourse_S01D04) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS01D04 = PWComparison(metabolomics_data, timeCourse_S01D04_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S01D04_filename, timeCourseS01D04); } if (run_timeCourse_S01D05) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS01D05 = PWComparison(metabolomics_data, timeCourse_S01D05_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S01D05_filename, timeCourseS01D05); } if (run_timeCourse_S02D01) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS02D01 = PWComparison(metabolomics_data, timeCourse_S02D01_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S02D01_filename, timeCourseS02D01); } if (run_timeCourse_S02D02) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS02D02 = PWComparison(metabolomics_data, timeCourse_S02D02_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S02D02_filename, timeCourseS02D02); } if (run_timeCourse_S02D03) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS02D03 = PWComparison(metabolomics_data, timeCourse_S02D03_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S02D03_filename, timeCourseS02D03); } if (run_timeCourse_S02D04) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS02D04 = PWComparison(metabolomics_data, timeCourse_S02D04_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S02D04_filename, timeCourseS02D04); } if (run_timeCourse_S02D05) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS02D05 = PWComparison(metabolomics_data, timeCourse_S02D05_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S02D05_filename, timeCourseS02D05); } if (run_timeCourse_S01D01vsS01D02) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS01D01vsS01D02 = PWPrePostComparison(metabolomics_data, timeCourse_S01D01_samples, timeCourse_S01D02_samples, 4, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S01D01vsS01D02_filename, timeCourseS01D01vsS01D02); } if (run_timeCourse_S01D01vsS01D03) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS01D01vsS01D03 = PWPrePostComparison(metabolomics_data, timeCourse_S01D01_samples, timeCourse_S01D03_samples, 4, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S01D01vsS01D03_filename, timeCourseS01D01vsS01D03); } if (run_timeCourse_S01D01vsS01D04) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS01D01vsS01D04 = PWPrePostComparison(metabolomics_data, timeCourse_S01D01_samples, timeCourse_S01D04_samples, 4, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S01D01vsS01D04_filename, timeCourseS01D01vsS01D04); } if (run_timeCourse_S01D01vsS01D05) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS01D01vsS01D05 = PWPrePostComparison(metabolomics_data, timeCourse_S01D01_samples, timeCourse_S01D05_samples, 4, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S01D01vsS01D05_filename, timeCourseS01D01vsS01D05); } if (run_timeCourse_S02D01vsS02D02) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS02D01vsS02D02 = PWPrePostComparison(metabolomics_data, timeCourse_S02D01_samples, timeCourse_S02D02_samples, 4, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S02D01vsS02D02_filename, timeCourseS02D01vsS02D02); } if (run_timeCourse_S02D01vsS02D03) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS02D01vsS02D03 = PWPrePostComparison(metabolomics_data, timeCourse_S02D01_samples, timeCourse_S02D03_samples, 4, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S02D01vsS02D03_filename, timeCourseS02D01vsS02D03); } if (run_timeCourse_S02D01vsS02D04) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS02D01vsS02D04 = PWPrePostComparison(metabolomics_data, timeCourse_S02D01_samples, timeCourse_S02D04_samples, 4, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S02D01vsS02D04_filename, timeCourseS02D01vsS02D04); } if (run_timeCourse_S02D01vsS02D05) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData timeCourseS02D01vsS02D05 = PWPrePostComparison(metabolomics_data, timeCourse_S02D01_samples, timeCourse_S02D05_samples, 4, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_S02D01vsS02D05_filename, timeCourseS02D01vsS02D05); } } void main_statistics_controlsSummary(std::string blood_fraction = "PLT", bool run_controls = false) { // define the data simulator BiochemicalReactionModel<float> metabolomics_data; // data dirs //std::string data_dir = "C:/Users/dmccloskey/Dropbox (UCSD SBRG)/Metabolomics_RBC_Platelet/"; //std::string data_dir = "C:/Users/domccl/Dropbox (UCSD SBRG)/Metabolomics_RBC_Platelet/"; std::string data_dir = "/home/user/Data/"; std::string controls_filename, controlsSampleSummary_filename, controlsFeatureSummary_filename; if (blood_fraction == "RBC") { // RBC filenames controls_filename = data_dir + "RBC_controls.csv"; controlsSampleSummary_filename = data_dir + "RBC_controlsSampleSummary.csv"; controlsFeatureSummary_filename = data_dir + "RBC_controlsFeatureSummary.csv"; } else if (blood_fraction == "PLT") { // PLT filenames controls_filename = data_dir + "PLT_controls.csv"; controlsSampleSummary_filename = data_dir + "PLT_controlsSampleSummary.csv"; controlsFeatureSummary_filename = data_dir + "PLT_controlsFeatureSummary.csv"; } else if (blood_fraction == "P") { // P filenames controls_filename = data_dir + "P_controls.csv"; controlsSampleSummary_filename = data_dir + "P_controlsSampleSummary.csv"; controlsFeatureSummary_filename = data_dir + "P_controlsFeatureSummary.csv"; } if (run_controls) { // Read in the data PWData controls; ReadPWData(controls_filename, controls); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(controls, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(controlsSampleSummary_filename, pw_sample_summaries); WritePWFeatureSummaries(controlsFeatureSummary_filename, pw_feature_summaries); } } void main_statistics_controls(std::string blood_fraction = "PLT", bool run_controls = false) { // define the data simulator BiochemicalReactionModel<float> metabolomics_data; // data dirs //std::string data_dir = "C:/Users/dmccloskey/Dropbox (UCSD SBRG)/Metabolomics_RBC_Platelet/"; //std::string data_dir = "C:/Users/domccl/Dropbox (UCSD SBRG)/Metabolomics_RBC_Platelet/"; std::string data_dir = "/home/user/Data/"; std::string biochem_rxns_filename, metabo_data_filename, meta_data_filename, controls_filename; std::vector<std::string> invivo_samples, invitro_samples; if (blood_fraction == "RBC") { // RBC filenames biochem_rxns_filename = data_dir + "iAB_RBC_283.csv"; metabo_data_filename = data_dir + "MetabolomicsData_RBC.csv"; meta_data_filename = data_dir + "MetaData_prePost_RBC.csv"; controls_filename = data_dir + "RBC_controls.csv"; invivo_samples = { "RBC_36","RBC_140" }; invitro_samples = { "S02_D01_RBC_25C_0hr","S01_D01_RBC_25C_0hr" }; } else if (blood_fraction == "PLT") { // PLT filenames biochem_rxns_filename = data_dir + "iAT_PLT_636.csv"; metabo_data_filename = data_dir + "MetabolomicsData_PLT.csv"; meta_data_filename = data_dir + "MetaData_prePost_PLT.csv"; controls_filename = data_dir + "PLT_controls.csv"; invivo_samples = { "PLT_36","PLT_140" }; invitro_samples = { "S02_D01_PLT_25C_0hr","S01_D01_PLT_25C_0hr" }; } else if (blood_fraction == "P") { // P filenames biochem_rxns_filename = data_dir + "iAT_PLT_636.csv"; metabo_data_filename = data_dir + "MetabolomicsData_P.csv"; meta_data_filename = data_dir + "MetaData_prePost_P.csv"; controls_filename = data_dir + "P_controls.csv"; invivo_samples = { "P_36","P_140" }; invitro_samples = { "S02_D01_P_25C_0hr","S01_D01_P_25C_0hr" }; } // read in the data metabolomics_data.readBiochemicalReactions(biochem_rxns_filename); metabolomics_data.readMetabolomicsData(metabo_data_filename); metabolomics_data.readMetaData(meta_data_filename); metabolomics_data.findComponentGroupNames(); metabolomics_data.findMARs(); metabolomics_data.findLabels(); if (run_controls) { // Find significant pair-wise MARS between pre/post samples (one pre vs one post) PWData controls = PWPrePostComparison(metabolomics_data, invivo_samples, invitro_samples, 2, 10000, 0.05, 1.0); // Export to file WritePWData(controls_filename, controls); } } void main_statistics_preVsPost(std::string blood_fraction = "PLT", bool run_oneVSone = true, bool run_preVSpost = true, bool run_postMinPre = false) { // define the data simulator BiochemicalReactionModel<float> metabolomics_data; // data dirs //std::string data_dir = "C:/Users/dmccloskey/Dropbox (UCSD SBRG)/Metabolomics_RBC_Platelet/"; //std::string data_dir = "C:/Users/domccl/Dropbox (UCSD SBRG)/Metabolomics_RBC_Platelet/"; std::string data_dir = "/home/user/Data/"; std::string biochem_rxns_filename, metabo_data_filename, meta_data_filename, oneVSonePre_filename, oneVSonePost_filename, preVSpost_filename, postMinPre_filename; std::vector<std::string> pre_samples, post_samples; if (blood_fraction == "RBC") { // RBC filenames biochem_rxns_filename = data_dir + "iAB_RBC_283.csv"; metabo_data_filename = data_dir + "MetabolomicsData_RBC.csv"; meta_data_filename = data_dir + "MetaData_prePost_RBC.csv"; oneVSonePre_filename = data_dir + "RBC_oneVSonePre.csv"; oneVSonePost_filename = data_dir + "RBC_oneVSonePost.csv"; preVSpost_filename = data_dir + "RBC_preVSpost.csv"; postMinPre_filename = data_dir + "RBC_postMinPre.csv"; pre_samples = { "RBC_36","RBC_142","RBC_140","RBC_34","RBC_154","RBC_143","RBC_30","RBC_31","RBC_33","RBC_35","RBC_141" }; post_samples = { "RBC_43","RBC_152","RBC_150","RBC_38","RBC_155","RBC_153","RBC_37","RBC_39","RBC_42","RBC_40","RBC_151" }; } else if (blood_fraction == "PLT") { // PLT filenames biochem_rxns_filename = data_dir + "iAT_PLT_636.csv"; metabo_data_filename = data_dir + "MetabolomicsData_PLT.csv"; meta_data_filename = data_dir + "MetaData_prePost_PLT.csv"; oneVSonePre_filename = data_dir + "PLT_oneVSonePre.csv"; oneVSonePost_filename = data_dir + "PLT_oneVSonePost.csv"; preVSpost_filename = data_dir + "PLT_preVSpost.csv"; postMinPre_filename = data_dir + "PLT_postMinPre.csv"; pre_samples = { "PLT_36","PLT_142","PLT_140","PLT_34","PLT_154","PLT_143","PLT_30","PLT_31","PLT_33","PLT_35","PLT_141" }; post_samples = { "PLT_43","PLT_152","PLT_150","PLT_38","PLT_155","PLT_153","PLT_37","PLT_39","PLT_42","PLT_40","PLT_151" }; } else if (blood_fraction == "P") { // P filenames biochem_rxns_filename = data_dir + "iAT_PLT_636.csv"; metabo_data_filename = data_dir + "MetabolomicsData_P.csv"; meta_data_filename = data_dir + "MetaData_prePost_P.csv"; oneVSonePre_filename = data_dir + "P_oneVSonePre.csv"; oneVSonePost_filename = data_dir + "P_oneVSonePost.csv"; preVSpost_filename = data_dir + "P_preVSpost.csv"; postMinPre_filename = data_dir + "P_postMinPre.csv"; pre_samples = { "P_36","P_142","P_140","P_34","P_154","P_143","P_30","P_31","P_33","P_35","P_141" }; post_samples = { "P_43","P_152","P_150","P_38","P_155","P_153","P_37","P_39","P_42","P_40","P_151" }; } // read in the data metabolomics_data.readBiochemicalReactions(biochem_rxns_filename); metabolomics_data.readMetabolomicsData(metabo_data_filename); metabolomics_data.readMetaData(meta_data_filename); metabolomics_data.findComponentGroupNames(); metabolomics_data.findMARs(); metabolomics_data.findMARs(true, false); metabolomics_data.findMARs(false, true); metabolomics_data.removeRedundantMARs(); metabolomics_data.findLabels(); if (run_oneVSone) { // Find significant pair-wise MARS between each sample (one vs one Pre-ASA) PWData oneVSonePre = PWComparison(metabolomics_data, pre_samples, 10000, 0.05, 1.0); // Export to file WritePWData(oneVSonePre_filename, oneVSonePre); // Find significant pair-wise MARS between each sample (one vs one Post-ASA) PWData oneVSonePost = PWComparison(metabolomics_data, post_samples, 10000, 0.05, 1.0); // Export to file WritePWData(oneVSonePost_filename, oneVSonePost); } if (run_preVSpost) { // Find significant pair-wise MARS between pre/post samples (one pre vs one post) PWData preVSpost = PWPrePostComparison(metabolomics_data, pre_samples, post_samples, 11, 10000, 0.05, 1.0); // Export to file WritePWData(preVSpost_filename, preVSpost); } if (run_postMinPre) { // Find significant pair-wise MARS between post-pre samples (post-pre vs post-pre) for each individual PWData postMinPre = PWPrePostDifference(metabolomics_data, pre_samples, post_samples, 11, 10000, 0.05, 1.0); // Export to file WritePWData(postMinPre_filename, postMinPre); } } // Main int main(int argc, char** argv) { main_statistics_controls("PLT", true); main_statistics_controls("RBC", true); main_statistics_controls("P", true); main_statistics_controlsSummary("PLT", true); main_statistics_controlsSummary("RBC", true); main_statistics_controlsSummary("P", true); main_statistics_timecourse("PLT", true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true); main_statistics_timecourse("P", true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true); main_statistics_timecourse("RBC", true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true); main_statistics_timecourseSummary("PLT", true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true); main_statistics_timecourseSummary("P", true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true); main_statistics_timecourseSummary("RBC", true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true); main_statistics_preVsPost("PLT", true, true, false); main_statistics_preVsPost("RBC", true, true, false); main_statistics_preVsPost("P", true, true, false); return 0; }<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELTRAINEREXPERIMENTALDEFAULTDEVICE_H #define EVONET_MODELTRAINEREXPERIMENTALDEFAULTDEVICE_H // .h #include <EvoNet/ml/ModelTrainerExperimental.h> #include <EvoNet/ml/ModelInterpreterDefaultDevice.h> // .cpp #include <EvoNet/io/ModelInterpreterFileDefaultDevice.h> #include <EvoNet/io/ModelFile.h> namespace EvoNet { /** @brief Class to train a network model */ template<typename TensorT> class ModelTrainerExperimentalDefaultDevice : public ModelTrainerExperimental<TensorT, ModelInterpreterDefaultDevice<TensorT>> { public: ModelTrainerExperimentalDefaultDevice() = default; ///< Default constructor ~ModelTrainerExperimentalDefaultDevice() = default; ///< Default destructor /// Overrides used in all examples void adaptiveTrainerScheduler(const int& n_generations, const int& n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, const std::vector<TensorT>& model_errors) override; }; template<typename TensorT> inline void ModelTrainerExperimentalDefaultDevice<TensorT>::adaptiveTrainerScheduler(const int& n_generations, const int& n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, const std::vector<TensorT>& model_errors) { //if (n_epochs == 0) { // ModelInterpreterFileDefaultDevice<TensorT> interpreter_data; // interpreter_data.storeModelInterpreterCsv(model.getName() + "_interpreter.csv", model_interpreter); //} if (n_epochs % 1000 == 0 && n_epochs != 0) { // save the model every 1000 epochs model_interpreter.getModelResults(model, false, true, false, false); ModelFile<TensorT> data; data.storeModelBinary(model.getName() + "_" + std::to_string(n_epochs) + "_model.binary", model); ModelInterpreterFileDefaultDevice<TensorT> interpreter_data; interpreter_data.storeModelInterpreterBinary(model.getName() + "_" + std::to_string(n_epochs) + "_interpreter.binary", model_interpreter); } } } #endif //EVONET_MODELTRAINEREXPERIMENTALDEFAULTDEVICE_H<file_sep>### the directory name set(directory include/EvoNet/graph) ### list all header files of the directory here set(sources_list_h CircuitFinder.h ) ### add path to the filenames set(sources_h) foreach(i ${sources_list_h}) list(APPEND sources_h ${directory}/${i}) endforeach(i) ### source group definition source_group("Header Files\\EvoNet\\graph" FILES ${sources_h}) set(EvoNet_sources_h ${EvoNet_sources_h} ${sources_h}) <file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE IntegrationFunction test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/IntegrationFunction.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(integrationFunction) /** SumOp Tests */ BOOST_AUTO_TEST_CASE(constructorSumOp) { SumOp<float>* ptrReLU = nullptr; SumOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorSumOp) { SumOp<float>* ptrReLU = nullptr; ptrReLU = new SumOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameSumOp) { SumOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "SumOp"); } /** ProdOp Tests */ BOOST_AUTO_TEST_CASE(constructorProdOp) { ProdOp<float>* ptrReLU = nullptr; ProdOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorProdOp) { ProdOp<float>* ptrReLU = nullptr; ptrReLU = new ProdOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameProdOp) { ProdOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "ProdOp"); } /** ProdSCOp Tests */ BOOST_AUTO_TEST_CASE(constructorProdSCOp) { ProdSCOp<float>* ptrReLU = nullptr; ProdSCOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorProdSCOp) { ProdSCOp<float>* ptrReLU = nullptr; ptrReLU = new ProdSCOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameProdSCOp) { ProdSCOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "ProdSCOp"); } /** MaxOp Tests */ BOOST_AUTO_TEST_CASE(constructorMaxOp) { MaxOp<float>* ptrReLU = nullptr; MaxOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMaxOp) { MaxOp<float>* ptrReLU = nullptr; ptrReLU = new MaxOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameMaxOp) { MaxOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MaxOp"); } /** MinOp Tests */ BOOST_AUTO_TEST_CASE(constructorMinOp) { MinOp<float>* ptrReLU = nullptr; MinOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMinOp) { MinOp<float>* ptrReLU = nullptr; ptrReLU = new MinOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameMinOp) { MinOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MinOp"); } /** MeanOp Tests */ BOOST_AUTO_TEST_CASE(constructorMeanOp) { MeanOp<float>* ptrReLU = nullptr; MeanOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMeanOp) { MeanOp<float>* ptrReLU = nullptr; ptrReLU = new MeanOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameMeanOp) { MeanOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MeanOp"); } /** VarModOp Tests */ BOOST_AUTO_TEST_CASE(constructorVarModOp) { VarModOp<float>* ptrReLU = nullptr; VarModOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorVarModOp) { VarModOp<float>* ptrReLU = nullptr; ptrReLU = new VarModOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameVarModOp) { VarModOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "VarModOp"); } /** CountOp Tests */ BOOST_AUTO_TEST_CASE(constructorCountOp) { CountOp<float>* ptrReLU = nullptr; CountOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorCountOp) { CountOp<float>* ptrReLU = nullptr; ptrReLU = new CountOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameCountOp) { CountOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "CountOp"); } /** SumErrorOp Tests */ BOOST_AUTO_TEST_CASE(constructorSumErrorOp) { SumErrorOp<float>* ptrReLU = nullptr; SumErrorOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorSumErrorOp) { SumErrorOp<float>* ptrReLU = nullptr; ptrReLU = new SumErrorOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameSumErrorOp) { SumErrorOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "SumErrorOp"); } /** ProdErrorOp Tests */ BOOST_AUTO_TEST_CASE(constructorProdErrorOp) { ProdErrorOp<float>* ptrReLU = nullptr; ProdErrorOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorProdErrorOp) { ProdErrorOp<float>* ptrReLU = nullptr; ptrReLU = new ProdErrorOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameProdErrorOp) { ProdErrorOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "ProdErrorOp"); } /** MaxErrorOp Tests */ BOOST_AUTO_TEST_CASE(constructorMaxErrorOp) { MaxErrorOp<float>* ptrReLU = nullptr; MaxErrorOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMaxErrorOp) { MaxErrorOp<float>* ptrReLU = nullptr; ptrReLU = new MaxErrorOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameMaxErrorOp) { MaxErrorOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MaxErrorOp"); } /** MinErrorOp Tests */ BOOST_AUTO_TEST_CASE(constructorMinErrorOp) { MinErrorOp<float>* ptrReLU = nullptr; MinErrorOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMinErrorOp) { MinErrorOp<float>* ptrReLU = nullptr; ptrReLU = new MinErrorOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameMinErrorOp) { MinErrorOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MinErrorOp"); } /** MeanErrorOp Tests */ BOOST_AUTO_TEST_CASE(constructorMeanErrorOp) { MeanErrorOp<float>* ptrReLU = nullptr; MeanErrorOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMeanErrorOp) { MeanErrorOp<float>* ptrReLU = nullptr; ptrReLU = new MeanErrorOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameMeanErrorOp) { MeanErrorOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MeanErrorOp"); } /** VarModErrorOp Tests */ BOOST_AUTO_TEST_CASE(constructorVarModErrorOp) { VarModErrorOp<float>* ptrReLU = nullptr; VarModErrorOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorVarModErrorOp) { VarModErrorOp<float>* ptrReLU = nullptr; ptrReLU = new VarModErrorOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameVarModErrorOp) { VarModErrorOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "VarModErrorOp"); } /** CountErrorOp Tests */ BOOST_AUTO_TEST_CASE(constructorCountErrorOp) { CountErrorOp<float>* ptrReLU = nullptr; CountErrorOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorCountErrorOp) { CountErrorOp<float>* ptrReLU = nullptr; ptrReLU = new CountErrorOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameCountErrorOp) { CountErrorOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "CountErrorOp"); } /** SumWeightGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorSumWeightGradOp) { SumWeightGradOp<float>* ptrReLU = nullptr; SumWeightGradOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorSumWeightGradOp) { SumWeightGradOp<float>* ptrReLU = nullptr; ptrReLU = new SumWeightGradOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameSumWeightGradOp) { SumWeightGradOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "SumWeightGradOp"); } /** ProdWeightGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorProdWeightGradOp) { ProdWeightGradOp<float>* ptrReLU = nullptr; ProdWeightGradOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorProdWeightGradOp) { ProdWeightGradOp<float>* ptrReLU = nullptr; ptrReLU = new ProdWeightGradOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameProdWeightGradOp) { ProdWeightGradOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "ProdWeightGradOp"); } /** MaxWeightGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMaxWeightGradOp) { MaxWeightGradOp<float>* ptrReLU = nullptr; MaxWeightGradOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMaxWeightGradOp) { MaxWeightGradOp<float>* ptrReLU = nullptr; ptrReLU = new MaxWeightGradOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameMaxWeightGradOp) { MaxWeightGradOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MaxWeightGradOp"); } /** MinWeightGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMinWeightGradOp) { MinWeightGradOp<float>* ptrReLU = nullptr; MinWeightGradOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMinWeightGradOp) { MinWeightGradOp<float>* ptrReLU = nullptr; ptrReLU = new MinWeightGradOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameMinWeightGradOp) { MinWeightGradOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MinWeightGradOp"); } /** MeanWeightGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMeanWeightGradOp) { MeanWeightGradOp<float>* ptrReLU = nullptr; MeanWeightGradOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMeanWeightGradOp) { MeanWeightGradOp<float>* ptrReLU = nullptr; ptrReLU = new MeanWeightGradOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameMeanWeightGradOp) { MeanWeightGradOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MeanWeightGradOp"); } /** VarModWeightGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorVarModWeightGradOp) { VarModWeightGradOp<float>* ptrReLU = nullptr; VarModWeightGradOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorVarModWeightGradOp) { VarModWeightGradOp<float>* ptrReLU = nullptr; ptrReLU = new VarModWeightGradOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameVarModWeightGradOp) { VarModWeightGradOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "VarModWeightGradOp"); } /** CountWeightGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorCountWeightGradOp) { CountWeightGradOp<float>* ptrReLU = nullptr; CountWeightGradOp<float>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorCountWeightGradOp) { CountWeightGradOp<float>* ptrReLU = nullptr; ptrReLU = new CountWeightGradOp<float>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(getNameCountWeightGradOp) { CountWeightGradOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "CountWeightGradOp"); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_METABOLOMICSCVAE_H #define EVONET_METABOLOMICSCVAE_H // .h #include <EvoNet/simulator/BiochemicalDataSimulator.h> #include <EvoNet/models/CVAEFullyConn.h> using namespace EvoNet; namespace EvoNetMetabolomics { template<typename TensorT, typename InterpreterT, class ...ParameterTypes> static void makeModelTrainer(CVAEFullyConn<TensorT, InterpreterT>& model_trainer, std::vector<std::string>& output_nodes, std::vector<std::string>& encoding_nodes_mu, std::vector<std::string>& encoding_nodes_logvar, std::vector<std::string>& encoding_nodes_logalpha, std::vector<std::string>& categorical_softmax_nodes, const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); setModelTrainerParameters(model_trainer, args...); // CVAE specific parameters and adjustments if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get() == "Train10x") { model_trainer.setNEpochsTraining(std::get<EvoNetParameters::ModelTrainer::NEpochsTraining>(parameters).get() * 10 + 1); // iterate through the cache 10x model_trainer.setLogging(true, false, false); } else if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get() == "LatentTraversal" || std::get<EvoNetParameters::Examples::SimulationType>(parameters).get() == "LatentUnsClass") { model_trainer.setLogging(false, true, false); } model_trainer.KL_divergence_warmup_ = std::get<EvoNetParameters::ModelTrainer::KLDivergenceWarmup>(parameters).get(); model_trainer.beta_c_ = std::get<EvoNetParameters::ModelTrainer::BetaC>(parameters).get(); model_trainer.beta_d_ = std::get<EvoNetParameters::ModelTrainer::BetaD>(parameters).get(); model_trainer.capacity_c_ = std::get<EvoNetParameters::ModelTrainer::CapacityC>(parameters).get(); model_trainer.capacity_d_ = std::get<EvoNetParameters::ModelTrainer::CapacityD>(parameters).get(); model_trainer.learning_rate_ = std::get<EvoNetParameters::ModelTrainer::LearningRate>(parameters).get(); model_trainer.gradient_clipping_ = std::get<EvoNetParameters::ModelTrainer::GradientClipping>(parameters).get(); model_trainer.classification_loss_weight_ = std::get<EvoNetParameters::ModelTrainer::LossFncWeight0>(parameters).get(); model_trainer.supervision_warmup_ = std::get<EvoNetParameters::Examples::SupervisionWarmup>(parameters).get(); model_trainer.supervision_percent_ = std::get<EvoNetParameters::Examples::SupervisionPercent>(parameters).get(); // Decide on the reconstruction loss function to use std::shared_ptr<LossFunctionOp<TensorT>> loss_function_op; std::shared_ptr<LossFunctionGradOp<TensorT>> loss_function_grad_op; if (std::get<EvoNetParameters::ModelTrainer::LossFunction>(parameters).get() == std::string("MSE")) { loss_function_op = std::make_shared<MSELossOp<TensorT>>(MSELossOp<TensorT>(1e-6, std::get<EvoNetParameters::ModelTrainer::LossFncWeight1>(parameters).get())); loss_function_grad_op = std::make_shared<MSELossGradOp<TensorT>>(MSELossGradOp<TensorT>(1e-6, std::get<EvoNetParameters::ModelTrainer::LossFncWeight1>(parameters).get())); } else if (std::get<EvoNetParameters::ModelTrainer::LossFunction>(parameters).get() == std::string("MAE")) { loss_function_op = std::make_shared<MAELossOp<TensorT>>(MAELossOp<TensorT>(1e-6, std::get<EvoNetParameters::ModelTrainer::LossFncWeight1>(parameters).get())); loss_function_grad_op = std::make_shared<MAELossGradOp<TensorT>>(MAELossGradOp<TensorT>(1e-6, std::get<EvoNetParameters::ModelTrainer::LossFncWeight1>(parameters).get())); } else if (std::get<EvoNetParameters::ModelTrainer::LossFunction>(parameters).get() == std::string("MLE")) { loss_function_op = std::make_shared<MLELossOp<TensorT>>(MLELossOp<TensorT>(1e-6, std::get<EvoNetParameters::ModelTrainer::LossFncWeight1>(parameters).get())); loss_function_grad_op = std::make_shared<MLELossGradOp<TensorT>>(MLELossGradOp<TensorT>(1e-6, std::get<EvoNetParameters::ModelTrainer::LossFncWeight1>(parameters).get())); } else if (std::get<EvoNetParameters::ModelTrainer::LossFunction>(parameters).get() == std::string("MAPE")) { loss_function_op = std::make_shared<MAPELossOp<TensorT>>(MAPELossOp<TensorT>(1e-6, std::get<EvoNetParameters::ModelTrainer::LossFncWeight1>(parameters).get())); loss_function_grad_op = std::make_shared<MAPELossGradOp<TensorT>>(MAPELossGradOp<TensorT>(1e-6, std::get<EvoNetParameters::ModelTrainer::LossFncWeight1>(parameters).get())); } else if (std::get<EvoNetParameters::ModelTrainer::LossFunction>(parameters).get() == std::string("BCEWithLogits")) { loss_function_op = std::make_shared<BCEWithLogitsLossOp<TensorT>>(BCEWithLogitsLossOp<TensorT>(1e-6, std::get<EvoNetParameters::ModelTrainer::LossFncWeight1>(parameters).get())); loss_function_grad_op = std::make_shared<BCEWithLogitsLossGradOp<TensorT>>(BCEWithLogitsLossGradOp<TensorT>(1e-6, std::get<EvoNetParameters::ModelTrainer::LossFncWeight1>(parameters).get())); } // Set the loss functions std::vector<LossFunctionHelper<TensorT>> loss_function_helpers; LossFunctionHelper<TensorT> loss_function_helper1, loss_function_helper2, loss_function_helper3, loss_function_helper4, loss_function_helper5; if (output_nodes.size()) { loss_function_helper1.output_nodes_ = output_nodes; loss_function_helper1.loss_functions_ = { loss_function_op }; loss_function_helper1.loss_function_grads_ = { loss_function_grad_op }; loss_function_helpers.push_back(loss_function_helper1); } if (encoding_nodes_mu.size()) { loss_function_helper2.output_nodes_ = encoding_nodes_mu; loss_function_helper2.loss_functions_ = { std::make_shared<KLDivergenceMuLossOp<TensorT>>(KLDivergenceMuLossOp<TensorT>(1e-6, 0.0, 0.0)) }; loss_function_helper2.loss_function_grads_ = { std::make_shared<KLDivergenceMuLossGradOp<TensorT>>(KLDivergenceMuLossGradOp<TensorT>(1e-6, 0.0, 0.0)) }; loss_function_helpers.push_back(loss_function_helper2); } if (encoding_nodes_logvar.size()) { loss_function_helper3.output_nodes_ = encoding_nodes_logvar; loss_function_helper3.loss_functions_ = { std::make_shared<KLDivergenceLogVarLossOp<TensorT>>(KLDivergenceLogVarLossOp<TensorT>(1e-6, 0.0, 0.0)) }; loss_function_helper3.loss_function_grads_ = { std::make_shared<KLDivergenceLogVarLossGradOp<TensorT>>(KLDivergenceLogVarLossGradOp<TensorT>(1e-6, 0.0, 0.0)) }; loss_function_helpers.push_back(loss_function_helper3); } if (encoding_nodes_logalpha.size()) { loss_function_helper4.output_nodes_ = encoding_nodes_logalpha; loss_function_helper4.loss_functions_ = { std::make_shared<KLDivergenceCatLossOp<TensorT>>(KLDivergenceCatLossOp<TensorT>(1e-6, 0.0, 0.0)) }; loss_function_helper4.loss_function_grads_ = { std::make_shared<KLDivergenceCatLossGradOp<TensorT>>(KLDivergenceCatLossGradOp<TensorT>(1e-6, 0.0, 0.0)) }; loss_function_helpers.push_back(loss_function_helper4); } if (std::get<EvoNetParameters::ModelTrainer::LossFncWeight0>(parameters).get() > 0 && categorical_softmax_nodes.size()) { loss_function_helper5.output_nodes_ = categorical_softmax_nodes; loss_function_helper5.loss_functions_ = { std::make_shared<CrossEntropyWithLogitsLossOp<TensorT>>(CrossEntropyWithLogitsLossOp<TensorT>(1e-8, std::get<EvoNetParameters::ModelTrainer::LossFncWeight0>(parameters).get())) }; loss_function_helper5.loss_function_grads_ = { std::make_shared<CrossEntropyWithLogitsLossGradOp<TensorT>>(CrossEntropyWithLogitsLossGradOp<TensorT>(1e-8, std::get<EvoNetParameters::ModelTrainer::LossFncWeight0>(parameters).get())) }; loss_function_helpers.push_back(loss_function_helper5); } model_trainer.setLossFunctionHelpers(loss_function_helpers); // Set the metric functions std::vector<MetricFunctionHelper<TensorT>> metric_function_helpers; MetricFunctionHelper<TensorT> metric_function_helper1, metric_function_helper2; if (output_nodes.size()) { metric_function_helper1.output_nodes_ = output_nodes; metric_function_helper1.metric_functions_ = { //std::make_shared<CosineSimilarityOp<TensorT>>(CosineSimilarityOp<TensorT>("Mean")), std::make_shared<CosineSimilarityOp<TensorT>>(CosineSimilarityOp<TensorT>("Var")), //std::make_shared<PearsonROp<TensorT>>(PearsonROp<TensorT>("Mean")), std::make_shared<PearsonROp<TensorT>>(PearsonROp<TensorT>("Var")), std::make_shared<EuclideanDistOp<TensorT>>(EuclideanDistOp<TensorT>("Mean")), std::make_shared<EuclideanDistOp<TensorT>>(EuclideanDistOp<TensorT>("Var")), //std::make_shared<ManhattanDistOp<TensorT>>(ManhattanDistOp<TensorT>("Mean")), std::make_shared<ManhattanDistOp<TensorT>>(ManhattanDistOp<TensorT>("Var")), //std::make_shared<JeffreysAndMatusitaDistOp<TensorT>>(JeffreysAndMatusitaDistOp<TensorT>("Mean")), std::make_shared<JeffreysAndMatusitaDistOp<TensorT>>(JeffreysAndMatusitaDistOp<TensorT>("Var")), //std::make_shared<LogarithmicDistOp<TensorT>>(LogarithmicDistOp<TensorT>("Mean")), std::make_shared<LogarithmicDistOp<TensorT>>(LogarithmicDistOp<TensorT>("Var")), std::make_shared<PercentDifferenceOp<TensorT>>(PercentDifferenceOp<TensorT>("Mean")), std::make_shared<PercentDifferenceOp<TensorT>>(PercentDifferenceOp<TensorT>("Var")) }; metric_function_helper1.metric_names_ = { //"CosineSimilarity-Mean", "CosineSimilarity-Var", //"PearsonR-Mean", "PearsonR-Var", "EuclideanDist-Mean", "EuclideanDist-Var", //"ManhattanDist-Mean", "ManhattanDist-Var", //"JeffreysAndMatusitaDist-Mean", "JeffreysAndMatusitaDist-Var", //"LogarithmicDist-Mean", "LogarithmicDist-Var", "PercentDifference-Mean", "PercentDifference-Var" }; metric_function_helpers.push_back(metric_function_helper1); } if (std::get<EvoNetParameters::ModelTrainer::LossFncWeight0>(parameters).get() > 0 && categorical_softmax_nodes.size()) { metric_function_helper2.output_nodes_ = categorical_softmax_nodes; metric_function_helper2.metric_functions_ = { std::make_shared<AccuracyMCMicroOp<TensorT>>(AccuracyMCMicroOp<TensorT>()), std::make_shared<PrecisionMCMicroOp<TensorT>>(PrecisionMCMicroOp<TensorT>()) }; metric_function_helper2.metric_names_ = { "AccuracyMCMicro", "PrecisionMCMicro" }; metric_function_helpers.push_back(metric_function_helper2); } model_trainer.setMetricFunctionHelpers(metric_function_helpers); } template<typename TensorT, class ...ParameterTypes> static int makeDataSimulator(BiochemicalDataSimulator<TensorT>& data_simulator, const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); // define the data simulator data_simulator.n_encodings_continuous_ = std::get<EvoNetParameters::ModelTrainer::NEncodingsContinuous>(parameters).get(); data_simulator.n_encodings_discrete_ = std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(); int n_reaction_ids_training, n_labels_training, n_component_group_names_training; int n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation; int n_reps_per_sample = -1; data_simulator.readAndProcessMetabolomicsTrainingAndValidationData( n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, std::get<EvoNetParameters::Examples::BiochemicalRxnsFilename>(parameters).get(), std::get<EvoNetParameters::Examples::MetaboDataTrainFilename>(parameters).get(), std::get<EvoNetParameters::Examples::MetaDataTrainFilename>(parameters).get(), std::get<EvoNetParameters::Examples::MetaboDataTestFilename>(parameters).get(), std::get<EvoNetParameters::Examples::MetaDataTestFilename>(parameters).get(), std::get<EvoNetParameters::Examples::UseConcentrations>(parameters).get(), std::get<EvoNetParameters::Examples::UseMARs>(parameters).get(), std::get<EvoNetParameters::Examples::SampleValues>(parameters).get(), std::get<EvoNetParameters::Examples::IterValues>(parameters).get(), std::get<EvoNetParameters::Examples::FillSampling>(parameters).get(), std::get<EvoNetParameters::Examples::FillMean>(parameters).get(), std::get<EvoNetParameters::Examples::FillZero>(parameters).get(), std::get<EvoNetParameters::Examples::ApplyFoldChange>(parameters).get(), std::get<EvoNetParameters::Examples::FoldChangeRef>(parameters).get(), std::get<EvoNetParameters::Examples::FoldChangeLogBase>(parameters).get(), std::get<EvoNetParameters::Examples::OfflineLinearScaleInput>(parameters).get(), std::get<EvoNetParameters::Examples::OfflineLogTransformInput>(parameters).get(), std::get<EvoNetParameters::Examples::OfflineStandardizeInput>(parameters).get(), std::get<EvoNetParameters::Examples::OnlineLinearScaleInput>(parameters).get(), std::get<EvoNetParameters::Examples::OnlineLogTransformInput>(parameters).get(), std::get<EvoNetParameters::Examples::OnlineStandardizeInput>(parameters).get(), n_reps_per_sample, true, false, std::get<EvoNetParameters::ModelTrainer::NEpochsTraining>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::BatchSize>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::MemorySize>(parameters).get()); // Warn about a mismatch in the number of labels and categorical encoding nodes if (n_labels_training != std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get()) { std::cout << "The number of labels " << n_labels_training << " does not match the number of discrete encodings " << std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get() << std::endl; std::cout << "Ensure that classification losses and metric weights are set to 0. " << std::endl; } // define the model input/output nodes int n_features; if (std::get<EvoNetParameters::Examples::UseMARs>(parameters).get()) n_features = n_reaction_ids_training; else n_features = n_component_group_names_training; return n_features; } } #endif //EVONET_METABOLOMICSCVAE_H<file_sep>set(EvoNet_sources CACHE INTERNAL "This variable should hold all EvoNet sources at the end of the config step" ) ## ATTENTION: The order of includes should be similar to the inclusion hierarchy include(source/core/sources.cmake) include(source/ml/sources.cmake) include(source/io/sources.cmake) set(EvoNet_sources_h CACHE INTERNAL "This variable should hold all EvoNet sources at the end of the config step" ) ## ATTENTION: The order of includes should be similar to the inclusion hierarchy include(include/EvoNet/core/sources.cmake) include(include/EvoNet/graph/sources.cmake) include(include/EvoNet/ml/sources.cmake) include(include/EvoNet/models/sources.cmake) include(include/EvoNet/simulator/sources.cmake) include(include/EvoNet/io/sources.cmake) ## add configured config.h&Co to source group source_group("Header Files\\EvoNet" FILES ${EvoNet_configured_headers}) ## merge all headers to sources (for source group view in VS) list(APPEND EvoNet_sources ${EvoNet_sources_h} ${EvoNet_configured_headers}) # TODO track why the duplicate warnings are thrown for all (!) MOC sources # Macro problem? list(REMOVE_DUPLICATES EvoNet_sources) <file_sep>FAQ === This is a list of Frequently Asked Questions about EvoNet. Feel free to suggest new entries! **EvoNet is running much slower than expected.** Please ensure that the project was built using the correct CUDA architecture. <file_sep>/**TODO: Add copyright*/ #ifndef EVONET_OPTOTENSOROP_H #define EVONET_OPTOTENSOROP_H #include <EvoNet/ml/ActivationFunction.h> #include <EvoNet/ml/ActivationFunctionTensor.h> #include <EvoNet/ml/IntegrationFunction.h> #include <EvoNet/ml/IntegrationFunctionTensor.h> #include <EvoNet/ml/Solver.h> #include <EvoNet/ml/SolverTensor.h> #include <EvoNet/ml/LossFunction.h> #include <EvoNet/ml/LossFunctionTensor.h> #include <EvoNet/ml/MetricFunction.h> #include <EvoNet/ml/MetricFunctionTensor.h> #include <unsupported/Eigen/CXX11/Tensor> namespace EvoNet { /** @brief Base class for all conversions from ...Op to ...TensorOp. */ template<typename TensorT, typename DeviceT, typename OperatorT, typename OperatorTensorT> class OpToTensorOp { public: OpToTensorOp() = default; virtual ~OpToTensorOp() = default; virtual std::shared_ptr<OperatorTensorT> convertOpToTensorOp(std::shared_ptr<OperatorT>& op_class) const = 0; virtual std::vector<TensorT> getTensorParams(std::shared_ptr<OperatorT>& op_class) const = 0; void operator()(std::shared_ptr<OperatorT>& op_class, std::shared_ptr<OperatorTensorT>& op_tensor_class, std::vector<TensorT>& op_params) const { op_tensor_class = convertOpToTensorOp(op_class); op_params = getTensorParams(op_class); } }; template<typename TensorT, typename DeviceT> class ActivationOpToActivationTensorOp: public OpToTensorOp<TensorT, DeviceT, ActivationOp<TensorT>, ActivationTensorOp<TensorT,DeviceT>> { public: std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> convertOpToTensorOp(std::shared_ptr<ActivationOp<TensorT>>& op_class) const { if (op_class->getName() == "ReLUOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ReLUTensorOp<TensorT, DeviceT>>(ReLUTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "ReLUGradOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ReLUGradTensorOp<TensorT, DeviceT>>(ReLUGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "ELUOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ELUTensorOp<TensorT, DeviceT>>( ELUTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2], op_class->getParameters()[3])); return op_tensor_class; } else if (op_class->getName() == "ELUGradOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ELUGradTensorOp<TensorT, DeviceT>>( ELUGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2], op_class->getParameters()[3])); return op_tensor_class; } else if (op_class->getName() == "SigmoidOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<SigmoidTensorOp<TensorT, DeviceT>>( SigmoidTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "SigmoidGradOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<SigmoidGradTensorOp<TensorT, DeviceT>>( SigmoidGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "TanHOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<TanHTensorOp<TensorT, DeviceT>>( TanHTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "TanHGradOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<TanHGradTensorOp<TensorT, DeviceT>>( TanHGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "ReTanHOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ReTanHTensorOp<TensorT, DeviceT>>( ReTanHTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "ReTanHGradOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ReTanHGradTensorOp<TensorT, DeviceT>>( ReTanHGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "LinearOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<LinearTensorOp<TensorT, DeviceT>>( LinearTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "LinearGradOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<LinearGradTensorOp<TensorT, DeviceT>>( LinearGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "InverseOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<InverseTensorOp<TensorT, DeviceT>>( InverseTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "InverseGradOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<InverseGradTensorOp<TensorT, DeviceT>>( InverseGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "ExponentialOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ExponentialTensorOp<TensorT, DeviceT>>( ExponentialTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "ExponentialGradOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ExponentialGradTensorOp<TensorT, DeviceT>>( ExponentialGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "LogOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<LogTensorOp<TensorT, DeviceT>>( LogTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "LogGradOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<LogGradTensorOp<TensorT, DeviceT>>( LogGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "PowOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<PowTensorOp<TensorT, DeviceT>>( PowTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2], op_class->getParameters()[3])); return op_tensor_class; } else if (op_class->getName() == "PowGradOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<PowGradTensorOp<TensorT, DeviceT>>( PowGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2], op_class->getParameters()[3])); return op_tensor_class; } else if (op_class->getName() == "LeakyReLUOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<LeakyReLUTensorOp<TensorT, DeviceT>>( LeakyReLUTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2], op_class->getParameters()[3])); return op_tensor_class; } else if (op_class->getName() == "LeakyReLUGradOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<LeakyReLUGradTensorOp<TensorT, DeviceT>>( LeakyReLUGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2], op_class->getParameters()[3])); return op_tensor_class; } else if (op_class->getName() == "SinOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<SinTensorOp<TensorT, DeviceT>>( SinTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "SinGradOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<SinGradTensorOp<TensorT, DeviceT>>( SinGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "CosOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<CosTensorOp<TensorT, DeviceT>>( CosTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "CosGradOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<CosGradTensorOp<TensorT, DeviceT>>( CosGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "BatchNormOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<BatchNormTensorOp<TensorT, DeviceT>>( BatchNormTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "BatchNormGradOp") { std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<BatchNormGradTensorOp<TensorT, DeviceT>>( BatchNormGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else { std::cout << "No conversion available for " << op_class->getName() << "." << std::endl; std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared< LinearTensorOp<TensorT, DeviceT>>( LinearTensorOp<TensorT, DeviceT>()); return op_tensor_class; } } std::vector<TensorT> getTensorParams(std::shared_ptr<ActivationOp<TensorT>>& op_class) const { return std::vector<TensorT>(); } }; template<typename TensorT, typename DeviceT> class SolverOpToSolverTensorOp : public OpToTensorOp<TensorT, DeviceT, SolverOp<TensorT>, SolverTensorOp<TensorT, DeviceT>> { public: std::shared_ptr<SolverTensorOp<TensorT, DeviceT>> convertOpToTensorOp(std::shared_ptr<SolverOp<TensorT>>& op_class) const { if (op_class->getName() == "SGDOp") { std::shared_ptr<SolverTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<SGDTensorOp<TensorT, DeviceT>>( SGDTensorOp<TensorT, DeviceT>(op_class->getGradientThreshold(), op_class->getGradientNoiseSigma(), op_class->getGradientNoiseGamma())); return op_tensor_class; } else if (op_class->getName() == "SSDOp") { std::shared_ptr<SolverTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<SSDTensorOp<TensorT, DeviceT>>( SSDTensorOp<TensorT, DeviceT>(op_class->getGradientThreshold(), op_class->getGradientNoiseSigma(), op_class->getGradientNoiseGamma())); return op_tensor_class; } else if (op_class->getName() == "AdamOp") { std::shared_ptr<SolverTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<AdamTensorOp<TensorT, DeviceT>>( AdamTensorOp<TensorT, DeviceT>(op_class->getGradientThreshold(), op_class->getGradientNoiseSigma(), op_class->getGradientNoiseGamma())); return op_tensor_class; } else if (op_class->getName() == "SVAGOp") { std::shared_ptr<SolverTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<SVAGTensorOp<TensorT, DeviceT>>( SVAGTensorOp<TensorT, DeviceT>(op_class->getGradientThreshold(), op_class->getGradientNoiseSigma(), op_class->getGradientNoiseGamma())); return op_tensor_class; } else if (op_class->getName() == "DummySolverOp") { std::shared_ptr<SolverTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<DummySolverTensorOp<TensorT, DeviceT>>( DummySolverTensorOp<TensorT, DeviceT>(op_class->getGradientThreshold(), op_class->getGradientNoiseSigma(), op_class->getGradientNoiseGamma())); return op_tensor_class; } else { std::cout << "No conversion available for " << op_class->getName() << "." << std::endl; std::shared_ptr<SolverTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<DummySolverTensorOp<TensorT, DeviceT>>( DummySolverTensorOp<TensorT, DeviceT>()); return op_tensor_class; } } std::vector<TensorT> getTensorParams(std::shared_ptr<SolverOp<TensorT>>& op_class) const { return op_class->getParameters(); } }; template<typename TensorT, typename DeviceT> class LossFunctionOpToLossFunctionTensorOp : public OpToTensorOp<TensorT, DeviceT, LossFunctionOp<TensorT>, LossFunctionTensorOp<TensorT, DeviceT>> { public: std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> convertOpToTensorOp(std::shared_ptr<LossFunctionOp<TensorT>>& op_class) const { if (op_class->getName() == "ManhattanDistanceLossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ManhattanDistanceLossTensorOp<TensorT, DeviceT>>( ManhattanDistanceLossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "L2NormLossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<L2NormLossTensorOp<TensorT, DeviceT>>( L2NormLossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "BCELossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<BCELossTensorOp<TensorT, DeviceT>>( BCELossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "NegativeLogLikelihoodLossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<NegativeLogLikelihoodLossTensorOp<TensorT, DeviceT>>( NegativeLogLikelihoodLossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "MSELossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MSELossTensorOp<TensorT, DeviceT>>( MSELossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "MAELossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MAELossTensorOp<TensorT, DeviceT>>( MAELossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "MRSELossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MRSELossTensorOp<TensorT, DeviceT>>( MRSELossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "MLELossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MLELossTensorOp<TensorT, DeviceT>>( MLELossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "KLDivergenceMuLossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<KLDivergenceMuLossTensorOp<TensorT, DeviceT>>( KLDivergenceMuLossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "KLDivergenceLogVarLossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<KLDivergenceLogVarLossTensorOp<TensorT, DeviceT>>( KLDivergenceLogVarLossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "BCEWithLogitsLossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<BCEWithLogitsLossTensorOp<TensorT, DeviceT>>( BCEWithLogitsLossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "CrossEntropyWithLogitsLossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<CrossEntropyWithLogitsLossTensorOp<TensorT, DeviceT>>( CrossEntropyWithLogitsLossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "MSERangeUBLossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MSERangeUBLossTensorOp<TensorT, DeviceT>>( MSERangeUBLossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "MSERangeLBLossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MSERangeLBLossTensorOp<TensorT, DeviceT>>( MSERangeLBLossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "KLDivergenceCatLossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<KLDivergenceCatLossTensorOp<TensorT, DeviceT>>( KLDivergenceCatLossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "MAPELossOp") { std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MAPELossTensorOp<TensorT, DeviceT>>( MAPELossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else { std::cout << "No conversion available for " << op_class->getName() << "." << std::endl; std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MSELossTensorOp<TensorT, DeviceT>>( MSELossTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } } std::vector<TensorT> getTensorParams(std::shared_ptr<LossFunctionOp<TensorT>>& op_class) const { return std::vector<TensorT>(); } }; template<typename TensorT, typename DeviceT> class LossFunctionGradOpToLossFunctionGradTensorOp : public OpToTensorOp<TensorT, DeviceT, LossFunctionGradOp<TensorT>, LossFunctionGradTensorOp<TensorT, DeviceT>> { public: std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> convertOpToTensorOp(std::shared_ptr<LossFunctionGradOp<TensorT>>& op_class) const { if (op_class->getName() == "ManhattanDistanceLossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ManhattanDistanceLossGradTensorOp<TensorT, DeviceT>>( ManhattanDistanceLossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "L2NormLossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<L2NormLossGradTensorOp<TensorT, DeviceT>>( L2NormLossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "BCELossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<BCELossGradTensorOp<TensorT, DeviceT>>( BCELossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "NegativeLogLikelihoodLossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<NegativeLogLikelihoodLossGradTensorOp<TensorT, DeviceT>>( NegativeLogLikelihoodLossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "MSELossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MSELossGradTensorOp<TensorT, DeviceT>>( MSELossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "MAELossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MAELossGradTensorOp<TensorT, DeviceT>>( MAELossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "MRSELossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MRSELossGradTensorOp<TensorT, DeviceT>>( MRSELossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "MLELossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MLELossGradTensorOp<TensorT, DeviceT>>( MLELossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "KLDivergenceMuLossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<KLDivergenceMuLossGradTensorOp<TensorT, DeviceT>>( KLDivergenceMuLossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "KLDivergenceLogVarLossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<KLDivergenceLogVarLossGradTensorOp<TensorT, DeviceT>>( KLDivergenceLogVarLossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "BCEWithLogitsLossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<BCEWithLogitsLossGradTensorOp<TensorT, DeviceT>>( BCEWithLogitsLossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "CrossEntropyWithLogitsLossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<CrossEntropyWithLogitsLossGradTensorOp<TensorT, DeviceT>>( CrossEntropyWithLogitsLossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "MSERangeLBLossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MSERangeLBLossGradTensorOp<TensorT, DeviceT>>( MSERangeLBLossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "MSERangeUBLossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MSERangeUBLossGradTensorOp<TensorT, DeviceT>>( MSERangeUBLossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else if (op_class->getName() == "KLDivergenceCatLossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<KLDivergenceCatLossGradTensorOp<TensorT, DeviceT>>( KLDivergenceCatLossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1], op_class->getParameters()[2])); return op_tensor_class; } else if (op_class->getName() == "MAPELossGradOp") { std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MAPELossGradTensorOp<TensorT, DeviceT>>( MAPELossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } else { std::cout << "No conversion available for " << op_class->getName() << "." << std::endl; std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MSELossGradTensorOp<TensorT, DeviceT>>( MSELossGradTensorOp<TensorT, DeviceT>(op_class->getParameters()[0], op_class->getParameters()[1])); return op_tensor_class; } } std::vector<TensorT> getTensorParams(std::shared_ptr<LossFunctionGradOp<TensorT>>& op_class) const { return std::vector<TensorT>(); } }; template<typename TensorT, typename DeviceT> class IntegrationOpToIntegrationTensorOp : public OpToTensorOp<TensorT, DeviceT, IntegrationOp<TensorT>, IntegrationTensorOp<TensorT, DeviceT>> { public: std::shared_ptr<IntegrationTensorOp<TensorT, DeviceT>> convertOpToTensorOp(std::shared_ptr<IntegrationOp<TensorT>>& op_class) const { if (op_class->getName() == "SumOp") { std::shared_ptr<IntegrationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<SumTensorOp<TensorT, DeviceT>>( SumTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "ProdOp") { std::shared_ptr<IntegrationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ProdTensorOp<TensorT, DeviceT>>( ProdTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "ProdSCOp") { std::shared_ptr<IntegrationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ProdSCTensorOp<TensorT, DeviceT>>( ProdSCTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "MeanOp") { std::shared_ptr<IntegrationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MeanTensorOp<TensorT, DeviceT>>( MeanTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "MaxOp") { std::shared_ptr<IntegrationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MaxTensorOp<TensorT, DeviceT>>( MaxTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "MinOp") { std::shared_ptr<IntegrationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MinTensorOp<TensorT, DeviceT>>( MinTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "VarModOp") { std::shared_ptr<IntegrationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<VarModTensorOp<TensorT, DeviceT>>( VarModTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "VarOp") { std::shared_ptr<IntegrationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<VarTensorOp<TensorT, DeviceT>>( VarTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "CountOp") { std::shared_ptr<IntegrationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<CountTensorOp<TensorT, DeviceT>>( CountTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else { std::cout << "No conversion available for " << op_class->getName() << "." << std::endl; std::shared_ptr<IntegrationTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<SumTensorOp<TensorT, DeviceT>>( SumTensorOp<TensorT, DeviceT>()); return op_tensor_class; } } std::vector<TensorT> getTensorParams(std::shared_ptr<IntegrationOp<TensorT>>& op_class) const { return std::vector<TensorT>(); } }; template<typename TensorT, typename DeviceT> class IntegrationErrorOpToIntegrationErrorTensorOp : public OpToTensorOp<TensorT, DeviceT, IntegrationErrorOp<TensorT>, IntegrationErrorTensorOp<TensorT, DeviceT>> { public: std::shared_ptr<IntegrationErrorTensorOp<TensorT, DeviceT>> convertOpToTensorOp(std::shared_ptr<IntegrationErrorOp<TensorT>>& op_class) const { if (op_class->getName() == "SumErrorOp") { std::shared_ptr<IntegrationErrorTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<SumErrorTensorOp<TensorT, DeviceT>>( SumErrorTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "ProdErrorOp") { std::shared_ptr<IntegrationErrorTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ProdErrorTensorOp<TensorT, DeviceT>>( ProdErrorTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "MeanErrorOp") { std::shared_ptr<IntegrationErrorTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MeanErrorTensorOp<TensorT, DeviceT>>( MeanErrorTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "MaxErrorOp") { std::shared_ptr<IntegrationErrorTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MaxErrorTensorOp<TensorT, DeviceT>>( MaxErrorTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "MinErrorOp") { std::shared_ptr<IntegrationErrorTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MinErrorTensorOp<TensorT, DeviceT>>( MinErrorTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "VarModErrorOp") { std::shared_ptr<IntegrationErrorTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<VarModErrorTensorOp<TensorT, DeviceT>>( VarModErrorTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "VarErrorOp") {// [TODO: ] std::shared_ptr<IntegrationErrorTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<VarErrorTensorOp<TensorT, DeviceT>>( VarErrorTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "CountErrorOp") { std::shared_ptr<IntegrationErrorTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<CountErrorTensorOp<TensorT, DeviceT>>( CountErrorTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else { std::cout << "No conversion available for " << op_class->getName() << "." << std::endl; std::shared_ptr<IntegrationErrorTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<SumErrorTensorOp<TensorT, DeviceT>>( SumErrorTensorOp<TensorT, DeviceT>()); return op_tensor_class; } } std::vector<TensorT> getTensorParams(std::shared_ptr<IntegrationErrorOp<TensorT>>& op_class) const { return std::vector<TensorT>(); } }; template<typename TensorT, typename DeviceT> class IntegrationWeightGradOpToIntegrationWeightGradTensorOp : public OpToTensorOp<TensorT, DeviceT, IntegrationWeightGradOp<TensorT>, IntegrationWeightGradTensorOp<TensorT, DeviceT>> { public: std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, DeviceT>> convertOpToTensorOp(std::shared_ptr<IntegrationWeightGradOp<TensorT>>& op_class) const { if (op_class->getName() == "SumWeightGradOp") { std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<SumWeightGradTensorOp<TensorT, DeviceT>>( SumWeightGradTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "ProdWeightGradOp") { std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ProdWeightGradTensorOp<TensorT, DeviceT>>( ProdWeightGradTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "MeanWeightGradOp") { std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MeanWeightGradTensorOp<TensorT, DeviceT>>( MeanWeightGradTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "MaxWeightGradOp") { std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MaxWeightGradTensorOp<TensorT, DeviceT>>( MaxWeightGradTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "MinWeightGradOp") { std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MinWeightGradTensorOp<TensorT, DeviceT>>( MinWeightGradTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "VarModWeightGradOp") { std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<VarModWeightGradTensorOp<TensorT, DeviceT>>( VarModWeightGradTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "VarWeightGradOp") { std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<VarWeightGradTensorOp<TensorT, DeviceT>>( VarWeightGradTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "CountWeightGradOp") { std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<CountWeightGradTensorOp<TensorT, DeviceT>>( CountWeightGradTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else { std::cout << "No conversion available for " << op_class->getName() << "." << std::endl; std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<SumWeightGradTensorOp<TensorT, DeviceT>>( SumWeightGradTensorOp<TensorT, DeviceT>()); return op_tensor_class; } } std::vector<TensorT> getTensorParams(std::shared_ptr<IntegrationWeightGradOp<TensorT>>& op_class) const { return std::vector<TensorT>(); } }; template<typename TensorT, typename DeviceT> class MetricFunctionOpToMetricFunctionTensorOp : public OpToTensorOp<TensorT, DeviceT, MetricFunctionOp<TensorT>, MetricFunctionTensorOp<TensorT, DeviceT>> { public: std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> convertOpToTensorOp(std::shared_ptr<MetricFunctionOp<TensorT>>& op_class) const { if (op_class->getName() == "AccuracyBCOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<AccuracyBCTensorOp<TensorT, DeviceT>>( AccuracyBCTensorOp<TensorT, DeviceT>(op_class->getParameters().at(0))); return op_tensor_class; } else if (op_class->getName() == "AccuracyMCMicroOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<AccuracyMCMicroTensorOp<TensorT, DeviceT>>( AccuracyMCMicroTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "AccuracyMCMacroOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<AccuracyMCMacroTensorOp<TensorT, DeviceT>>( AccuracyMCMacroTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "PrecisionBCOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<PrecisionBCTensorOp<TensorT, DeviceT>>( PrecisionBCTensorOp<TensorT, DeviceT>(op_class->getParameters().at(0))); return op_tensor_class; } else if (op_class->getName() == "PrecisionMCMicroOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<PrecisionMCMicroTensorOp<TensorT, DeviceT>>( PrecisionMCMicroTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "PrecisionMCMacroOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<PrecisionMCMacroTensorOp<TensorT, DeviceT>>( PrecisionMCMacroTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "RecallBCOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<RecallBCTensorOp<TensorT, DeviceT>>( RecallBCTensorOp<TensorT, DeviceT>(op_class->getParameters().at(0))); return op_tensor_class; } else if (op_class->getName() == "RecallMCMicroOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<RecallMCMicroTensorOp<TensorT, DeviceT>>( RecallMCMicroTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "RecallMCMacroOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<RecallMCMacroTensorOp<TensorT, DeviceT>>( RecallMCMacroTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "F1ScoreBCOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<F1ScoreBCTensorOp<TensorT, DeviceT>>( F1ScoreBCTensorOp<TensorT, DeviceT>(op_class->getParameters().at(0))); return op_tensor_class; } else if (op_class->getName() == "F1ScoreMCMicroOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<F1ScoreMCMicroTensorOp<TensorT, DeviceT>>( F1ScoreMCMicroTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "F1ScoreMCMacroOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<F1ScoreMCMacroTensorOp<TensorT, DeviceT>>( F1ScoreMCMacroTensorOp<TensorT, DeviceT>()); return op_tensor_class; } else if (op_class->getName() == "MAEOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MAETensorOp<TensorT, DeviceT>>( MAETensorOp<TensorT, DeviceT>(op_class->getReductionFunc())); return op_tensor_class; } else if (op_class->getName() == "CosineSimilarityOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<CosineSimilarityTensorOp<TensorT, DeviceT>>( CosineSimilarityTensorOp<TensorT, DeviceT>(op_class->getReductionFunc())); return op_tensor_class; } else if (op_class->getName() == "PearsonROp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<PearsonRTensorOp<TensorT, DeviceT>>( PearsonRTensorOp<TensorT, DeviceT>(op_class->getReductionFunc())); return op_tensor_class; } else if (op_class->getName() == "EuclideanDistOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<EuclideanDistTensorOp<TensorT, DeviceT>>( EuclideanDistTensorOp<TensorT, DeviceT>(op_class->getReductionFunc())); return op_tensor_class; } else if (op_class->getName() == "ManhattanDistOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<ManhattanDistTensorOp<TensorT, DeviceT>>( ManhattanDistTensorOp<TensorT, DeviceT>(op_class->getReductionFunc())); return op_tensor_class; } else if (op_class->getName() == "JeffreysAndMatusitaDistOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<JeffreysAndMatusitaDistTensorOp<TensorT, DeviceT>>( JeffreysAndMatusitaDistTensorOp<TensorT, DeviceT>(op_class->getReductionFunc())); return op_tensor_class; } else if (op_class->getName() == "LogarithmicDistOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<LogarithmicDistTensorOp<TensorT, DeviceT>>( LogarithmicDistTensorOp<TensorT, DeviceT>(op_class->getReductionFunc())); return op_tensor_class; } else if (op_class->getName() == "PercentDifferenceOp") { std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<PercentDifferenceTensorOp<TensorT, DeviceT>>( PercentDifferenceTensorOp<TensorT, DeviceT>(op_class->getReductionFunc())); return op_tensor_class; } else { std::cout << "No conversion available for " << op_class->getName() << "." << std::endl; std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> op_tensor_class = std::make_shared<MAETensorOp<TensorT, DeviceT>>( MAETensorOp<TensorT, DeviceT>()); return op_tensor_class; } } std::vector<TensorT> getTensorParams(std::shared_ptr<MetricFunctionOp<TensorT>>& op_class) const { return std::vector<TensorT>(); } }; } #endif //EVONET_OPTOTENSOROP_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ModelInterpreterFile test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/io/ModelInterpreterFileDefaultDevice.h> #include <EvoNet/ml/ModelInterpreterDefaultDevice.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(ModelInterpreterFile1) Model<float> makeModel1() { /** * Directed Acyclic Graph Toy Network Model */ Node<float> i1, i2, h1, h2, o1, o2, b1, b2; Link l1, l2, l3, l4, lb1, lb2, l5, l6, l7, l8, lb3, lb4; Weight<float> w1, w2, w3, w4, wb1, wb2, w5, w6, w7, w8, wb3, wb4; Model<float> model1; // Toy network: 1 hidden layer, fully connected, DAG i1 = Node<float>("0", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); i2 = Node<float>("1", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h1 = Node<float>("2", NodeType::hidden, NodeStatus::deactivated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h2 = Node<float>("3", NodeType::hidden, NodeStatus::deactivated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o1 = Node<float>("4", NodeType::output, NodeStatus::deactivated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o2 = Node<float>("5", NodeType::output, NodeStatus::deactivated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b1 = Node<float>("6", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b2 = Node<float>("7", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); // weights std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; // weight_init.reset(new RandWeightInitOp(1.0)); // No random init for testing weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w1 = Weight<float>("0", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w2 = Weight<float>("1", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w3 = Weight<float>("2", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w4 = Weight<float>("3", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb1 = Weight<float>("4", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb2 = Weight<float>("5", weight_init, solver); // input layer + bias l1 = Link("0", "0", "2", "0"); l2 = Link("1", "0", "3", "1"); l3 = Link("2", "1", "2", "2"); l4 = Link("3", "1", "3", "3"); lb1 = Link("4", "6", "2", "4"); lb2 = Link("5", "6", "3", "5"); // weights weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w5 = Weight<float>("6", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w6 = Weight<float>("7", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w7 = Weight<float>("8", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w8 = Weight<float>("9", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb3 = Weight<float>("10", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb4 = Weight<float>("11", weight_init, solver); // hidden layer + bias l5 = Link("6", "2", "4", "6"); l6 = Link("7", "2", "5", "7"); l7 = Link("8", "3", "4", "8"); l8 = Link("9", "3", "5", "9"); lb3 = Link("10", "7", "4", "10"); lb4 = Link("11", "7", "5", "11"); model1.setId(1); model1.setName("1"); model1.addNodes({ i1, i2, h1, h2, o1, o2, b1, b2 }); model1.addWeights({ w1, w2, w3, w4, wb1, wb2, w5, w6, w7, w8, wb3, wb4 }); model1.addLinks({ l1, l2, l3, l4, lb1, lb2, l5, l6, l7, l8, lb3, lb4 }); model1.setInputAndOutputNodes(); return model1; } BOOST_AUTO_TEST_CASE(constructor) { ModelInterpreterFileDefaultDevice<float>* ptr = nullptr; ModelInterpreterFileDefaultDevice<float>* nullPointer = nullptr; ptr = new ModelInterpreterFileDefaultDevice<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { ModelInterpreterFileDefaultDevice<float>* ptr = nullptr; ptr = new ModelInterpreterFileDefaultDevice<float>(); delete ptr; } Model<float> model1 = makeModel1(); BOOST_AUTO_TEST_CASE(loadModelBinary1) { ModelInterpreterFileDefaultDevice<float> data; // START: model_interpreter test taken from ModelinterpreterCpu_test ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 1; const bool train = true; // compile the graph into a set of operations and allocate all tensors model_interpreter.getForwardPropogationOperations(model1, batch_size, memory_size, train, false, true, true); // Store the model interpreter std::string filename = "ModelInterpreterFileTest.binary"; data.storeModelInterpreterBinary(filename, model_interpreter); // Read in the test model_interpreter ModelInterpreterDefaultDevice<float> model_interpreter_test; data.loadModelInterpreterBinary(filename, model_interpreter_test); BOOST_CHECK(model_interpreter_test.getTensorOpsSteps() == model_interpreter.getTensorOpsSteps()); BOOST_CHECK_EQUAL(model_interpreter_test.getModelResources().size(), model_interpreter.getModelResources().size()); //BOOST_CHECK(model_interpreter_test.getModelResources() == model_interpreter.getModelResources()); } Model<float> model2 = makeModel1(); BOOST_AUTO_TEST_CASE(loadModelBinary2) { ModelInterpreterFileDefaultDevice<float> data; // START: model_interpreter test taken from ModelinterpreterCpu_test ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 1; const bool train = true; // update the model solver std::shared_ptr<SolverOp<float>> solver(new AdamOp<float>(0.001, 0.9, 0.999, 1e-8)); for (auto& weight_map : model2.getWeightsMap()) { if (weight_map.second->getSolverOpShared()->getName() == "SGDOp") weight_map.second->setSolverOp(solver); } // compile the graph into a set of operations and allocate all tensors model_interpreter.getForwardPropogationOperations(model2, batch_size, memory_size, train, false, true, true); model_interpreter.allocateModelErrorTensor(batch_size, memory_size, 0); // create the input const std::vector<std::string> node_ids = { "0", "1" }; Eigen::Tensor<float, 3> input(batch_size, memory_size, (int)node_ids.size()); input.setValues({ {{1, 5}}, {{2, 6}}, {{3, 7}}, {{4, 8}} }); // create the expected output std::vector<std::string> output_nodes = { "4", "5" }; Eigen::Tensor<float, 2> expected(batch_size, (int)output_nodes.size()); expected.setValues({ {0, 1}, {0, 1}, {0, 1}, {0, 1} }); LossFunctionTensorOp<float, Eigen::DefaultDevice>* loss_function = new MSELossTensorOp<float, Eigen::DefaultDevice>(); LossFunctionGradTensorOp<float, Eigen::DefaultDevice>* loss_function_grad = new MSELossGradTensorOp<float, Eigen::DefaultDevice>(); const int layer_id = model2.getNode("4").getTensorIndex().first; // iterate until we find the optimal values const int max_iter = 20; for (int iter = 0; iter < max_iter; ++iter) { // assign the input data model_interpreter.mapValuesToLayers(model2, input, node_ids, "output"); model_interpreter.initBiases(model2); // create the bias model_interpreter.executeForwardPropogationOperations(0); //FP // calculate the model error and node output error model_interpreter.executeModelErrorOperations(expected, layer_id, loss_function, loss_function_grad, 0); std::cout << "Error at iteration: " << iter << " is " << model_interpreter.getModelError()->getError().sum() << std::endl; model_interpreter.executeBackwardPropogationOperations(0); // BP model_interpreter.executeWeightErrorOperations(); // Weight error model_interpreter.executeWeightUpdateOperations(0); // Weight update // reinitialize the model if (iter != max_iter - 1) { model_interpreter.reInitNodes(); model_interpreter.reInitModelError(); } } const Eigen::Tensor<float, 0> total_error = model_interpreter.getModelError()->getError().sum(); BOOST_CHECK(total_error(0) <= 757.0); // END: model_interpreter test taken from ModelinterpreterCpu_test // Store the model interpreter std::string filename = "ModelInterpreterFileTest.binary"; data.storeModelInterpreterBinary(filename, model_interpreter); // Read in the test model_interpreter ModelInterpreterDefaultDevice<float> model_interpreter_test; data.loadModelInterpreterBinary(filename, model_interpreter_test); // Test for the expected model_interpreter operations model_interpreter.getModelResults(model2, true, true, true, true); model_interpreter.clear_cache(); // Compile the graph into a set of operations and allocate all tensors model_interpreter_test.getForwardPropogationOperations(model2, batch_size, memory_size, train, false, true, true); model_interpreter_test.allocateModelErrorTensor(batch_size, memory_size, 0); BOOST_CHECK(model_interpreter_test == model_interpreter); // Trivial comparison; instead we use the following from `ModelInterpreterCpu_test.cpp` // RE-START: model_interpreter test taken from ModelinterpreterCpu_test // iterate until we find the optimal values for (int iter = 0; iter < max_iter; ++iter) { // assign the input data model_interpreter_test.mapValuesToLayers(model2, input, node_ids, "output"); model_interpreter_test.initBiases(model2); // create the bias model_interpreter_test.executeForwardPropogationOperations(0); //FP // calculate the model error and node output error model_interpreter_test.executeModelErrorOperations(expected, layer_id, loss_function, loss_function_grad, 0); std::cout << "Error at iteration: " << iter << " is " << model_interpreter_test.getModelError()->getError().sum() << std::endl; model_interpreter_test.executeBackwardPropogationOperations(0); // BP model_interpreter_test.executeWeightErrorOperations(); // Weight error model_interpreter_test.executeWeightUpdateOperations(0); // Weight update // reinitialize the model if (iter != max_iter - 1) { model_interpreter_test.reInitNodes(); model_interpreter_test.reInitModelError(); } } const Eigen::Tensor<float, 0> total_error_test = model_interpreter_test.getModelError()->getError().sum(); BOOST_CHECK(total_error_test(0) <= 757.0); // END RE-START: model_interpreter test taken from ModelinterpreterCpu_test } Model<float> model3 = makeModel1(); BOOST_AUTO_TEST_CASE(loadModelCsv1) { ModelInterpreterFileDefaultDevice<float> data; // START: model_interpreter test taken from ModelinterpreterCpu_test ModelInterpreterDefaultDevice<float> model_interpreter; const int batch_size = 4; const int memory_size = 1; const bool train = true; // compile the graph into a set of operations and allocate all tensors model_interpreter.getForwardPropogationOperations(model3, batch_size, memory_size, train, false, true, true); // Store the model interpreter std::string filename = "ModelInterpreterFileTest.csv"; data.storeModelInterpreterCsv(filename, model_interpreter); // NO TESTS } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_METABOLOMICSLATENTARITHMETICDATASIMULATOR_H #define EVONET_METABOLOMICSLATENTARITHMETICDATASIMULATOR_H // .h #include <EvoNet/simulator/BiochemicalDataSimulator.h> namespace EvoNet { template<typename TensorT> class MetabolomicsLatentArithmeticDataSimulator : public BiochemicalDataSimulator<TensorT> { public: void makeTrainingDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_training, const std::vector<std::string>& labels_training, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) override; void makeValidationDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_validation, const std::vector<std::string>& labels_validation, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) override; void readAndProcessMetabolomicsTrainingAndValidationData(int& n_reaction_ids_training, int& n_labels_training, int& n_component_group_names_training, int& n_reaction_ids_validation, int& n_labels_validation, int& n_component_group_names_validation, const std::string& biochem_rxns_filename, const std::string& metabo_data_filename_train, const std::string& meta_data_filename_train, const std::string& metabo_data_filename_test, const std::string& meta_data_filename_test, const bool& use_concentrations, const bool& use_MARs, const bool& sample_values, const bool& iter_values, const bool& fill_sampling, const bool& fill_mean, const bool& fill_zero, const bool& apply_fold_change, const std::string& fold_change_ref, const TensorT& fold_change_log_base, const bool& offline_linear_scale_input, const bool& offline_log_transform_input, const bool& offline_standardize_input, const bool& online_linear_scale_input, const bool& online_log_transform_input, const bool& online_standardize_input, int& n_reps_per_sample, const bool& randomize_sample_group_names, const bool& shuffle_data_and_labels, const int& n_epochs, const int& batch_size, const int& memory_size) override; }; template<typename TensorT> inline void MetabolomicsLatentArithmeticDataSimulator<TensorT>::makeTrainingDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_training, const std::vector<std::string>& labels_training, const int & n_epochs, const int & batch_size, const int & memory_size, const int & n_input_nodes, const int & n_loss_output_nodes, const int & n_metric_output_nodes, const bool& shuffle_data_and_labels) { // infer the input sizes const int input_nodes = data_training.dimension(0); assert(n_input_nodes == 2 * input_nodes); assert(n_loss_output_nodes == input_nodes); assert(n_metric_output_nodes == input_nodes); assert(data_training.dimension(0) == features.size()); assert(data_training.dimension(1) == labels_training.size()); assert(this->n_encodings_continuous_ > 0); assert(this->n_encodings_discrete_ > 0); assert(batch_size > 0); assert(memory_size == 1); assert(n_epochs == this->labels_training_.size() * this->labels_training_.size()); // (addition or subtraction) * labels**2 // initialize the Tensors this->input_data_training_.resize(batch_size, memory_size, n_input_nodes, n_epochs); this->loss_output_data_training_.resize(batch_size, memory_size, n_loss_output_nodes, n_epochs); this->metric_output_data_training_.resize(batch_size, memory_size, n_metric_output_nodes, n_epochs); this->time_steps_training_.resize(batch_size, memory_size, n_epochs); // expand the training data to fit into the requested input size const int expansion_factor = maxFunc(std::ceil(TensorT(batch_size * n_epochs) / TensorT(data_training.dimension(1))), 1); assert(expansion_factor == this->labels_training_.size()); const int over_expanded = data_training.dimension(1)*expansion_factor - batch_size * n_epochs; assert(over_expanded == 0); assert(batch_size * memory_size * n_epochs == data_training.dimension(1)*expansion_factor - over_expanded); Eigen::Tensor<TensorT, 2> data_training_expanded(data_training.dimension(0), data_training.dimension(1)*expansion_factor); Eigen::Tensor<std::string, 2> labels_training_expanded(data_training.dimension(1)*expansion_factor, 1); for (int i = 0; i < expansion_factor; ++i) { // Slices for the data Eigen::array<Eigen::Index, 2> offset1 = { 0, i*data_training.dimension(1) }; Eigen::array<Eigen::Index, 2> span1 = { data_training.dimension(0), data_training.dimension(1) }; data_training_expanded.slice(offset1, span1) = data_training; // Slices for the labels for (int j = 0; j < data_training.dimension(1); ++j) { labels_training_expanded(i*data_training.dimension(1) + j, 0) = labels_training.at(j); } } // optionally shuffle the data and labels if (shuffle_data_and_labels) { MakeShuffleMatrix<TensorT> shuffleMatrix(data_training.dimension(1) * expansion_factor, true); shuffleMatrix(data_training_expanded, true); } // assign the input tensors auto data_training_expanded_4d = data_training_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_training.dimension(0), data_training.dimension(1)*expansion_factor - over_expanded }) ).reshape(Eigen::array<Eigen::Index, 4>({ data_training.dimension(0), batch_size, memory_size, n_epochs }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 1,2,0,3 })); this->input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, input_nodes, n_epochs })) = data_training_expanded_4d; this->input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, input_nodes, n_epochs })) = data_training_expanded_4d; // assign the loss tensors this->loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, input_nodes, n_epochs })) = data_training_expanded_4d; // assign the metric tensors this->metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, input_nodes, n_epochs })) = data_training_expanded_4d; } template<typename TensorT> inline void MetabolomicsLatentArithmeticDataSimulator<TensorT>::makeValidationDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_validation, const std::vector<std::string>& labels_validation, const int & n_epochs, const int & batch_size, const int & memory_size, const int & n_input_nodes, const int & n_loss_output_nodes, const int & n_metric_output_nodes, const bool& shuffle_data_and_labels) { // infer the input sizes const int input_nodes = data_validation.dimension(0); assert(n_input_nodes == 2 * input_nodes); assert(n_loss_output_nodes == input_nodes); assert(n_metric_output_nodes == input_nodes); assert(data_validation.dimension(0) == features.size()); assert(data_validation.dimension(1) == labels_validation.size()); assert(this->n_encodings_continuous_ > 0); assert(this->n_encodings_discrete_ > 0); // initialize the Tensors this->input_data_validation_.resize(batch_size, memory_size, n_input_nodes, n_epochs); this->loss_output_data_validation_.resize(batch_size, memory_size, n_loss_output_nodes, n_epochs); this->metric_output_data_validation_.resize(batch_size, memory_size, n_metric_output_nodes, n_epochs); this->time_steps_validation_.resize(batch_size, memory_size, n_epochs); // expand the validation data to fit into the requested input size const int expansion_factor = maxFunc(std::ceil(TensorT(batch_size * n_epochs) / TensorT(data_validation.dimension(1))), 1); const int over_expanded = data_validation.dimension(1) * expansion_factor - batch_size * n_epochs; assert(batch_size * memory_size * n_epochs == data_validation.dimension(1) * expansion_factor - over_expanded); Eigen::Tensor<TensorT, 2> data_validation_expanded(data_validation.dimension(0), data_validation.dimension(1) * expansion_factor); Eigen::Tensor<std::string, 2> labels_validation_expanded(data_validation.dimension(1) * expansion_factor, 1); for (int i = 0; i < expansion_factor; ++i) { // Slices for the data Eigen::array<Eigen::Index, 2> offset1 = { 0, i * data_validation.dimension(1) }; Eigen::array<Eigen::Index, 2> span1 = { data_validation.dimension(0), data_validation.dimension(1) }; data_validation_expanded.slice(offset1, span1) = data_validation; // Slices for the labels for (int j = 0; j < data_validation.dimension(1); ++j) { labels_validation_expanded(i * data_validation.dimension(1) + j, 0) = labels_validation.at(j); } } // optionally shuffle the data and labels if (shuffle_data_and_labels) { MakeShuffleMatrix<TensorT> shuffleMatrix(data_validation.dimension(1) * expansion_factor, true); shuffleMatrix(data_validation_expanded, true); } // assign the input tensors auto data_validation_expanded_4d = data_validation_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_validation.dimension(0), data_validation.dimension(1) * expansion_factor - over_expanded }) ).reshape(Eigen::array<Eigen::Index, 4>({ data_validation.dimension(0), batch_size, memory_size, n_epochs }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 1,2,0,3 })); this->input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, input_nodes, n_epochs })) = data_validation_expanded_4d; this->input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, input_nodes, n_epochs })) = data_validation_expanded_4d; // assign the loss tensors this->loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, input_nodes, n_epochs })) = data_validation_expanded_4d; // assign the metric tensors this->metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, input_nodes, n_epochs })) = data_validation_expanded_4d; } template<typename TensorT> inline void MetabolomicsLatentArithmeticDataSimulator<TensorT>::readAndProcessMetabolomicsTrainingAndValidationData(int & n_reaction_ids_training, int & n_labels_training, int & n_component_group_names_training, int & n_reaction_ids_validation, int & n_labels_validation, int & n_component_group_names_validation, const std::string & biochem_rxns_filename, const std::string & metabo_data_filename_train, const std::string & meta_data_filename_train, const std::string & metabo_data_filename_test, const std::string & meta_data_filename_test, const bool & use_concentrations, const bool & use_MARs, const bool & sample_values, const bool & iter_values, const bool & fill_sampling, const bool & fill_mean, const bool & fill_zero, const bool & apply_fold_change, const std::string & fold_change_ref, const TensorT & fold_change_log_base, const bool & offline_linear_scale_input, const bool & offline_log_transform_input, const bool & offline_standardize_input, const bool & online_linear_scale_input, const bool & online_log_transform_input, const bool & online_standardize_input, int & n_reps_per_sample, const bool& randomize_sample_group_names, const bool& shuffle_data_and_labels, const int & n_epochs, const int & batch_size, const int & memory_size) { // Read in the data and make the data matrices std::vector<std::string> labels_training; std::vector<std::string> features_training; Eigen::Tensor<TensorT, 2> data_training; std::vector<std::string> labels_validation; std::vector<std::string> features_validation; Eigen::Tensor<TensorT, 2> data_validation; int n_epochs_ = std::sqrt(n_epochs); // We will iteratively expand the data when generating the caches this->readAndMakeMetabolomicsTrainingAndValidationDataMatrices(n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, features_training, data_training, labels_training, features_validation, data_validation, labels_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, use_concentrations, use_MARs, sample_values, iter_values, fill_sampling, fill_mean, fill_zero, apply_fold_change, fold_change_ref, fold_change_log_base, n_reps_per_sample, false, //randomize_sample_group_names, n_epochs_, batch_size, memory_size); // Make the training and validation data caches after an optional transformation step if (use_concentrations) { // Apply offline transformations this->transformTrainingAndValidationDataOffline(data_training, data_validation, offline_linear_scale_input, offline_log_transform_input, offline_standardize_input, false, -1, -1, false, -1, -1); // Apply online transformations this->transformTrainingAndValidationDataOnline(data_training, data_validation, online_linear_scale_input, online_log_transform_input, online_standardize_input); // Make the training data cache this->makeTrainingDataForCache(features_training, data_training, labels_training, n_epochs, batch_size, memory_size, 2*n_component_group_names_training, n_component_group_names_training, n_component_group_names_training, shuffle_data_and_labels); this->makeValidationDataForCache(features_validation, data_validation, labels_validation, n_epochs, batch_size, memory_size, 2*n_component_group_names_training, n_component_group_names_training, n_component_group_names_training, shuffle_data_and_labels); } else if (use_MARs) { // Apply offline transformations TensorT min_value = 1e-3; TensorT max_value = 1e3; if (offline_log_transform_input) { min_value = std::log(min_value); max_value = std::log(max_value); } this->transformTrainingAndValidationDataOffline(data_training, data_validation, offline_linear_scale_input, offline_log_transform_input, offline_standardize_input, true, min_value, max_value, false, -1, -1); // Apply online transformations this->transformTrainingAndValidationDataOnline(data_training, data_validation, online_linear_scale_input, online_log_transform_input, online_standardize_input); // Make the training data cache this->makeTrainingDataForCache(features_training, data_training, labels_training, n_epochs, batch_size, memory_size, 2*n_reaction_ids_validation, n_reaction_ids_validation, n_reaction_ids_validation, shuffle_data_and_labels); this->makeValidationDataForCache(features_validation, data_validation, labels_validation, n_epochs, batch_size, memory_size, 2*n_reaction_ids_validation, n_reaction_ids_validation, n_reaction_ids_validation, shuffle_data_and_labels); } } } #endif //EVONET_METABOLOMICSLATENTARITHMETICDATASIMULATOR_H<file_sep>### the directory name set(directory include/EvoNet/models) ### list all header files of the directory here set(sources_list_h CVAEFullyConn.h CVAEFullyConnDefaultDevice.h CVAEFullyConnGpu.h #VAEFullyConn.h #ClassifierFullyConn.h #ClassifierCovNet.h #ClassifierLSTM.h #ClassifierRecurrent.h #ClassifierDotProdAtt.h ) ### add path to the filenames set(sources_h) foreach(i ${sources_list_h}) list(APPEND sources_h ${directory}/${i}) endforeach(i) ### source group definition source_group("Header Files\\EvoNet\\models" FILES ${sources_h}) set(EvoNet_sources_h ${EvoNet_sources_h} ${sources_h}) <file_sep>#ifndef CIRCUIT_FINDER_H #define CIRCUIT_FINDER_H #include <algorithm> #include <iostream> #include <list> #include <vector> typedef std::list<int> NodeList; /* @brief Elementary circuit finding algorithm The algorithm is similar to Tiernan and Tarjan, but computationally more efficient References: <NAME>. FINDING ALL THE ELEMENTARY CIRCUITS OF A DIRECTED GRAPH. SIAM J. COMPUT. Vol. 4, No. 1, March 1975 */ class CircuitFinder { std::vector<NodeList> AK; std::vector<int> Stack; std::vector<std::pair<int,int>> Cycles; // source/sink pairs std::vector<bool> Blocked; std::vector<NodeList> B; int S; int N; void unblock(int U); bool circuit(int V); void output(); public: CircuitFinder(std::list<int>* adj, int n_nodes){ Blocked.resize(n_nodes); B.resize(n_nodes); AK.resize(n_nodes); N = n_nodes; for (int I = 0; I < n_nodes; ++I) { for (auto J = adj[I].begin(), F = adj[I].end(); J != F; ++J) { AK[I].push_back(*J); } } } void run(); std::vector<std::pair<int, int>> getCycles() { return Cycles; } }; void CircuitFinder::unblock(int U) { Blocked[U - 1] = false; while (!B[U - 1].empty()) { int W = B[U - 1].front(); B[U - 1].pop_front(); if (Blocked[W - 1]) { unblock(W); } } } bool CircuitFinder::circuit(int V) { bool F = false; Stack.push_back(V); Blocked[V - 1] = true; for (int W : AK[V - 1]) { if (W == S) { output(); F = true; } // [PR request] //else if (W > S && !Blocked[W - 1]) { // F = circuit(W); //} else if (!Blocked[W - 1]) { if (circuit(W)) F = true; } } if (F) { unblock(V); } else { for (int W : AK[V - 1]) { auto IT = std::find(B[W - 1].begin(), B[W - 1].end(), V); if (IT == B[W - 1].end()) { B[W - 1].push_back(V); } } } Stack.pop_back(); return F; } void CircuitFinder::output() { //std::cout << "circuit: "; //for (auto I = Stack.begin(), E = Stack.end(); I != E; ++I) { // std::cout << *I << " -> "; //} //std::cout << *Stack.begin() << std::endl; auto I = Stack.end(); --I; Cycles.push_back(std::make_pair(*I, *Stack.begin())); } void CircuitFinder::run() { Stack.clear(); S = 1; while (S < N) { for (int I = S; I <= N; ++I) { Blocked[I - 1] = false; B[I - 1].clear(); } circuit(S); // [PR request] // remove this vertex from the graph for (int I = S + 1; I <= N; ++I) AK[I - 1].remove(S); ++S; } } #endif // CIRCUIT_FINDER_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE CSVWriter test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/io/CSVWriter.h> #include <EvoNet/io/csv.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(CSVWriter1) BOOST_AUTO_TEST_CASE(constructor) { CSVWriter* ptr = nullptr; CSVWriter* nullPointer = nullptr; ptr = new CSVWriter(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { CSVWriter* ptr = nullptr; ptr = new CSVWriter(); delete ptr; } BOOST_AUTO_TEST_CASE(constructor2) { CSVWriter csvwriter("filename1", ";"); BOOST_CHECK_EQUAL(csvwriter.getFilename(), "filename1"); BOOST_CHECK_EQUAL(csvwriter.getDelimeter(), ";"); BOOST_CHECK_EQUAL(csvwriter.getLineCount(), 0); } BOOST_AUTO_TEST_CASE(gettersAndSetters) { CSVWriter csvwriter; csvwriter.setFilename("filename1"); csvwriter.setDelimeter(";"); csvwriter.setLineCount(1); BOOST_CHECK_EQUAL(csvwriter.getFilename(), "filename1"); BOOST_CHECK_EQUAL(csvwriter.getDelimeter(), ";"); BOOST_CHECK_EQUAL(csvwriter.getLineCount(), 1); } BOOST_AUTO_TEST_CASE(writeDataInRow) { std::string filename = "CSVWriterTest.csv"; std::vector<std::string> headers, line; CSVWriter csvwriter(filename); // Write the data to file headers = {"Column1", "Column2", "Column3"}; csvwriter.writeDataInRow(headers.begin(), headers.end()); line = {"a", "b", "c" }; csvwriter.writeDataInRow(line.begin(), line.end()); line = {"1", "2", "3" }; csvwriter.writeDataInRow(line.begin(), line.end()); // Read the data back in io::CSVReader<3> test_in(filename); test_in.read_header(io::ignore_extra_column, "Column1", "Column2", "Column3"); std::string col1, col2, col3; int cnt = 0; while(test_in.read_row(col1, col2, col3)) { if (cnt == 0) { BOOST_CHECK_EQUAL(col1, "a"); BOOST_CHECK_EQUAL(col2, "b"); BOOST_CHECK_EQUAL(col3, "c"); } else if (cnt == 1) { BOOST_CHECK_EQUAL(col1, "1"); BOOST_CHECK_EQUAL(col2, "2"); BOOST_CHECK_EQUAL(col3, "3"); } cnt += 1; } } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #include "Metabolomics_example.h" using namespace SmartPeak; /* @brief Example using intracellular E. coli metabolomics data taken from re-grown glycerol stock solutions on Glucose M9 at mid-exponential phase from adaptive laboratory evolution (ALE) experiments following gene knockout (KO) */ /// Script to run the time-course Summary void main_statistics_timecourseSummary(const std::string& data_dir, bool run_timeCourse_Ref = false, bool run_timeCourse_Gnd = false, bool run_timeCourse_SdhCB = false, bool run_timeCourse_Pgi = false, bool run_timeCourse_PtsHIcrr = false, bool run_timeCourse_TpiA = false) { // define the data simulator BiochemicalReactionModel<float> metabolomics_data; std::string timeCourse_Ref_filename, timeCourse_Gnd_filename, timeCourse_SdhCB_filename, timeCourse_Pgi_filename, timeCourse_PtsHIcrr_filename, timeCourse_TpiA_filename, timeCourseSampleSummary_Ref_filename, timeCourseSampleSummary_Gnd_filename, timeCourseSampleSummary_SdhCB_filename, timeCourseSampleSummary_Pgi_filename, timeCourseSampleSummary_PtsHIcrr_filename, timeCourseSampleSummary_TpiA_filename, timeCourseFeatureSummary_Ref_filename, timeCourseFeatureSummary_Gnd_filename, timeCourseFeatureSummary_SdhCB_filename, timeCourseFeatureSummary_Pgi_filename, timeCourseFeatureSummary_PtsHIcrr_filename, timeCourseFeatureSummary_TpiA_filename; // filenames timeCourse_Ref_filename = data_dir + "EColi_timeCourse_Ref.csv"; timeCourse_Gnd_filename = data_dir + "EColi_timeCourse_Gnd.csv"; timeCourse_SdhCB_filename = data_dir + "EColi_timeCourse_SdhCB.csv"; timeCourse_Pgi_filename = data_dir + "EColi_timeCourse_Pgi.csv"; timeCourse_PtsHIcrr_filename = data_dir + "EColi_timeCourse_PtsHIcrr.csv"; timeCourse_TpiA_filename = data_dir + "EColi_timeCourse_TpiA.csv"; timeCourseSampleSummary_Ref_filename = data_dir + "EColi_timeCourseSampleSummary_Ref.csv"; timeCourseSampleSummary_Gnd_filename = data_dir + "EColi_timeCourseSampleSummary_Gnd.csv"; timeCourseSampleSummary_SdhCB_filename = data_dir + "EColi_timeCourseSampleSummary_SdhCB.csv"; timeCourseSampleSummary_Pgi_filename = data_dir + "EColi_timeCourseSampleSummary_Pgi.csv"; timeCourseSampleSummary_PtsHIcrr_filename = data_dir + "EColi_timeCourseSampleSummary_PtsHIcrr.csv"; timeCourseSampleSummary_TpiA_filename = data_dir + "EColi_timeCourseSampleSummary_TpiA.csv"; timeCourseFeatureSummary_Ref_filename = data_dir + "EColi_timeCourseFeatureSummary_Ref.csv"; timeCourseFeatureSummary_Gnd_filename = data_dir + "EColi_timeCourseFeatureSummary_Gnd.csv"; timeCourseFeatureSummary_SdhCB_filename = data_dir + "EColi_timeCourseFeatureSummary_SdhCB.csv"; timeCourseFeatureSummary_Pgi_filename = data_dir + "EColi_timeCourseFeatureSummary_Pgi.csv"; timeCourseFeatureSummary_PtsHIcrr_filename = data_dir + "EColi_timeCourseFeatureSummary_PtsHIcrr.csv"; timeCourseFeatureSummary_TpiA_filename = data_dir + "EColi_timeCourseFeatureSummary_TpiA.csv"; if (run_timeCourse_Ref) { // Read in the data PWData timeCourseRef; ReadPWData(timeCourse_Ref_filename, timeCourseRef); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseRef, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_Ref_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_Ref_filename, pw_feature_summaries); } if (run_timeCourse_Gnd) { // Read in the data PWData timeCourseGnd; ReadPWData(timeCourse_Gnd_filename, timeCourseGnd); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseGnd, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_Gnd_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_Gnd_filename, pw_feature_summaries); } if (run_timeCourse_SdhCB) { // Read in the data PWData timeCourseSdhCB; ReadPWData(timeCourse_SdhCB_filename, timeCourseSdhCB); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseSdhCB, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_SdhCB_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_SdhCB_filename, pw_feature_summaries); } if (run_timeCourse_Pgi) { // Read in the data PWData timeCoursePgi; ReadPWData(timeCourse_Pgi_filename, timeCoursePgi); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCoursePgi, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_Pgi_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_Pgi_filename, pw_feature_summaries); } if (run_timeCourse_PtsHIcrr) { // Read in the data PWData timeCoursePtsHIcrr; ReadPWData(timeCourse_PtsHIcrr_filename, timeCoursePtsHIcrr); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCoursePtsHIcrr, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_PtsHIcrr_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_PtsHIcrr_filename, pw_feature_summaries); } if (run_timeCourse_TpiA) { // Read in the data PWData timeCourseTpiA; ReadPWData(timeCourse_TpiA_filename, timeCourseTpiA); // Summarize the data PWSampleSummaries pw_sample_summaries; PWFeatureSummaries pw_feature_summaries; PWTotalSummary pw_total_summary; PWSummary(timeCourseTpiA, pw_sample_summaries, pw_feature_summaries, pw_total_summary); // Export to file WritePWSampleSummaries(timeCourseSampleSummary_TpiA_filename, pw_sample_summaries); WritePWFeatureSummaries(timeCourseFeatureSummary_TpiA_filename, pw_feature_summaries); } } /// Script to run the time-course MARs analysis void main_statistics_timecourse(const std::string& data_dir, bool run_timeCourse_Ref = false, bool run_timeCourse_Gnd = false, bool run_timeCourse_SdhCB = false, bool run_timeCourse_Pgi = false, bool run_timeCourse_PtsHIcrr = false, bool run_timeCourse_TpiA = false) { // define the data simulator BiochemicalReactionModel<float> metabolomics_data; std::string biochem_rxns_filename, metabo_data_filename, meta_data_filename, timeCourse_Ref_filename, timeCourse_Gnd_filename, timeCourse_SdhCB_filename, timeCourse_Pgi_filename, timeCourse_PtsHIcrr_filename, timeCourse_TpiA_filename; std::vector<std::string> pre_samples, timeCourse_Ref_samples, timeCourse_Gnd_samples, timeCourse_SdhCB_samples, timeCourse_Pgi_samples, timeCourse_PtsHIcrr_samples, timeCourse_TpiA_samples; // filenames biochem_rxns_filename = data_dir + "iJO1366.csv"; metabo_data_filename = data_dir + "ALEsKOs01_Metabolomics.csv"; meta_data_filename = data_dir + "ALEsKOs01_MetaData.csv"; timeCourse_Ref_filename = data_dir + "EColi_timeCourse_Ref.csv"; timeCourse_Gnd_filename = data_dir + "EColi_timeCourse_Gnd.csv"; timeCourse_SdhCB_filename = data_dir + "EColi_timeCourse_SdhCB.csv"; timeCourse_Pgi_filename = data_dir + "EColi_timeCourse_Pgi.csv"; timeCourse_PtsHIcrr_filename = data_dir + "EColi_timeCourse_PtsHIcrr.csv"; timeCourse_TpiA_filename = data_dir + "EColi_timeCourse_TpiA.csv"; timeCourse_Ref_samples = { "Evo04", "Evo04Evo01EP", "Evo04Evo02EP" }; timeCourse_Gnd_samples = { "Evo04", "Evo04gnd", "Evo04gndEvo01EP", "Evo04gndEvo02EP", "Evo04gndEvo03EP" }; timeCourse_SdhCB_samples = { "Evo04", "Evo04sdhCB", "Evo04sdhCBEvo01EP", "Evo04sdhCBEvo02EP", "Evo04sdhCBEvo03EP", "Evo04sdhCBEvo03EP-2", "Evo04sdhCBEvo03EP-3", "Evo04sdhCBEvo03EP-4", "Evo04sdhCBEvo03EP-5", "Evo04sdhCBEvo03EP-6" }; timeCourse_Pgi_samples = { "Evo04", "Evo04pgi", "Evo04pgiEvo01EP", "Evo04pgiEvo01J01", "Evo04pgiEvo01J02", "Evo04pgiEvo02EP", "Evo04pgiEvo02J01", "Evo04pgiEvo02J02", "Evo04pgiEvo02J03", "Evo04pgiEvo03EP", "Evo04pgiEvo03J01", "Evo04pgiEvo03J02", "Evo04pgiEvo03J03", "Evo04pgiEvo04EP", "Evo04pgiEvo04J01", "Evo04pgiEvo04J02", "Evo04pgiEvo04J03", "Evo04pgiEvo05EP", "Evo04pgiEvo05J01", "Evo04pgiEvo05J02", "Evo04pgiEvo05J03", "Evo04pgiEvo06EP", "Evo04pgiEvo06J01", "Evo04pgiEvo06J02", "Evo04pgiEvo06J03", "Evo04pgiEvo07EP", "Evo04pgiEvo07J01", "Evo04pgiEvo07J02", "Evo04pgiEvo07J03", "Evo04pgiEvo08EP", "Evo04pgiEvo08J01", "Evo04pgiEvo08J02", "Evo04pgiEvo08J03" }; timeCourse_PtsHIcrr_samples = { "Evo04", "Evo04ptsHIcrr", "Evo04ptsHIcrrEvo01EP", "Evo04ptsHIcrrEvo01J01", "Evo04ptsHIcrrEvo01J03", "Evo04ptsHIcrrEvo02EP", "Evo04ptsHIcrrEvo02J01", "Evo04ptsHIcrrEvo02J03", "Evo04ptsHIcrrEvo03EP", "Evo04ptsHIcrrEvo03J01", "Evo04ptsHIcrrEvo03J03", "Evo04ptsHIcrrEvo03J04", "Evo04ptsHIcrrEvo04EP", "Evo04ptsHIcrrEvo04J01", "Evo04ptsHIcrrEvo04J03", "Evo04ptsHIcrrEvo04J04" }; timeCourse_TpiA_samples = { "Evo04", "Evo04tpiA", "Evo04tpiAEvo01EP", "Evo04tpiAEvo01J01", "Evo04tpiAEvo01J03", "Evo04tpiAEvo02EP", "Evo04tpiAEvo02J01", "Evo04tpiAEvo02J03", "Evo04tpiAEvo03EP", "Evo04tpiAEvo03J01", "Evo04tpiAEvo03J03", "Evo04tpiAEvo04EP", "Evo04tpiAEvo04J01", "Evo04tpiAEvo04J03" }; // read in the data metabolomics_data.readBiochemicalReactions(biochem_rxns_filename); metabolomics_data.readMetabolomicsData(metabo_data_filename); metabolomics_data.readMetaData(meta_data_filename); metabolomics_data.findComponentGroupNames(); metabolomics_data.findMARs(); metabolomics_data.findMARs(true, false); metabolomics_data.findMARs(false, true); metabolomics_data.findLabels(); if (run_timeCourse_Ref) { // Find significant pair-wise MARS between each sample (one vs one) PWData timeCourseRef = PWComparison(metabolomics_data, timeCourse_Ref_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_Ref_filename, timeCourseRef); } if (run_timeCourse_Gnd) { // Find significant pair-wise MARS between each sample (one vs one) PWData timeCourseGnd = PWComparison(metabolomics_data, timeCourse_Gnd_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_Gnd_filename, timeCourseGnd); } if (run_timeCourse_SdhCB) { // Find significant pair-wise MARS between each sample (one vs one) PWData timeCourseSdhCB = PWComparison(metabolomics_data, timeCourse_SdhCB_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_SdhCB_filename, timeCourseSdhCB); } if (run_timeCourse_Pgi) { // Find significant pair-wise MARS between each sample (one vs one) PWData timeCoursePgi = PWComparison(metabolomics_data, timeCourse_Pgi_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_Pgi_filename, timeCoursePgi); } if (run_timeCourse_PtsHIcrr) { // Find significant pair-wise MARS between each sample (one vs one) PWData timeCoursePtsHIcrr = PWComparison(metabolomics_data, timeCourse_PtsHIcrr_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_PtsHIcrr_filename, timeCoursePtsHIcrr); } if (run_timeCourse_TpiA) { // Find significant pair-wise MARS between each sample (one vs one) PWData timeCourseTpiA = PWComparison(metabolomics_data, timeCourse_TpiA_samples, 10000, 0.05, 1.0); // Export to file WritePWData(timeCourse_TpiA_filename, timeCourseTpiA); } } // Main int main(int argc, char** argv) { // Set the data directories //const std::string data_dir = "C:/Users/dmccloskey/Dropbox (UCSD SBRG)/Metabolomics_KALE/"; const std::string data_dir = "C:/Users/domccl/Dropbox (UCSD SBRG)/Metabolomics_KALE/"; //const std::string data_dir = "/home/user/Data/"; main_statistics_timecourse(data_dir, true, true, true, true, true, true); main_statistics_timecourseSummary(data_dir, true, true, true, true, true, true); return 0; }<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELTRAINER_H #define EVONET_MODELTRAINER_H // .h #include <EvoNet/ml/Model.h> #include <EvoNet/ml/LossFunction.h> #include <EvoNet/ml/MetricFunction.h> #include <EvoNet/ml/ModelLogger.h> #include <EvoNet/simulator/DataSimulator.h> #include <vector> #include <string> // .cpp namespace EvoNet { template<typename TensorT> struct LossFunctionHelper { std::vector<std::string> output_nodes_; ///< output node names std::vector<std::shared_ptr<LossFunctionOp<TensorT>>> loss_functions_; ///< loss functions to apply to the node names std::vector<std::shared_ptr<LossFunctionGradOp<TensorT>>> loss_function_grads_; ///< corresponding loss function grads to apply to the node names }; template<typename TensorT> struct MetricFunctionHelper { std::vector<std::string> output_nodes_; ///< output node names std::vector<std::shared_ptr<MetricFunctionOp<TensorT>>> metric_functions_; ///< metric functions to apply to the node names std::vector<std::string> metric_names_; ///< corresponding metric function names given for each metric function }; /** @brief Class to train a network model */ template<typename TensorT, typename InterpreterT> class ModelTrainer { public: ModelTrainer() = default; ///< Default constructor ~ModelTrainer() = default; ///< Default destructor void setBatchSize(const int& batch_size) { batch_size_ = batch_size; }; ///< batch_size setter void setMemorySize(const int& memory_size) { memory_size_ = memory_size; }; ///< memory_size setter void setNEpochsTraining(const int& n_epochs) { n_epochs_training_ = n_epochs; }; ///< n_epochs setter void setNEpochsValidation(const int& n_epochs) { n_epochs_validation_ = n_epochs; }; ///< n_epochs setter void setNEpochsEvaluation(const int& n_epochs) { n_epochs_evaluation_ = n_epochs; }; ///< n_epochs setter void setVerbosityLevel(const int& verbosity_level) { verbosity_level_ = verbosity_level; }; ///< verbosity_level setter void setLogging(bool log_training = false, bool log_validation = false, bool log_evaluation = false); ///< enable_logging setter void setLossFunctionHelpers(const std::vector<LossFunctionHelper<TensorT>>& loss_function_helpers) { loss_function_helpers_ = loss_function_helpers; }; ///< loss_function_helpers setter [TODO: tests] void setMetricFunctionHelpers(const std::vector<MetricFunctionHelper<TensorT>>& metric_function_helpers) { metric_function_helpers_ = metric_function_helpers; }; ///< loss_function_helpers setter [TODO: tests] void setNTBPTTSteps(const int& n_TBPTT) { n_TBPTT_steps_ = n_TBPTT; }; ///< n_TBPTT setter void setNTETTSteps(const int& n_TETT) { n_TETT_steps_ = n_TETT; }; ///< n_TETT setter void setFindCycles(const bool& find_cycles) { find_cycles_ = find_cycles; }; ///< find_cycles setter [TODO: tests] void setFastInterpreter(const bool& fast_interpreter) { fast_interpreter_ = fast_interpreter; }; ///< fast_interpreter setter [TODO: tests] void setPreserveOoO(const bool& preserve_OoO) { preserve_OoO_ = preserve_OoO; }; ///< preserve_OoO setter [TODO: test] void setInterpretModel(const bool& interpret_model) { interpret_model_ = interpret_model; }; ///< interpret_model setter [TODO: test] void setResetModel(const bool& reset_model) { reset_model_ = reset_model; }; ///< reset_model setter [TODO: test] void setResetInterpreter(const bool& reset_interpreter) { reset_interpreter_ = reset_interpreter; }; ///< reset_interpreter setter [TODO: test] int getBatchSize() const { return batch_size_; }; ///< batch_size setter int getMemorySize() const { return memory_size_; }; ///< memory_size setter int getNEpochsTraining() const { return n_epochs_training_; }; ///< n_epochs setter int getNEpochsValidation() const { return n_epochs_validation_; }; ///< n_epochs setter int getNEpochsEvaluation() const { return n_epochs_evaluation_; }; ///< n_epochs setter int getVerbosityLevel() const { return verbosity_level_; }; ///< verbosity_level setter bool getLogTraining() const { return log_training_; }; bool getLogValidation() const { return log_validation_; }; bool getLogEvaluation() const { return log_evaluation_; }; std::vector<LossFunctionHelper<TensorT>> getLossFunctionHelpers() { return loss_function_helpers_; }; ///< loss_function_helpers getter [TODO: tests] std::vector<MetricFunctionHelper<TensorT>> getMetricFunctionHelpers() { return metric_function_helpers_; }; ///< metric_functions_helpers getter [TODO: tests] int getNTBPTTSteps() const { return n_TBPTT_steps_; }; ///< n_TBPTT setter int getNTETTSteps() const { return n_TETT_steps_; }; ///< n_TETT setter bool getFindCycles() { return find_cycles_; }; ///< find_cycles getter [TODO: tests] bool getFastInterpreter() { return fast_interpreter_; }; ///< fast_interpreter getter [TODO: tests] bool getPreserveOoO() { return preserve_OoO_; }; ///< preserve_OoO getter [TODO: tests] bool getInterpretModel() { return interpret_model_; }; ///< find_cycles getter [TODO: tests] bool getResetModel() { return reset_model_; }; ///< fast_interpreter getter [TODO: tests] bool getResetInterpreter() { return reset_interpreter_; }; ///< preserve_OoO getter [TODO: tests] std::vector<std::string> getLossOutputNodesLinearized() const; ///< Return a linearized vector of all loss output nodes std::vector<std::string> getMetricOutputNodesLinearized() const; ///< Return a linearized vector of all metric output nodes std::vector<std::string> getMetricNamesLinearized() const; ///< Return a linearized vector of all metric names int getNLossFunctions() const; ///< Return the number of loss functions int getNMetricFunctions() const; ///< Return the number of metric functions /** @brief Check input dimensions. @param n_epochs The number of training epochs @param input The input data @param batch_size The batch size of the nodes @param memory_size The memory size of the nodes @param input_nodes The node names @returns True on success, False if not */ bool checkInputData(const int& n_epochs, const Eigen::Tensor<TensorT, 4>& input, const int& batch_size, const int& memory_size, const std::vector<std::string>& input_nodes); /** @brief Check output dimensions. @param n_epochs The number of training epochs @param output The output data @param batch_size The batch size of the nodes @param output_nodes The node names @returns True on success, False if not */ bool checkOutputData(const int& n_epochs, const Eigen::Tensor<TensorT, 4>& output, const int& batch_size, const int& memory_size, const std::vector<std::string>& output_nodes); /** @brief Check time step dimensions required for FPTT. @param n_epochs The number of training epochs @param time_steps The time step spacing @param batch_size The batch size of the nodes @param memory_size The memory size of the nodes @returns True on success, False if not */ bool checkTimeSteps(const int& n_epochs, const Eigen::Tensor<TensorT, 3>& time_steps, const int& batch_size, const int& memory_size); /** @brief Check that all loss function members are of the same size @returns True on success, False if not */ bool checkLossFunctions(); /** @brief Check that all loss function members are of the same size @returns True on success, False if not */ bool checkMetricFunctions(); /** @brief Entry point for users to code their script for model training @param[in, out] model The model to train @param[in] input Input data tensor of dimensions: batch_size, memory_size, input_nodes, n_epochs @param[in] output Expected output data tensor of dimensions: batch_size, memory_size, output_nodes, n_epochs @param[in] time_steps Time steps of the forward passes of dimensions: batch_size, memory_size, n_epochs @param[in] input_nodes Input node names @param[in] model_logger Model logger to log training epochs @param[in] model_interpreter The model interpreter @returns vector of average model error scores */ virtual std::vector<TensorT> trainModel(Model<TensorT>& model, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes, ModelLogger<TensorT>& model_logger, InterpreterT& model_interpreter); /** @brief Entry point for users to code their script for model training Default workflow executes the following methods: 1. Model interpretation and tensor memory allocation 2. Validation data generation 3. Validation FPTT, CETT, and METT 4. Training data generation 5. Training FPTT, CETT, METT, BPTT, Weight update 6. Logging 7. Adaptive trainer scheduling @param[in, out] model The model to train @param[in] data_simulator The training, validation, and test data generator @param[in] input_nodes Input node names @param[in] model_logger Model logger to log training epochs @param[in] model_interpreter The model interpreter @returns vector of average model error scores from training and testing/validation */ virtual std::pair<std::vector<TensorT>, std::vector<TensorT>> trainModel(Model<TensorT>& model, DataSimulator<TensorT> &data_simulator, const std::vector<std::string>& input_nodes, ModelLogger<TensorT>& model_logger, InterpreterT& model_interpreter); /** @brief Entry point for users to code their script for model validation @param[in, out] model The model to train @param[in] model_resources The hardware available for training the model @param[in] input Input data tensor of dimensions: batch_size, memory_size, input_nodes, n_epochs @param[in] output Expected output data tensor of dimensions: batch_size, memory_size, output_nodes, n_epochs @param[in] time_steps Time steps of the forward passes of dimensions: batch_size, memory_size, n_epochs @param[in] input_nodes Input node names @returns vector of average model error scores */ virtual std::vector<TensorT> validateModel(Model<TensorT>& model, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes, ModelLogger<TensorT>& model_logger, InterpreterT& model_interpreter); /** @brief Entry point for users to code their script for model validation Same as modelTrainer except for the following: - Training BPTT and Weight update steps are omitted - adaptive trainer scheduling is omitted @param[in, out] model The model to train @param[in] data_simulator The training, validation, and test data generator @param[in] input_nodes Input node names @param[in] model_logger Model logger to log training epochs @param[in] model_interpreter The model interpreter @returns vector of average model error scores from training and testing/validation */ virtual std::pair<std::vector<TensorT>, std::vector<TensorT>> validateModel(Model<TensorT>& model, DataSimulator<TensorT> &data_simulator, const std::vector<std::string>& input_nodes, ModelLogger<TensorT>& model_logger, InterpreterT& model_interpreter); /** @brief Entry point for users to code their script for model forward evaluations @param[in, out] model The model to train @param[in] input Input data tensor of dimensions: batch_size, memory_size, input_nodes, n_epochs @param[in] time_steps Time steps of the forward passes of dimensions: batch_size, memory_size, n_epochs @param[in] input_nodes Input node names @returns Tensor of dims batch_size, memory_size, output_nodes, n_epochs (similar to input) */ virtual Eigen::Tensor<TensorT, 4> evaluateModel(Model<TensorT>& model, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes, ModelLogger<TensorT>& model_logger, InterpreterT& model_interpreter); /** @brief Entry point for users to code their script for model forward evaluations Default workflow executes the following methods: 1. Model interpretation and tensor memory allocation 2. Evaluation data generation 3. Evaluation FPTT, METT 4. Logging 5. Adaptive trainer scheduling @param[in, out] model The model to train @param[in] data_simulator The training, validation, and test data generator @param[in] input_nodes Input node names @returns Tensor of dims batch_size, memory_size, output_nodes, n_epochs (similar to input) */ virtual Eigen::Tensor<TensorT, 4> evaluateModel(Model<TensorT>& model, DataSimulator<TensorT> &data_simulator, const std::vector<std::string>& input_nodes, ModelLogger<TensorT>& model_logger, InterpreterT& model_interpreter); /** @brief Entry point for users to code their script to build the model @returns The constructed model */ virtual Model<TensorT> makeModel(); /** @brief Entry point for users to code their adaptive scheduler to modify training parameters based on a given trigger @param[in] n_generations The number of evolution generations @param[in] n_epochs The number of training/validation epochs @param[in, out] model The model @param[in, out] model_interpreter The model interpreter @param[in] model_errors The trace of model errors from training/validation */ virtual void adaptiveTrainerScheduler(const int& n_generations,const int& n_epochs,Model<TensorT>& model,InterpreterT& model_interpreter,const std::vector<TensorT>& model_errors); /** @brief Entry point for users to code their training logger [TODO: add tests] @param[in] n_generations The number of evolution generations @param[in] n_epochs The number of training/validation epochs @param[in, out] model The model @param[in, out] model_interpreter The model interpreter @param[in, out] model_logger The model logger @param[in] expected_values The expected values @param[in] output_nodes The output node names @param[in] input_nodes The input node names @param[in] model_error The model error */ virtual void trainingModelLogger(const int& n_epochs,Model<TensorT>& model,InterpreterT& model_interpreter,ModelLogger<TensorT>& model_logger,const Eigen::Tensor<TensorT, 3>& expected_values,const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes,const TensorT& model_error); /** @brief Entry point for users to code their training logger [TODO: add tests] @param[in] n_generations The number of evolution generations @param[in] n_epochs The number of training/validation epochs @param[in,out] model The model @param[in,out] model_interpreter The model interpreter @param[in,out] model_logger The model logger @param[in] expected_values The expected values @param[in] output_nodes The output node names @param[in] input_nodes The input node names @param[in] model_error_train @param[in] model_error_test @param[in] model_metrics_train @param[in] model_metrics_test */ virtual void trainingModelLogger(const int& n_epochs,Model<TensorT>& model, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger,const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes,const std::vector<std::string>& input_nodes,const TensorT& model_error_train, const TensorT& model_error_test,const Eigen::Tensor<TensorT, 1> & model_metrics_train, const Eigen::Tensor<TensorT, 1> & model_metrics_test); /** @brief Entry point for users to code their validation logger [TODO: add tests] @param[in] n_generations The number of evolution generations @param[in] n_epochs The number of training/validation epochs @param[in, out] model The model @param[in, out] model_interpreter The model interpreter @param[in, out] model_logger The model logger @param[in] expected_values The expected values @param[in] output_nodes The output node names @param[in] input_nodes The input node names @param[in] model_error The model error */ virtual void validationModelLogger(const int& n_epochs,Model<TensorT>& model,InterpreterT& model_interpreter,ModelLogger<TensorT>& model_logger,const Eigen::Tensor<TensorT, 3>& expected_values,const std::vector<std::string>& output_nodes,const std::vector<std::string>& input_nodes,const TensorT& model_error); /** @brief Entry point for users to code their validation logger [TODO: add tests] @param[in] n_generations The number of evolution generations @param[in] n_epochs The number of training/validation epochs @param[in, out] model The model @param[in, out] model_interpreter The model interpreter @param[in, out] model_logger The model logger @param[in] expected_values The expected values @param[in] output_nodes The output node names @param[in] input_nodes The input node names @param[in] model_error_train @param[in] model_error_test @param[in] model_metrics_train @param[in] model_metrics_test */ virtual void validationModelLogger(const int& n_epochs, Model<TensorT>& model, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger,const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes,const std::vector<std::string>& input_nodes,const TensorT& model_error_train, const TensorT& model_error_test,const Eigen::Tensor<TensorT, 1> & model_metrics_train, const Eigen::Tensor<TensorT, 1> & model_metrics_test); /** @brief Entry point for users to code their evaluation logger [TODO: add tests] @param[in] n_generations The number of evolution generations @param[in] n_epochs The number of training/validation epochs @param[in, out] model The model @param[in, out] model_interpreter The model interpreter @param[in, out] model_logger The model logger @param[in] output_nodes The output node names @param[in] input_nodes The input node names */ virtual void evaluationModelLogger(const int& n_epochs,Model<TensorT>& model,InterpreterT& model_interpreter,ModelLogger<TensorT>& model_logger,const std::vector<std::string>& output_nodes,const std::vector<std::string>& input_nodes); /** @brief Entry point for users to code their training logger [TODO: add tests] @param[in] n_generations The number of evolution generations @param[in] n_epochs The number of training/validation epochs @param[in, out] model The model @param[in, out] model_interpreter The model interpreter @param[in, out] model_logger The model logger @param[in] expected_values The expected values @param[in] output_nodes The output node names @param[in] input_nodes The input node names @param[in] model_metrics The model metrics */ virtual void evaluationModelLogger(const int& n_epochs,Model<TensorT>& model, InterpreterT& model_interpreter,ModelLogger<TensorT>& model_logger,const Eigen::Tensor<TensorT, 3>& expected_values,const std::vector<std::string>& output_nodes,const std::vector<std::string>& input_nodes,const Eigen::Tensor<TensorT, 1>& model_metrics); /* @brief Determine the decay factor to reduce the learning rate by if the model_errors has not increases/decreased by a specified threshold for a period of epochs TODO: could be more nicely implemented as a functor? @param[in] model_errors The history of model errors @param[in] decay A scalar to multiple the current learning rate to get the new learning rate @param[in] cur_epoch The current epoch number @param[in] n_epochs_avg The number of epochs to determine the average model error @param[in] n_epochs_win The number of epochs to determine a recent window to compare to the average model error @param[in] min_per_error_diff The minimum percent error difference to change the learning rate @returns The decayed factor to change the learning rate */ TensorT reduceLROnPlateau(const std::vector<float>& model_errors, const TensorT& decay, const int& n_epochs_avg, const int& n_epochs_win, const TensorT& min_perc_error_diff); protected: void ApplyModelLosses_(Model<TensorT>& model, const Eigen::Tensor<TensorT, 3>& output, InterpreterT& model_interpreter); ///< Apply the loss functions to each of the model output nodes void ApplyModelMetrics_(Model<TensorT>& model, const Eigen::Tensor<TensorT, 3>& output, InterpreterT& model_interpreter); ///< Apply the metric functions to each of the model output nodes std::vector<LossFunctionHelper<TensorT>> loss_function_helpers_; std::vector<MetricFunctionHelper<TensorT>> metric_function_helpers_; private: int batch_size_ = 1; int memory_size_ = 1; int n_epochs_training_ = 0; int n_epochs_validation_ = 0; int n_epochs_evaluation_ = 0; int n_TBPTT_steps_ = -1; ///< the number of truncated back propogation through time steps int n_TETT_steps_ = -1; ///< the number of truncated error through time calculation steps int verbosity_level_ = 0; ///< level of verbosity (0=none, 1=test/validation errors, 2=test/validation node values bool log_training_ = false; ///< whether to log training epochs or not bool log_validation_ = false; ///< whether to log validation epochs or not bool log_evaluation_ = false; ///< whether to log evaluation epochs or not bool interpret_model_ = true; ///< whether to interpret the model and allocate associated Tensor memory for the model interpreter bool reset_model_ = true; ///< whether to reset the model at the end of training bool reset_interpreter_ = true; ///< whether to reset the model interpreter at the end of training bool find_cycles_ = true; ///< whether to find cycles prior to interpreting the model (see `ModelInterpreter`) bool fast_interpreter_ = false; ///< whether to skip certain checks when interpreting the model (see `ModelInterpreter`) bool preserve_OoO_ = true; ///< whether to preserve the order of operations (see `ModelInterpreter`) }; template<typename TensorT, typename InterpreterT> void ModelTrainer<TensorT, InterpreterT>::setLogging(bool log_training, bool log_validation, bool log_evaluation) { log_training_ = log_training; log_validation_ = log_validation; log_evaluation_ = log_evaluation; } template<typename TensorT, typename InterpreterT> inline std::vector<std::string> ModelTrainer<TensorT, InterpreterT>::getLossOutputNodesLinearized() const { std::vector<std::string> output_nodes; for (const auto& helper : this->loss_function_helpers_) for (const std::string& output_node : helper.output_nodes_) output_nodes.push_back(output_node); return output_nodes; } template<typename TensorT, typename InterpreterT> inline std::vector<std::string> ModelTrainer<TensorT, InterpreterT>::getMetricOutputNodesLinearized() const { std::vector<std::string> output_nodes; for (const auto& helper : this->metric_function_helpers_) for (const std::string& output_node : helper.output_nodes_) output_nodes.push_back(output_node); return output_nodes; } template<typename TensorT, typename InterpreterT> inline std::vector<std::string> ModelTrainer<TensorT, InterpreterT>::getMetricNamesLinearized() const { std::vector<std::string> metric_names; for (const auto& helper : this->metric_function_helpers_) for (const std::string& metric_name : helper.metric_names_) metric_names.push_back(metric_name); return metric_names; } template<typename TensorT, typename InterpreterT> inline int ModelTrainer<TensorT, InterpreterT>::getNLossFunctions() const { int cnt = 0; for (const auto& helper : this->loss_function_helpers_) cnt += helper.loss_functions_.size(); return cnt; } template<typename TensorT, typename InterpreterT> inline int ModelTrainer<TensorT, InterpreterT>::getNMetricFunctions() const { int cnt = 0; for (const auto& helper : this->metric_function_helpers_) cnt += helper.metric_functions_.size(); return cnt; } template<typename TensorT, typename InterpreterT> bool ModelTrainer<TensorT, InterpreterT>::checkInputData(const int& n_epochs, const Eigen::Tensor<TensorT, 4>& input, const int& batch_size, const int& memory_size, const std::vector<std::string>& input_nodes) { if (input.dimension(0) != batch_size) { printf("batch_size of %d is not compatible with the input dim 0 of %d\n", batch_size, (int)input.dimension(0)); return false; } else if (input.dimension(1) != memory_size) { printf("memory_size of %d is not compatible with the input dim 1 of %d\n", memory_size, (int)input.dimension(1)); return false; } else if (input.dimension(2) != input_nodes.size()) { printf("input_nodes size of %d is not compatible with the input dim 2 of %d\n", input_nodes.size(), (int)input.dimension(2)); return false; } else if (input.dimension(3) != n_epochs) { printf("n_epochs of %d is not compatible with the input dim 3 of %d\n", n_epochs, (int)input.dimension(3)); return false; } else { return true; } } template<typename TensorT, typename InterpreterT> bool ModelTrainer<TensorT, InterpreterT>::checkOutputData(const int& n_epochs, const Eigen::Tensor<TensorT, 4>& output, const int& batch_size, const int& memory_size, const std::vector<std::string>& output_nodes) { if (output.dimension(0) != batch_size) { printf("batch_size of %d is not compatible with the output dim 0 of %d\n", batch_size, (int)output.dimension(0)); return false; } else if (output.dimension(1) != memory_size) { printf("memory_size of %d is not compatible with the output dim 1 of %d\n", memory_size, (int)output.dimension(1)); return false; } else if (output.dimension(2) != output_nodes.size()) { printf("output_nodes size of %d is not compatible with the output dim 2 of %d\n", output_nodes.size(), (int)output.dimension(2)); return false; } else if (output.dimension(3) != n_epochs) { printf("n_epochs of %d is not compatible with the output dim 3 of %d\n", n_epochs, (int)output.dimension(3)); return false; } else { return true; } } template<typename TensorT, typename InterpreterT> bool ModelTrainer<TensorT, InterpreterT>::checkTimeSteps(const int & n_epochs, const Eigen::Tensor<TensorT, 3>& time_steps, const int & batch_size, const int & memory_size) { if (time_steps.dimension(0) != batch_size) { printf("batch_size of %d is not compatible with the time_steps dim 0 of %d\n", batch_size, (int)time_steps.dimension(0)); return false; } else if (time_steps.dimension(1) != memory_size) { printf("memory_size of %d is not compatible with the time_steps dim 1 of %d\n", memory_size, (int)time_steps.dimension(1)); return false; } else if (time_steps.dimension(2) != n_epochs) { printf("n_epochs of %d is not compatible with the time_steps dim 3 of %d\n", n_epochs, (int)time_steps.dimension(2)); return false; } else { return true; } } template<typename TensorT, typename InterpreterT> inline bool ModelTrainer<TensorT, InterpreterT>::checkLossFunctions() { if (loss_function_helpers_.size() == 0) { std::cout << "No loss function helpers have been set!" << std::endl; return false; } for (const auto& helper : loss_function_helpers_) { if (helper.loss_functions_.size() != helper.loss_function_grads_.size()) { std::cout << "Loss functions and loss functions gradients are not of consistent length!" << std::endl; return false; } if (helper.output_nodes_.size() == 0) { std::cout << "Loss function nodes have not been set!" << std::endl; return false; } } return true; } template<typename TensorT, typename InterpreterT> inline bool ModelTrainer<TensorT, InterpreterT>::checkMetricFunctions() { if (metric_function_helpers_.size() == 0) { std::cout << "No metric function helpers have been set!" << std::endl; //return false; } for (const auto& helper : metric_function_helpers_) { if (helper.metric_functions_.size() != helper.metric_names_.size()) { std::cout << "Metric functions and metric functions names are not of consistent lengths!" << std::endl; return false; } if (helper.output_nodes_.size() == 0) { std::cout << "Metric function nodes have not been set!" << std::endl; return false; } } return true; } template<typename TensorT, typename InterpreterT> inline std::vector<TensorT> ModelTrainer<TensorT, InterpreterT>::trainModel(Model<TensorT>& model, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes, ModelLogger<TensorT>& model_logger, InterpreterT& model_interpreter) { std::vector<TensorT> model_error; // Check input and output data if (!this->checkInputData(this->getNEpochsTraining(), input, this->getBatchSize(), this->getMemorySize(), input_nodes)) { return model_error; } std::vector<std::string> output_nodes = this->getLossOutputNodesLinearized(); if (!this->checkOutputData(this->getNEpochsTraining(), output, this->getBatchSize(), this->getMemorySize(), output_nodes)) { return model_error; } if (!this->checkTimeSteps(this->getNEpochsTraining(), time_steps, this->getBatchSize(), this->getMemorySize())) { return model_error; } if (!model.checkNodeNames(input_nodes)) { return model_error; } if (!model.checkNodeNames(output_nodes)) { return model_error; } // Initialize the model if (this->getVerbosityLevel() >= 2) std::cout << "Intializing the model..." << std::endl; if (this->getFindCycles()) model.findCycles(); // compile the graph into a set of operations and allocate all tensors if (this->getInterpretModel()) { if (this->getVerbosityLevel() >= 2) std::cout << "Interpreting the model..." << std::endl; model_interpreter.checkMemory(model, this->getBatchSize(), this->getMemorySize()); model_interpreter.getForwardPropogationOperations(model, this->getBatchSize(), this->getMemorySize(), true, this->getFastInterpreter(), this->getFindCycles(), this->getPreserveOoO()); model_interpreter.allocateModelErrorTensor(this->getBatchSize(), this->getMemorySize(), this->getNMetricFunctions()); } for (int iter = 0; iter < this->getNEpochsTraining(); ++iter) // use n_epochs here { // update the model hyperparameters this->adaptiveTrainerScheduler(0, iter, model, model_interpreter, model_error); // assign the input data model_interpreter.initBiases(model); // create the bias model_interpreter.mapValuesToLayers(model, input.chip(iter, 3), input_nodes, "output"); // Needed for OoO/IG with DAG and DCG model_interpreter.mapValuesToLayers(model, input.chip(iter, 3), input_nodes, "input"); // Needed for OoO/IG with DAG and DCG // forward propogate if (this->getVerbosityLevel() >= 2) std::cout << "Foward Propogation..." << std::endl; model_interpreter.FPTT(this->getMemorySize()); // calculate the model error and node output if (this->getVerbosityLevel() >= 2) std::cout << "Error Calculation..." << std::endl; const Eigen::Tensor<TensorT, 3> expected_tmp = output.chip(iter, 3); this->ApplyModelLosses_(model, expected_tmp, model_interpreter); // back propogate if (this->getVerbosityLevel() >= 2) std::cout << "Back Propogation..." << std::endl; if (this->getNTBPTTSteps() < 0) model_interpreter.TBPTT(this->getMemorySize()); else model_interpreter.TBPTT(this->getNTBPTTSteps()); // update the weights if (this->getVerbosityLevel() >= 2) std::cout << "Weight Update..." << std::endl; model_interpreter.updateWeights(iter); model_interpreter.getModelResults(model, false, false, true, false); const Eigen::Tensor<TensorT, 0> total_error = model.getError().sum(); model_error.push_back(total_error(0)); if (this->getVerbosityLevel() >= 1) std::cout << "Model " << model.getName() << " error: " << total_error(0) << std::endl; // log epoch if (this->getLogTraining()) { if (this->getVerbosityLevel() >= 2) std::cout << "Logging..." << std::endl; this->trainingModelLogger(iter, model, model_interpreter, model_logger, output.chip(iter, 3), output_nodes, input_nodes, total_error(0)); } // reinitialize the model if (iter != this->getNEpochsTraining() - 1) { model_interpreter.reInitNodes(); model_interpreter.reInitModelError(); } } // copy out results model_interpreter.getModelResults(model, true, true, true, false); if (this->getResetInterpreter()) { model_interpreter.clear_cache(); } else { model_interpreter.reInitNodes(); model_interpreter.reInitModelError(); } if (this->getResetModel()) { model.initNodeTensorIndices(); model.initWeightTensorIndices(); } return model_error; } template<typename TensorT, typename InterpreterT> inline std::pair<std::vector<TensorT>, std::vector<TensorT>> ModelTrainer<TensorT, InterpreterT>::trainModel(Model<TensorT>& model, DataSimulator<TensorT>& data_simulator, const std::vector<std::string>& input_nodes, ModelLogger<TensorT>& model_logger, InterpreterT & model_interpreter) { std::vector<TensorT> model_error_training; //model_error_training.reserve(this->getNEpochsTraining()); // FIXME: uncomment std::vector<TensorT> model_error_validation; //model_error_validation.reserve(this->getNEpochsTraining()); // FIXME: uncomment //std::vector<Eigen::Tensor<TensorT, 1>> model_metrics_training; //model_metrics_training.reserve(this->getNEpochsTraining()); //std::vector<Eigen::Tensor<TensorT, 1>> model_metrics_validation; //model_metrics_validation.reserve(this->getNEpochsTraining()); // Check the loss and metric functions if (!this->checkLossFunctions()) { return std::make_pair(model_error_training, model_error_validation); } if (!this->checkMetricFunctions()) { return std::make_pair(model_error_training, model_error_validation); } // Check the input node names if (!model.checkNodeNames(input_nodes)) { return std::make_pair(model_error_training, model_error_validation); } // Check the loss output node names std::vector<std::string> loss_output_nodes = this->getLossOutputNodesLinearized(); if (!model.checkNodeNames(loss_output_nodes)) { return std::make_pair(model_error_training, model_error_validation); } // Check the metric output node names std::vector<std::string> metric_output_nodes = this->getMetricOutputNodesLinearized(); if (!model.checkNodeNames(metric_output_nodes)) { return std::make_pair(model_error_training, model_error_validation); } // Initialize the model if (this->getVerbosityLevel() >= 2) std::cout << "Intializing the model..." << std::endl; if (this->getFindCycles()) model.findCycles(); // compile the graph into a set of operations and allocate all tensors if (this->getInterpretModel()) { if (this->getVerbosityLevel() >= 2) std::cout << "Interpreting the model..." << std::endl; model_interpreter.checkMemory(model, this->getBatchSize(), this->getMemorySize()); model_interpreter.getForwardPropogationOperations(model, this->getBatchSize(), this->getMemorySize(), true, this->getFastInterpreter(), this->getFindCycles(), this->getPreserveOoO()); model_interpreter.allocateModelErrorTensor(this->getBatchSize(), this->getMemorySize(), this->getNMetricFunctions()); } for (int iter = 0; iter < this->getNEpochsTraining(); ++iter) // use n_epochs here { // update the model hyperparameters this->adaptiveTrainerScheduler(0, iter, model, model_interpreter, model_error_training); // Generate the input and output data for validation if (this->getVerbosityLevel() >= 2) std::cout << "Generating the input/output data for validation..." << std::endl; Eigen::Tensor<TensorT, 3> input_data_validation(this->getBatchSize(), this->getMemorySize(), (int)input_nodes.size()); Eigen::Tensor<TensorT, 3> loss_output_data_validation(this->getBatchSize(), this->getMemorySize(), (int)loss_output_nodes.size()); Eigen::Tensor<TensorT, 3> metric_output_data_validation(this->getBatchSize(), this->getMemorySize(), (int)metric_output_nodes.size()); Eigen::Tensor<TensorT, 2> time_steps_validation(this->getBatchSize(), this->getMemorySize()); data_simulator.simulateValidationData(input_data_validation, loss_output_data_validation, metric_output_data_validation, time_steps_validation); // assign the input data model_interpreter.initBiases(model); // create the bias model_interpreter.mapValuesToLayers(model, input_data_validation, input_nodes, "output"); // Needed for OoO/IG with DAG and DCG model_interpreter.mapValuesToLayers(model, input_data_validation, input_nodes, "input"); // Needed for OoO/IG with DAG and DCG // forward propogate if (this->getVerbosityLevel() >= 2) std::cout << "Validation Foward Propogation..." << std::endl; model_interpreter.FPTT(this->getMemorySize()); // calculate the model error and node output if (this->getVerbosityLevel() >= 2) std::cout << "Validation Error Calculation..." << std::endl; this->ApplyModelLosses_(model, loss_output_data_validation, model_interpreter); // calculate the model metrics if (this->getVerbosityLevel() >= 2) std::cout << "Validation Metric Calculation..." << std::endl; this->ApplyModelMetrics_(model, metric_output_data_validation, model_interpreter); // get the model validation error and validation metrics model_interpreter.getModelResults(model, false, false, true, false); const Eigen::Tensor<TensorT, 0> total_error_validation = model.getError().sum(); //model_error_validation.push_back(total_error_validation(0)); // FIXME: uncomment Eigen::Tensor<TensorT, 1> total_metrics_validation = model.getMetric().sum(Eigen::array<Eigen::Index, 1>({ 1 })); //model_metrics_validation.push_back(total_metrics_validation); // re-initialize the model model_interpreter.reInitNodes(); model_interpreter.reInitModelError(); // Generate the input and output data for training if (this->getVerbosityLevel() >= 2) std::cout << "Generating the input/output data for training..." << std::endl; Eigen::Tensor<TensorT, 3> input_data_training(this->getBatchSize(), this->getMemorySize(), (int)input_nodes.size()); Eigen::Tensor<TensorT, 3> loss_output_data_training(this->getBatchSize(), this->getMemorySize(), (int)loss_output_nodes.size()); Eigen::Tensor<TensorT, 3> metric_output_data_training(this->getBatchSize(), this->getMemorySize(), (int)metric_output_nodes.size()); Eigen::Tensor<TensorT, 2> time_steps_training(this->getBatchSize(), this->getMemorySize()); data_simulator.simulateTrainingData(input_data_training, loss_output_data_training, metric_output_data_training, time_steps_training); // assign the input data model_interpreter.initBiases(model); // create the bias model_interpreter.mapValuesToLayers(model, input_data_training, input_nodes, "output"); // Needed for OoO/IG with DAG and DCG model_interpreter.mapValuesToLayers(model, input_data_training, input_nodes, "input"); // Needed for OoO/IG with DAG and DCG // forward propogate if (this->getVerbosityLevel() >= 2) std::cout << "Training Foward Propogation..." << std::endl; model_interpreter.FPTT(this->getMemorySize()); // calculate the model error and node output if (this->getVerbosityLevel() >= 2) std::cout << "Training Error Calculation..." << std::endl; this->ApplyModelLosses_(model, loss_output_data_training, model_interpreter); // calculate the model metrics if (this->getVerbosityLevel() >= 2) std::cout << "Training Metric Calculation..." << std::endl; this->ApplyModelMetrics_(model, metric_output_data_training, model_interpreter); // get the model training error model_interpreter.getModelResults(model, false, false, true, false); const Eigen::Tensor<TensorT, 0> total_error_training = model.getError().sum(); //model_error_training.push_back(total_error_training(0)); // FIXME: uncomment const Eigen::Tensor<TensorT, 1> total_metrics_training = model.getMetric().sum(Eigen::array<Eigen::Index, 1>({ 1 })); //model_metrics_training.push_back(total_metrics_training); if (this->getVerbosityLevel() >= 1) std::cout << "Model " << model.getName() << " error: " << total_error_training(0) << std::endl; // back propogate if (this->getVerbosityLevel() >= 2) std::cout << "Back Propogation..." << std::endl; if (this->getNTBPTTSteps() < 0) model_interpreter.TBPTT(this->getMemorySize()); else model_interpreter.TBPTT(this->getNTBPTTSteps()); // update the weights if (this->getVerbosityLevel() >= 2) std::cout << "Weight Update..." << std::endl; model_interpreter.updateWeights(iter); // log epoch if (this->getLogTraining()) { if (this->getVerbosityLevel() >= 2) std::cout << "Logging..." << std::endl; this->trainingModelLogger(iter, model, model_interpreter, model_logger, loss_output_data_training, loss_output_nodes, input_nodes, total_error_training(0), total_error_validation(0), total_metrics_training, total_metrics_validation); } // reinitialize the model if (iter != this->getNEpochsTraining() - 1) { model_interpreter.reInitNodes(); model_interpreter.reInitModelError(); } } // copy out results model_interpreter.getModelResults(model, true, true, true, false); // initialize the caches and reset the model (if desired) if (this->getResetInterpreter()) { model_interpreter.clear_cache(); } else { model_interpreter.reInitNodes(); model_interpreter.reInitModelError(); } if (this->getResetModel()) { model.initNodeTensorIndices(); model.initWeightTensorIndices(); } return std::make_pair(model_error_training, model_error_validation); } template<typename TensorT, typename InterpreterT> inline std::vector<TensorT> ModelTrainer<TensorT, InterpreterT>::validateModel(Model<TensorT>& model, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 4>& output, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes, ModelLogger<TensorT>& model_logger, InterpreterT& model_interpreter) { std::vector<TensorT> model_error; // Check input and output data if (!this->checkInputData(this->getNEpochsValidation(), input, this->getBatchSize(), this->getMemorySize(), input_nodes)) { return model_error; } std::vector<std::string> output_nodes = this->getLossOutputNodesLinearized(); if (!this->checkOutputData(this->getNEpochsValidation(), output, this->getBatchSize(), this->getMemorySize(), output_nodes)) { return model_error; } if (!this->checkTimeSteps(this->getNEpochsValidation(), time_steps, this->getBatchSize(), this->getMemorySize())) { return model_error; } if (!model.checkNodeNames(input_nodes)) { return model_error; } if (!model.checkNodeNames(output_nodes)) { return model_error; } // Initialize the model if (this->getFindCycles()) model.findCycles(); // compile the graph into a set of operations and allocate all tensors if (this->getInterpretModel()) { if (this->getVerbosityLevel() >= 2) std::cout << "Interpreting the model..." << std::endl; model_interpreter.checkMemory(model, this->getBatchSize(), this->getMemorySize()); model_interpreter.getForwardPropogationOperations(model, this->getBatchSize(), this->getMemorySize(), true, this->getFastInterpreter(), this->getFindCycles(), this->getPreserveOoO()); model_interpreter.allocateModelErrorTensor(this->getBatchSize(), this->getMemorySize(), this->getNMetricFunctions()); } for (int iter = 0; iter < this->getNEpochsValidation(); ++iter) // use n_epochs here { // assign the input data model_interpreter.initBiases(model); // create the bias model_interpreter.mapValuesToLayers(model, input.chip(iter, 3), input_nodes, "output"); // Needed for OoO/IG with DAG and DCG model_interpreter.mapValuesToLayers(model, input.chip(iter, 3), input_nodes, "input"); // Needed for IG with DAG and DCG // forward propogate if (this->getVerbosityLevel() >= 2) std::cout << "Foward Propogation..." << std::endl; model_interpreter.FPTT(this->getMemorySize()); // calculate the model error and node output if (this->getVerbosityLevel() >= 2) std::cout << "Error Calculation..." << std::endl; Eigen::Tensor<TensorT, 3> expected_tmp = output.chip(iter, 3); this->ApplyModelLosses_(model, expected_tmp, model_interpreter); model_interpreter.getModelResults(model, false, false, true, false); const Eigen::Tensor<TensorT, 0> total_error = model.getError().sum(); model_error.push_back(total_error(0)); if (this->getVerbosityLevel() >= 1) std::cout << "Model " << model.getName() << " error: " << total_error(0) << std::endl; // log epoch if (this->getLogValidation()) { if (this->getVerbosityLevel() >= 2) std::cout << "Logging..." << std::endl; this->validationModelLogger(iter, model, model_interpreter, model_logger, output.chip(iter, 3), output_nodes, input_nodes, total_error(0)); } // reinitialize the model if (iter != this->getNEpochsValidation() - 1) { model_interpreter.reInitNodes(); model_interpreter.reInitModelError(); } } // copy out results model_interpreter.getModelResults(model, true, true, true, false); // initialize the caches and reset the model (if desired) if (this->getResetInterpreter()) { model_interpreter.clear_cache(); } else { model_interpreter.reInitNodes(); model_interpreter.reInitModelError(); } if (this->getResetModel()) { model.initNodeTensorIndices(); model.initWeightTensorIndices(); } return model_error; } template<typename TensorT, typename InterpreterT> inline std::pair<std::vector<TensorT>, std::vector<TensorT>> ModelTrainer<TensorT, InterpreterT>::validateModel(Model<TensorT>& model, DataSimulator<TensorT>& data_simulator, const std::vector<std::string>& input_nodes, ModelLogger<TensorT>& model_logger, InterpreterT & model_interpreter) { std::vector<TensorT> model_error_training; std::vector<TensorT> model_error_validation; std::vector<Eigen::Tensor<TensorT, 1>> model_metrics_training; /// metric values std::vector<Eigen::Tensor<TensorT, 1>> model_metrics_validation; // Check the loss and metric functions if (!this->checkLossFunctions()) { return std::make_pair(model_error_training, model_error_validation); } if (!this->checkMetricFunctions()) { return std::make_pair(model_error_training, model_error_validation); } // Check the input node names if (!model.checkNodeNames(input_nodes)) { return std::make_pair(model_error_training, model_error_validation); } // Check the loss output node names std::vector<std::string> loss_output_nodes = this->getLossOutputNodesLinearized(); if (!model.checkNodeNames(loss_output_nodes)) { return std::make_pair(model_error_training, model_error_validation); } // Check the metric output node names std::vector<std::string> metric_output_nodes = this->getMetricOutputNodesLinearized(); if (!model.checkNodeNames(metric_output_nodes)) { return std::make_pair(model_error_training, model_error_validation); } // Initialize the model if (this->getVerbosityLevel() >= 2) std::cout << "Intializing the model..." << std::endl; if (this->getFindCycles()) model.findCycles(); // compile the graph into a set of operations and allocate all tensors if (this->getInterpretModel()) { if (this->getVerbosityLevel() >= 2) std::cout << "Interpreting the model..." << std::endl; model_interpreter.checkMemory(model, this->getBatchSize(), this->getMemorySize()); model_interpreter.getForwardPropogationOperations(model, this->getBatchSize(), this->getMemorySize(), true, this->getFastInterpreter(), this->getFindCycles(), this->getPreserveOoO()); model_interpreter.allocateModelErrorTensor(this->getBatchSize(), this->getMemorySize(), this->getNMetricFunctions()); } for (int iter = 0; iter < this->getNEpochsValidation(); ++iter) // use n_epochs here { // Generate the input and output data for validation if (this->getVerbosityLevel() >= 2) std::cout << "Generating the input/output data for validation..." << std::endl; Eigen::Tensor<TensorT, 3> input_data_validation(this->getBatchSize(), this->getMemorySize(), (int)input_nodes.size()); Eigen::Tensor<TensorT, 3> loss_output_data_validation(this->getBatchSize(), this->getMemorySize(), (int)loss_output_nodes.size()); Eigen::Tensor<TensorT, 3> metric_output_data_validation(this->getBatchSize(), this->getMemorySize(), (int)metric_output_nodes.size()); Eigen::Tensor<TensorT, 2> time_steps_validation(this->getBatchSize(), this->getMemorySize()); data_simulator.simulateValidationData(input_data_validation, loss_output_data_validation, metric_output_data_validation, time_steps_validation); // assign the input data model_interpreter.initBiases(model); // create the bias model_interpreter.mapValuesToLayers(model, input_data_validation, input_nodes, "output"); // Needed for OoO/IG with DAG and DCG model_interpreter.mapValuesToLayers(model, input_data_validation, input_nodes, "input"); // Needed for OoO/IG with DAG and DCG // forward propogate if (this->getVerbosityLevel() >= 2) std::cout << "Validation Foward Propogation..." << std::endl; model_interpreter.FPTT(this->getMemorySize()); // calculate the model error and node output if (this->getVerbosityLevel() >= 2) std::cout << "Validation Error Calculation..." << std::endl; this->ApplyModelLosses_(model, loss_output_data_validation, model_interpreter); // calculate the model metrics if (this->getVerbosityLevel() >= 2) std::cout << "Validation Metric Calculation..." << std::endl; this->ApplyModelMetrics_(model, metric_output_data_validation, model_interpreter); // get the model validation error and validation metrics model_interpreter.getModelResults(model, false, false, true, false); const Eigen::Tensor<TensorT, 0> total_error_validation = model.getError().sum(); model_error_validation.push_back(total_error_validation(0)); Eigen::Tensor<TensorT, 1> total_metrics_validation = model.getMetric().sum(Eigen::array<Eigen::Index, 1>({ 1 })); model_metrics_validation.push_back(total_metrics_validation); // re-initialize the model model_interpreter.reInitNodes(); model_interpreter.reInitModelError(); // Generate the input and output data for training if (this->getVerbosityLevel() >= 2) std::cout << "Generating the input/output data for training..." << std::endl; Eigen::Tensor<TensorT, 3> input_data_training(this->getBatchSize(), this->getMemorySize(), (int)input_nodes.size()); Eigen::Tensor<TensorT, 3> loss_output_data_training(this->getBatchSize(), this->getMemorySize(), (int)loss_output_nodes.size()); Eigen::Tensor<TensorT, 3> metric_output_data_training(this->getBatchSize(), this->getMemorySize(), (int)metric_output_nodes.size()); Eigen::Tensor<TensorT, 2> time_steps_training(this->getBatchSize(), this->getMemorySize()); data_simulator.simulateTrainingData(input_data_training, loss_output_data_training, metric_output_data_training, time_steps_training); // assign the input data model_interpreter.initBiases(model); // create the bias model_interpreter.mapValuesToLayers(model, input_data_training, input_nodes, "output"); // Needed for OoO/IG with DAG and DCG model_interpreter.mapValuesToLayers(model, input_data_training, input_nodes, "input"); // Needed for OoO/IG with DAG and DCG // forward propogate if (this->getVerbosityLevel() >= 2) std::cout << "Training Foward Propogation..." << std::endl; model_interpreter.FPTT(this->getMemorySize()); // calculate the model error and node output if (this->getVerbosityLevel() >= 2) std::cout << "Training Error Calculation..." << std::endl; this->ApplyModelLosses_(model, loss_output_data_training, model_interpreter); // calculate the model metrics if (this->getVerbosityLevel() >= 2) std::cout << "Training Metric Calculation..." << std::endl; this->ApplyModelMetrics_(model, metric_output_data_training, model_interpreter); // get the model training error model_interpreter.getModelResults(model, false, false, true, false); const Eigen::Tensor<TensorT, 0> total_error_training = model.getError().sum(); model_error_training.push_back(total_error_training(0)); const Eigen::Tensor<TensorT, 1> total_metrics_training = model.getMetric().sum(Eigen::array<Eigen::Index, 1>({ 1 })); model_metrics_training.push_back(total_metrics_training); if (this->getVerbosityLevel() >= 1) std::cout << "Model " << model.getName() << " error: " << total_error_training(0) << std::endl; // log epoch if (this->getLogValidation()) { if (this->getVerbosityLevel() >= 2) std::cout << "Logging..." << std::endl; this->validationModelLogger(iter, model, model_interpreter, model_logger, loss_output_data_training, loss_output_nodes, input_nodes, total_error_training(0), total_error_validation(0), total_metrics_training, total_metrics_validation); } // reinitialize the model if (iter != this->getNEpochsValidation() - 1) { model_interpreter.reInitNodes(); model_interpreter.reInitModelError(); } } // copy out results model_interpreter.getModelResults(model, true, true, true, false); // initialize the caches and reset the model (if desired) if (this->getResetInterpreter()) { model_interpreter.clear_cache(); } else { model_interpreter.reInitNodes(); model_interpreter.reInitModelError(); } if (this->getResetModel()) { model.initNodeTensorIndices(); model.initWeightTensorIndices(); } return std::make_pair(model_error_training, model_error_validation); } template<typename TensorT, typename InterpreterT> inline Eigen::Tensor<TensorT, 4> ModelTrainer<TensorT, InterpreterT>::evaluateModel(Model<TensorT>& model, const Eigen::Tensor<TensorT, 4>& input, const Eigen::Tensor<TensorT, 3>& time_steps, const std::vector<std::string>& input_nodes, ModelLogger<TensorT>& model_logger, InterpreterT& model_interpreter) { std::vector<std::string> output_nodes = this->getLossOutputNodesLinearized(); Eigen::Tensor<TensorT, 4> model_output(this->getBatchSize(), this->getMemorySize(), (int)output_nodes.size(), this->getNEpochsEvaluation()); // for each epoch, for each output node, batch_size x memory_size // Check input data if (!this->checkInputData(this->getNEpochsEvaluation(), input, this->getBatchSize(), this->getMemorySize(), input_nodes)) { return model_output; } if (!this->checkTimeSteps(this->getNEpochsEvaluation(), time_steps, this->getBatchSize(), this->getMemorySize())) { return model_output; } if (!model.checkNodeNames(input_nodes)) { return model_output; } if (!model.checkNodeNames(output_nodes)) { return model_output; } // Initialize the model if (this->getFindCycles()) model.findCycles(); // compile the graph into a set of operations and allocate all tensors if (this->getInterpretModel()) { if (this->getVerbosityLevel() >= 2) std::cout << "Interpreting the model..." << std::endl; model_interpreter.checkMemory(model, this->getBatchSize(), this->getMemorySize()); model_interpreter.getForwardPropogationOperations(model, this->getBatchSize(), this->getMemorySize(), true, this->getFastInterpreter(), this->getFindCycles(), this->getPreserveOoO()); } for (int iter = 0; iter < this->getNEpochsEvaluation(); ++iter) // use n_epochs here { // assign the input data model_interpreter.initBiases(model); // create the bias model_interpreter.mapValuesToLayers(model, input.chip(iter, 3), input_nodes, "output"); // Needed for OoO/IG with DAG and DCG model_interpreter.mapValuesToLayers(model, input.chip(iter, 3), input_nodes, "input"); // Needed for IG with DAG and DCG // forward propogate if (this->getVerbosityLevel() >= 2) std::cout << "Foward Propogation..." << std::endl; model_interpreter.FPTT(this->getMemorySize()); // extract out the model output model_interpreter.getModelResults(model, true, false, false, false); std::vector<Eigen::Tensor<TensorT, 2>> output; int node_iter = 0; for (const std::string& output_node : output_nodes) { for (int batch_iter = 0; batch_iter < this->getBatchSize(); ++batch_iter) { for (int memory_iter = 0; memory_iter < this->getMemorySize(); ++memory_iter) { model_output(batch_iter, memory_iter, node_iter, iter) = model.getNodesMap().at(output_node)->getOutput()(batch_iter, memory_iter); } } ++node_iter; } // log epoch if (this->getLogEvaluation()) { if (this->getVerbosityLevel() >= 2) std::cout << "Logging..." << std::endl; this->evaluationModelLogger(iter, model, model_interpreter, model_logger, output_nodes, input_nodes); } // reinitialize the model if (iter != this->getNEpochsEvaluation() - 1) { model_interpreter.reInitNodes(); } } // copy out results model_interpreter.getModelResults(model, true, true, false, false); if (this->getResetInterpreter()) { model_interpreter.clear_cache(); } else { model_interpreter.reInitNodes(); } if (this->getResetModel()) { model.initNodeTensorIndices(); model.initWeightTensorIndices(); } return model_output; } template<typename TensorT, typename InterpreterT> inline Eigen::Tensor<TensorT, 4> ModelTrainer<TensorT, InterpreterT>::evaluateModel(Model<TensorT>& model, DataSimulator<TensorT>& data_simulator, const std::vector<std::string>& input_nodes, ModelLogger<TensorT>& model_logger, InterpreterT & model_interpreter) { std::vector<std::string> output_nodes = this->getLossOutputNodesLinearized(); Eigen::Tensor<TensorT, 4> model_output(this->getBatchSize(), this->getMemorySize(), (int)output_nodes.size(), this->getNEpochsEvaluation()); // for each epoch, for each output node, batch_size x memory_size // Check the loss and metric functions if (!this->checkMetricFunctions()) { return model_output; } // Check inputs if (!model.checkNodeNames(input_nodes)) { return model_output; } if (!model.checkNodeNames(output_nodes)) { return model_output; } // Check the metric output node names std::vector<std::string> metric_output_nodes = this->getMetricOutputNodesLinearized(); if (!model.checkNodeNames(metric_output_nodes)) { return model_output; } // Initialize the model if (this->getFindCycles()) model.findCycles(); // compile the graph into a set of operations and allocate all tensors if (this->getInterpretModel()) { if (this->getVerbosityLevel() >= 2) std::cout << "Interpreting the model..." << std::endl; model_interpreter.checkMemory(model, this->getBatchSize(), this->getMemorySize()); model_interpreter.getForwardPropogationOperations(model, this->getBatchSize(), this->getMemorySize(), true, this->getFastInterpreter(), this->getFindCycles(), this->getPreserveOoO()); model_interpreter.allocateModelErrorTensor(this->getBatchSize(), this->getMemorySize(), this->getNMetricFunctions()); } for (int iter = 0; iter < this->getNEpochsEvaluation(); ++iter) // use n_epochs here { // Generate the input and output data for evaluation if (this->getVerbosityLevel() >= 2) std::cout << "Generating the input/output data for evaluation..." << std::endl; Eigen::Tensor<TensorT, 3> input_data(this->getBatchSize(), this->getMemorySize(), (int)input_nodes.size()); Eigen::Tensor<TensorT, 3> metric_output_data(this->getBatchSize(), this->getMemorySize(), (int)metric_output_nodes.size()); Eigen::Tensor<TensorT, 2> time_steps(this->getBatchSize(), this->getMemorySize()); data_simulator.simulateEvaluationData(input_data, metric_output_data, time_steps); // assign the input data model_interpreter.initBiases(model); // create the bias model_interpreter.mapValuesToLayers(model, input_data, input_nodes, "output"); // Needed for OoO/IG with DAG and DCG model_interpreter.mapValuesToLayers(model, input_data, input_nodes, "input"); // Needed for IG with DAG and DCG // forward propogate if (this->getVerbosityLevel() >= 2) std::cout << "Foward Propogation..." << std::endl; model_interpreter.FPTT(this->getMemorySize()); // calculate the model metrics if (this->getVerbosityLevel() >= 2) std::cout << "Metric Calculation..." << std::endl; this->ApplyModelMetrics_(model, metric_output_data, model_interpreter); // get the model metrics model_interpreter.getModelResults(model, false, false, true, false); Eigen::Tensor<TensorT, 1> total_metrics = model.getMetric().sum(Eigen::array<Eigen::Index, 1>({ 1 })); // extract out the model output model_interpreter.getModelResults(model, true, false, false, false); int node_iter = 0; for (const std::string& output_node : output_nodes) { for (int batch_iter = 0; batch_iter < this->getBatchSize(); ++batch_iter) { for (int memory_iter = 0; memory_iter < this->getMemorySize(); ++memory_iter) { model_output(batch_iter, memory_iter, node_iter, iter) = model.getNodesMap().at(output_node)->getOutput()(batch_iter, memory_iter); } } ++node_iter; } // log epoch if (this->getLogEvaluation()) { if (this->getVerbosityLevel() >= 2) std::cout << "Logging..." << std::endl; this->evaluationModelLogger(iter, model, model_interpreter, model_logger, metric_output_data, output_nodes, input_nodes, total_metrics); } // reinitialize the model if (iter != this->getNEpochsEvaluation() - 1) { model_interpreter.reInitNodes(); } } // copy out results model_interpreter.getModelResults(model, true, true, false, false); if (this->getResetInterpreter()) { model_interpreter.clear_cache(); } else { model_interpreter.reInitNodes(); } if (this->getResetModel()) { model.initNodeTensorIndices(); model.initWeightTensorIndices(); } return model_output; } template<typename TensorT, typename InterpreterT> inline Model<TensorT> ModelTrainer<TensorT, InterpreterT>::makeModel() { return Model<TensorT>(); // USER: create your own overload method } template<typename TensorT, typename InterpreterT> inline void ModelTrainer<TensorT, InterpreterT>::adaptiveTrainerScheduler(const int & n_generations, const int & n_epochs, Model<TensorT>& model, InterpreterT & model_interpreter, const std::vector<TensorT>& model_errors) { // USER: create your own overload method } template<typename TensorT, typename InterpreterT> inline void ModelTrainer<TensorT, InterpreterT>::trainingModelLogger(const int & n_epochs, Model<TensorT>& model, InterpreterT & model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error) { if (n_epochs == 0) { model_logger.initLogs(model); } if (n_epochs % 10 == 0) { if (model_logger.getLogExpectedEpoch() || model_logger.getLogNodeOutputsEpoch()) model_interpreter.getModelResults(model, true, false, false, false); if (model_logger.getLogNodeInputsEpoch()) model_interpreter.getModelResults(model, false, false, false, true); model_logger.writeLogs(model, n_epochs, { "Error" }, {}, { model_error }, {}, output_nodes, expected_values, {}, output_nodes, {}, input_nodes, {}); } } template<typename TensorT, typename InterpreterT> inline void ModelTrainer<TensorT, InterpreterT>::trainingModelLogger(const int & n_epochs, Model<TensorT>& model, InterpreterT & model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT & model_error_train, const TensorT & model_error_test, const Eigen::Tensor<TensorT, 1> & model_metrics_train, const Eigen::Tensor<TensorT, 1> & model_metrics_test) { if (n_epochs == 0) { model_logger.initLogs(model); } if (n_epochs % 10 == 0) { // Get the node values if logging the expected and predicted if (model_logger.getLogExpectedEpoch() || model_logger.getLogNodeOutputsEpoch()) model_interpreter.getModelResults(model, true, false, false, false); if (model_logger.getLogNodeInputsEpoch()) model_interpreter.getModelResults(model, false, false, false, true); // Create the metric headers and data arrays std::vector<std::string> log_train_headers = { "Train_Error" }; std::vector<std::string> log_test_headers = { "Test_Error" }; std::vector<TensorT> log_train_values = { model_error_train }; std::vector<TensorT> log_test_values = { model_error_test }; int metric_iter = 0; for (const std::string& metric_name : this->getMetricNamesLinearized()) { log_train_headers.push_back(metric_name); log_test_headers.push_back(metric_name); log_train_values.push_back(model_metrics_train(metric_iter)); log_test_values.push_back(model_metrics_test(metric_iter)); ++metric_iter; } model_logger.writeLogs(model, n_epochs, log_train_headers, log_test_headers, log_train_values, log_test_values, output_nodes, expected_values, {}, output_nodes, {}, input_nodes, {}); } } template<typename TensorT, typename InterpreterT> inline void ModelTrainer<TensorT, InterpreterT>::validationModelLogger(const int & n_epochs, Model<TensorT>& model, InterpreterT & model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error) { if (n_epochs == 0) { model_logger.initLogs(model); } if (n_epochs % 10 == 0) { if (model_logger.getLogExpectedEpoch() || model_logger.getLogNodeOutputsEpoch()) model_interpreter.getModelResults(model, true, false, false, false); if (model_logger.getLogNodeInputsEpoch()) model_interpreter.getModelResults(model, false, false, false, true); model_logger.writeLogs(model, n_epochs, {}, { "Error" }, {}, { model_error }, output_nodes, expected_values, {}, output_nodes, {}, input_nodes, {}); } } template<typename TensorT, typename InterpreterT> inline void ModelTrainer<TensorT, InterpreterT>::validationModelLogger(const int & n_epochs, Model<TensorT>& model, InterpreterT & model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT & model_error_train, const TensorT & model_error_test, const Eigen::Tensor<TensorT, 1> & model_metrics_train, const Eigen::Tensor<TensorT, 1> & model_metrics_test) { if (n_epochs == 0) { model_logger.initLogs(model); } if (n_epochs % 10 == 0) { // Get the node values if logging the expected and predicted if (model_logger.getLogExpectedEpoch() || model_logger.getLogNodeOutputsEpoch()) model_interpreter.getModelResults(model, true, false, false, false); if (model_logger.getLogNodeInputsEpoch()) model_interpreter.getModelResults(model, false, false, false, true); // Create the metric headers and data arrays std::vector<std::string> log_train_headers = { "Train_Error" }; std::vector<std::string> log_test_headers = { "Test_Error" }; std::vector<TensorT> log_train_values = { model_error_train }; std::vector<TensorT> log_test_values = { model_error_test }; int metric_iter = 0; for (const std::string& metric_name : this->getMetricNamesLinearized()) { log_train_headers.push_back(metric_name); log_test_headers.push_back(metric_name); log_train_values.push_back(model_metrics_train(metric_iter)); log_test_values.push_back(model_metrics_test(metric_iter)); ++metric_iter; } model_logger.writeLogs(model, n_epochs, log_train_headers, log_test_headers, log_train_values, log_test_values, output_nodes, expected_values, {}, output_nodes, {}, input_nodes, {}); } } template<typename TensorT, typename InterpreterT> inline void ModelTrainer<TensorT, InterpreterT>::evaluationModelLogger(const int & n_epochs, Model<TensorT>& model, InterpreterT & model_interpreter, ModelLogger<TensorT>& model_logger, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes) { if (n_epochs == 0) { model_logger.initLogs(model); } if (n_epochs % 1 == 0) { if (model_logger.getLogNodeOutputsEpoch()) model_interpreter.getModelResults(model, true, false, false, false); if (model_logger.getLogNodeInputsEpoch()) model_interpreter.getModelResults(model, false, false, false, true); model_logger.writeLogs(model, n_epochs, {}, {}, {}, {}, output_nodes, Eigen::Tensor<TensorT, 3>(), {}, output_nodes, {}, input_nodes, {}); } } template<typename TensorT, typename InterpreterT> inline void ModelTrainer<TensorT, InterpreterT>::evaluationModelLogger(const int& n_epochs, Model<TensorT>& model, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const Eigen::Tensor<TensorT, 1>& model_metrics) { if (n_epochs == 0) { model_logger.initLogs(model); } if (n_epochs % 10 == 0) { // Get the node values if logging the expected and predicted if (model_logger.getLogExpectedEpoch() || model_logger.getLogNodeOutputsEpoch()) model_interpreter.getModelResults(model, true, false, false, false); if (model_logger.getLogNodeInputsEpoch()) model_interpreter.getModelResults(model, false, false, false, true); // Create the metric headers and data arrays std::vector<std::string> log_headers; std::vector<TensorT> log_values; int metric_iter = 0; for (const std::string& metric_name : this->getMetricNamesLinearized()) { log_headers.push_back(metric_name); log_values.push_back(model_metrics(metric_iter)); ++metric_iter; } model_logger.writeLogs(model, n_epochs, log_headers, {}, log_values, {}, output_nodes, expected_values, {}, output_nodes, {}, input_nodes, {}); } } template<typename TensorT, typename InterpreterT> inline TensorT ModelTrainer<TensorT, InterpreterT>::reduceLROnPlateau(const std::vector<float>& model_errors, const TensorT & decay, const int & n_epochs_avg, const int & n_epochs_win, const TensorT & min_perc_error_diff) { assert(n_epochs_avg > n_epochs_win); // The number of average epochs is less than the number of windowed epochs. int cur_epoch = model_errors.size() - 1; // Check that enough epochs has elapsed if (cur_epoch < n_epochs_avg) return (TensorT)1; // Calculate the averages TensorT avg_error = 0; for (int i = 0; i < n_epochs_avg; ++i) { avg_error += model_errors.at(cur_epoch - i); } avg_error /= (TensorT)n_epochs_avg; TensorT win_error = 0; for (int i = 0; i < n_epochs_win; ++i) { win_error += model_errors.at(cur_epoch - i); } win_error /= (TensorT)n_epochs_win; // Check if the threshold has been met TensorT percent_diff = (avg_error - win_error) / avg_error; if (percent_diff < min_perc_error_diff) { return decay; } else return (TensorT)1; } template<typename TensorT, typename InterpreterT> inline void ModelTrainer<TensorT, InterpreterT>::ApplyModelLosses_(Model<TensorT>& model, const Eigen::Tensor<TensorT, 3>& output, InterpreterT& model_interpreter) { int output_node_cnt = 0; for (auto& helper : this->loss_function_helpers_) { // Slice out the output Eigen::array<Eigen::Index, 3> offsets = {0, 0, output_node_cnt}; Eigen::array<Eigen::Index, 3> spans = { this->getBatchSize(), this->getMemorySize(), (int)helper.output_nodes_.size() }; Eigen::Tensor<TensorT, 3> expected = output.slice(offsets, spans); // Calculate the errors for (int loss_iter = 0; loss_iter < helper.loss_functions_.size(); ++loss_iter) { if (this->getNTETTSteps() < 0) model_interpreter.CETT(model, expected, helper.output_nodes_, helper.loss_functions_.at(loss_iter), helper.loss_function_grads_.at(loss_iter), this->getMemorySize()); else model_interpreter.CETT(model, expected, helper.output_nodes_, helper.loss_functions_.at(loss_iter), helper.loss_function_grads_.at(loss_iter), this->getNTETTSteps()); } output_node_cnt += helper.output_nodes_.size(); } } template<typename TensorT, typename InterpreterT> inline void ModelTrainer<TensorT, InterpreterT>::ApplyModelMetrics_(Model<TensorT>& model, const Eigen::Tensor<TensorT, 3>& output, InterpreterT& model_interpreter) { int output_node_cnt = 0; int metric_cnt = 0; for (auto& helper : this->metric_function_helpers_) { // Slice out the output Eigen::array<Eigen::Index, 3> offsets = { 0, 0, output_node_cnt }; Eigen::array<Eigen::Index, 3> spans = { this->getBatchSize(), this->getMemorySize(), (int)helper.output_nodes_.size() }; Eigen::Tensor<TensorT, 3> expected = output.slice(offsets, spans); // Calculate the metrics for (size_t metric_iter = 0; metric_iter < helper.metric_functions_.size(); ++metric_iter) { if (this->getNTETTSteps() < 0) model_interpreter.CMTT(model, expected, helper.output_nodes_, helper.metric_functions_.at(metric_iter), this->getMemorySize(), metric_cnt); else model_interpreter.CMTT(model, expected, helper.output_nodes_, helper.metric_functions_.at(metric_iter), this->getNTETTSteps(), metric_cnt); ++metric_cnt; } output_node_cnt += helper.output_nodes_.size(); } } } #endif //EVONET_MODELTRAINER_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_DATAFILE_H #define EVONET_DATAFILE_H #include <unsupported/Eigen/CXX11/Tensor> #include <iostream> #include <fstream> #include <vector> namespace EvoNet { /** @brief DataFile based on the following: https://stackoverflow.com/questions/25389480/how-to-write-read-an-eigen-matrix-from-binary-file TODO copy over tests */ class DataFile { public: DataFile() = default; ///< Default constructor ~DataFile() = default; ///< Default destructor /** @brief Load data from file @param filename The name of the data file @param data The data to load data into @returns Status True on success, False if not */ template<typename T, int R> bool loadDataBinary(const std::string& filename, Eigen::Tensor<T, R>& data) { try { std::ifstream in(filename, std::ios::in | std::ios::binary); in.seekg(0); Eigen::array<Eigen::DenseIndex, R> dims; for (int i=0; i<R; ++i) { char value_char[12]; in.read((char*) (&value_char), sizeof(value_char)); dims[i] = (int)std::stoi(value_char); // printf("dimension loaded: %d = %d\n", i, dims[i]); // DEBUGGING } data = Eigen::Tensor<T, R>(dims); in.read((char *) data.data(), sizeof(data.data())); in.close(); return true; } catch (std::exception& e) { printf("Exception: %s", e.what()); return false; } catch (...) { printf("Exception"); return false; } }; /** @brief Load data from file @param filename The name of the data file @param data The data to load data into @returns Status True on success, False if not */ template<typename T, int R> bool storeDataBinary(const std::string& filename, const Eigen::Tensor<T, R>& data) { try { std::ofstream out(filename, std::ios::out | std::ios::binary | std::ios::trunc); for (int i=0; i<R; ++i) { char value_char[12]; // printf("dimension stored: %d = %d\n", i, data.dimension(i)); // DEBUGGING sprintf(value_char, "%d", data.dimension(i)); out.write((char*) (&value_char), sizeof(value_char)); // out.write((char*) (&data.dimension(i)), sizeof(typename Eigen::Tensor<T, R>::Index)); } out.write((char*) data.data(), data.size()*sizeof(typename Eigen::Tensor<T, R>::Scalar)); out.close(); return true; } catch (std::exception& e) { printf("Exception: %s", e.what()); return false; } catch (...) { printf("Exception"); return false; } }; }; } #endif //EVONET_DATAFILE_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE DataSimulator test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/simulator/DataSimulator.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(datasimulator) template<typename TensorT> class DataSimulatorExt : public DataSimulator<TensorT> { public: void simulateTrainingData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int epochs_iter = 0; epochs_iter < n_epochs; ++epochs_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 0.0f; // TODO } for (int nodes_iter = 0; nodes_iter < n_output_nodes; ++nodes_iter) { output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 0.0f; // TODO; } } } } // update the time_steps time_steps.setConstant(1.0f); // TODO } void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = 0.0f; // TODO } for (int nodes_iter = 0; nodes_iter < n_output_nodes; ++nodes_iter) { output_data(batch_iter, memory_iter, nodes_iter) = 0.0f; // TODO; } } } // update the time_steps time_steps.setConstant(1.0f); // TODO } void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_loss_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = 0.0f; // TODO } for (int nodes_iter = 0; nodes_iter < n_loss_output_nodes; ++nodes_iter) { loss_output_data(batch_iter, memory_iter, nodes_iter) = 0.0f; // TODO; } for (int nodes_iter = 0; nodes_iter < n_metric_output_nodes; ++nodes_iter) { metric_output_data(batch_iter, memory_iter, nodes_iter) = 0.0f; // TODO; } } } // update the time_steps time_steps.setConstant(1.0f); // TODO } void simulateValidationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int epochs_iter = 0; epochs_iter < n_epochs; ++epochs_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 0.0f; // TODO } for (int nodes_iter = 0; nodes_iter < n_output_nodes; ++nodes_iter) { output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 0.0f; // TODO; } } } } // update the time_steps time_steps.setConstant(1.0f); // TODO } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = 0.0f; // TODO } for (int nodes_iter = 0; nodes_iter < n_output_nodes; ++nodes_iter) { output_data(batch_iter, memory_iter, nodes_iter) = 0.0f; // TODO; } } } // update the time_steps time_steps.setConstant(1.0f); // TODO } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_loss_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = 0.0f; // TODO } for (int nodes_iter = 0; nodes_iter < n_loss_output_nodes; ++nodes_iter) { loss_output_data(batch_iter, memory_iter, nodes_iter) = 0.0f; // TODO; } for (int nodes_iter = 0; nodes_iter < n_metric_output_nodes; ++nodes_iter) { metric_output_data(batch_iter, memory_iter, nodes_iter) = 0.0f; // TODO; } } } // update the time_steps time_steps.setConstant(1.0f); // TODO } void simulateEvaluationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 3>& time_steps) override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_epochs = input_data.dimension(3); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int epochs_iter = 0; epochs_iter < n_epochs; ++epochs_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 0.0f; // TODO } } } } // update the time_steps time_steps.setConstant(1.0f); // TODO } void simulateEvaluationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = metric_output_data.dimension(2); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = 0.0f; // TODO } for (int nodes_iter = 0; nodes_iter < n_output_nodes; ++nodes_iter) { metric_output_data(batch_iter, memory_iter, nodes_iter) = 0.0f; // TODO } } } // update the time_steps time_steps.setConstant(1.0f); // TODO } }; BOOST_AUTO_TEST_CASE(constructor) { DataSimulatorExt<float>* ptr = nullptr; DataSimulatorExt<float>* nullPointer = nullptr; ptr = new DataSimulatorExt<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { DataSimulatorExt<float>* ptr = nullptr; ptr = new DataSimulatorExt<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(simulateTrainingData1) { DataSimulatorExt<float> datasimulator; Eigen::Tensor<float, 4> input_data(1, 1, 1, 1); Eigen::Tensor<float, 4> output_data(1, 1, 1, 1); Eigen::Tensor<float, 3> time_steps(1, 1, 1); datasimulator.simulateTrainingData(input_data, output_data, time_steps); BOOST_CHECK_EQUAL(input_data(0, 0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(output_data(0, 0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(time_steps(0, 0, 0), 1.0f); } BOOST_AUTO_TEST_CASE(simulateTrainingData2) { DataSimulatorExt<float> datasimulator; Eigen::Tensor<float, 3> input_data(1, 1, 1); Eigen::Tensor<float, 3> output_data(1, 1, 1); Eigen::Tensor<float, 2> time_steps(1, 1); datasimulator.simulateTrainingData(input_data, output_data, time_steps); BOOST_CHECK_EQUAL(input_data(0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(output_data(0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(time_steps(0, 0), 1.0f); } BOOST_AUTO_TEST_CASE(simulateTrainingData3) { DataSimulatorExt<float> datasimulator; Eigen::Tensor<float, 3> input_data(1, 1, 1); Eigen::Tensor<float, 3> loss_output_data(1, 1, 1); Eigen::Tensor<float, 3> metric_output_data(1, 1, 1); Eigen::Tensor<float, 2> time_steps(1, 1); datasimulator.simulateTrainingData(input_data, loss_output_data, metric_output_data, time_steps); BOOST_CHECK_EQUAL(input_data(0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(loss_output_data(0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(metric_output_data(0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(time_steps(0, 0), 1.0f); } BOOST_AUTO_TEST_CASE(simulateValidationData1) { DataSimulatorExt<float> datasimulator; Eigen::Tensor<float, 4> input_data(1, 1, 1, 1); Eigen::Tensor<float, 4> output_data(1, 1, 1, 1); Eigen::Tensor<float, 3> time_steps(1, 1, 1); datasimulator.simulateValidationData(input_data, output_data, time_steps); BOOST_CHECK_EQUAL(input_data(0, 0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(output_data(0, 0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(time_steps(0, 0, 0), 1.0f); } BOOST_AUTO_TEST_CASE(simulateValidationData2) { DataSimulatorExt<float> datasimulator; Eigen::Tensor<float, 3> input_data(1, 1, 1); Eigen::Tensor<float, 3> output_data(1, 1, 1); Eigen::Tensor<float, 2> time_steps(1, 1); datasimulator.simulateValidationData(input_data, output_data, time_steps); BOOST_CHECK_EQUAL(input_data(0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(output_data(0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(time_steps(0, 0), 1.0f); } BOOST_AUTO_TEST_CASE(simulateValidationData3) { DataSimulatorExt<float> datasimulator; Eigen::Tensor<float, 3> input_data(1, 1, 1); Eigen::Tensor<float, 3> loss_output_data(1, 1, 1); Eigen::Tensor<float, 3> metric_output_data(1, 1, 1); Eigen::Tensor<float, 2> time_steps(1, 1); datasimulator.simulateValidationData(input_data, loss_output_data, metric_output_data, time_steps); BOOST_CHECK_EQUAL(input_data(0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(loss_output_data(0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(metric_output_data(0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(time_steps(0, 0), 1.0f); } BOOST_AUTO_TEST_CASE(simulateEvaluationData1) { DataSimulatorExt<float> datasimulator; Eigen::Tensor<float, 4> input_data(1, 1, 1, 1); Eigen::Tensor<float, 3> time_steps(1, 1, 1); datasimulator.simulateEvaluationData(input_data, time_steps); BOOST_CHECK_EQUAL(input_data(0, 0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(time_steps(0, 0, 0), 1.0f); } BOOST_AUTO_TEST_CASE(simulateEvaluationData2) { DataSimulatorExt<float> datasimulator; Eigen::Tensor<float, 3> input_data(1, 1, 1); Eigen::Tensor<float, 3> metric_output_data(1, 1, 1); Eigen::Tensor<float, 2> time_steps(1, 1); datasimulator.simulateEvaluationData(input_data, metric_output_data, time_steps); BOOST_CHECK_EQUAL(input_data(0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(metric_output_data(0, 0, 0), 0.0f); BOOST_CHECK_EQUAL(time_steps(0, 0), 1.0f); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_LINKFILE_H #define EVONET_LINKFILE_H #include <EvoNet/ml/Link.h> #include <iostream> #include <fstream> #include <map> #include <memory> namespace EvoNet { /** @brief LinkFile */ class LinkFile { public: LinkFile() = default; ///< Default constructor ~LinkFile() = default; ///< Default destructor /** @brief Load links from file @param filename The name of the links file @param links The links to load data into @returns Status True on success, False if not */ bool loadLinksBinary(const std::string& filename, std::map<std::string, std::shared_ptr<Link>>& links); bool loadLinksCsv(const std::string& filename, std::map<std::string, std::shared_ptr<Link>>& links); /** @brief save links to file @param filename The name of the links file @param links The links to load data into @returns Status True on success, False if not */ bool storeLinksBinary(const std::string& filename, std::map<std::string, std::shared_ptr<Link>>& links); bool storeLinksCsv(const std::string& filename, std::map<std::string, std::shared_ptr<Link>>& links); }; } #endif //EVONET_LINKFILE_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE DataFile test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/io/DataFile.h> // #include <filesystem> C++ 17 using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(DataFile1) BOOST_AUTO_TEST_CASE(constructor) { DataFile* ptr = nullptr; DataFile* nullPointer = nullptr; ptr = new DataFile(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { DataFile* ptr = nullptr; ptr = new DataFile(); delete ptr; } BOOST_AUTO_TEST_CASE(storeAndLoadBinary) { DataFile data; // std::path data_path = std::current_path().replace_filename("data"); C++ 17 // data_path /= "DataFileTest.dat"; C++ 17 std::string filename = "DataFileTest.dat"; Eigen::Tensor<float, 3> random_dat(2,2,2); random_dat.setRandom(); data.storeDataBinary<float, 3>(filename, random_dat); // data.storeDataBinary(data_path.string(), random_dat); C++ 17 Eigen::Tensor<float, 3> test_dat(2,2,2); data.loadDataBinary<float, 3>(filename, test_dat); // data.loadDataBinary(data_path.string(), test_dat); C++ 17 BOOST_CHECK_CLOSE(test_dat(0, 0, 0), random_dat(0, 0, 0), 1e-6); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/Link.h> #include <vector> #include <cmath> #include <iostream> namespace EvoNet { Link::Link() { } Link::Link(const Link& other) { id_ = other.id_; name_ = other.name_; module_id_ = other.module_id_; module_name_ = other.module_name_; source_node_name_ = other.source_node_name_; sink_node_name_ = other.sink_node_name_; weight_name_ = other.weight_name_; } Link::Link(const int& id): id_(id) { if (name_ == "") { name_ = std::to_string(id); } } Link::Link(const std::string& name): name_(name) { } Link::Link(const std::string& name, const std::string& source_node_name, const std::string& sink_node_name, const std::string& weight_name): name_(name), weight_name_(weight_name) { setSourceNodeName(source_node_name); setSinkNodeName(sink_node_name); } Link::~Link() { } void Link::setId(const int& id) { id_ = id; } int Link::getId() const { return id_; } void Link::setName(const std::string& name) { name_ = name; } std::string Link::getName() const { return name_; } void Link::setSourceNodeName(const std::string& source_node_name) { if (sink_node_name_ == source_node_name) { std::cout << "Source and Sink nodes are the same!" << std::endl; source_node_name_ = source_node_name; } else { source_node_name_ = source_node_name; } } std::string Link::getSourceNodeName() const { return source_node_name_; } void Link::setSinkNodeName(const std::string& sink_node_name) { sink_node_name_ = sink_node_name; } std::string Link::getSinkNodeName() const { return sink_node_name_; } void Link::setWeightName(const std::string& weight_name) { weight_name_ = weight_name; } std::string Link::getWeightName() const { return weight_name_; } void Link::setModuleId(const int & module_id) { module_id_ = module_id; } int Link::getModuleId() const { return module_id_; } void Link::setModuleName(const std::string & module_name) { module_name_ = module_name; } std::string Link::getModuleName() const { return module_name_; } }<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_POPULATIONTRAINERDEFAULTDEVICE_H #define EVONET_POPULATIONTRAINERDEFAULTDEVICE_H // .h #include <EvoNet/ml/PopulationTrainer.h> #include <EvoNet/ml/ModelInterpreterDefaultDevice.h> // .cpp namespace EvoNet { /** @brief Class to train a vector of models */ template<typename TensorT> class PopulationTrainerDefaultDevice : public PopulationTrainer<TensorT, ModelInterpreterDefaultDevice<TensorT>> { }; } #endif //EVONET_POPULATIONTRAINERDEFAULTDEVICE_H<file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/PopulationTrainerDefaultDevice.h> #include <EvoNet/ml/ModelTrainerDefaultDevice.h> #include <EvoNet/ml/ModelReplicator.h> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/io/PopulationTrainerFile.h> #include <EvoNet/io/ModelInterpreterFileDefaultDevice.h> #include <EvoNet/simulator/BiochemicalReaction.h> #include <unsupported/Eigen/CXX11/Tensor> #include <EvoNet/io/ModelFile.h> using namespace EvoNet; // Other extended classes template<typename TensorT> class LatentArithmetic { public: LatentArithmetic() {}; ~LatentArithmetic() = default; /* @brief Generate an encoded latent space @param[in] sample_group_name @returns 4D Tensor of the encoded latent space */ Eigen::Tensor<TensorT, 4> generateEncoding( const std::string& sample_group_name, ModelTrainerDefaultDevice<TensorT>& model_trainer, ModelLogger<TensorT>& model_logger) { // Make the input nodes std::vector<std::string> input_nodes; for (int i = 0; i < this->n_input_nodes_; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } // Make the mu nodes std::vector<std::string> encoding_nodes_mu; for (int i = 0; i < this->encoding_size_; ++i) { char name_char[512]; sprintf(name_char, "Mu_%012d", i); std::string name(name_char); encoding_nodes_mu.push_back(name); } // generate the input for condition_1 and condition_2 Eigen::Tensor<TensorT, 4> condition_1_input(model_trainer.getBatchSize(), model_trainer.getMemorySize(), (int)input_nodes.size(), model_trainer.getNEpochsEvaluation()); Eigen::Tensor<TensorT, 3> time_steps_1_input(model_trainer.getBatchSize(), model_trainer.getMemorySize(), model_trainer.getNEpochsEvaluation()); this->metabolomics_data_.sample_group_name_ = sample_group_name; this->metabolomics_data_.simulateEvaluationData(condition_1_input, time_steps_1_input); // evaluate the encoder for condition_1 and condition_2 model_trainer.setLossOutputNodes({ encoding_nodes_mu }); Eigen::Tensor<TensorT, 4> condition_1_output = model_trainer.evaluateModel( this->model_encoder_, condition_1_input, time_steps_1_input, input_nodes, model_logger, this->model_interpreter_encoder_); return condition_1_output; } /* @brief Generate a reconstruction of the latent space @param[in] encoding_output 4D Tensor of the encoded latent space @returns 4D Tensor of the reconstruction */ Eigen::Tensor<TensorT, 4> generateReconstruction( Eigen::Tensor<TensorT, 4>& encoding_output, ModelTrainerDefaultDevice<TensorT>& model_trainer, ModelLogger<TensorT>& model_logger) { // Make the reconstruction nodes std::vector<std::string> output_nodes_reconstruction; for (int i = 0; i < this->n_input_nodes_; ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes_reconstruction.push_back(name); } // Make the encoding nodes std::vector<std::string> encoding_nodes; for (int i = 0; i < this->encoding_size_; ++i) { char name_char[512]; sprintf(name_char, "Encoding_%012d", i); std::string name(name_char); encoding_nodes.push_back(name); } // evaluate the decoder Eigen::Tensor<TensorT, 3> time_steps_1_input(model_trainer.getBatchSize(), model_trainer.getMemorySize(), model_trainer.getNEpochsEvaluation()); model_trainer.setLossOutputNodes({ output_nodes_reconstruction }); Eigen::Tensor<TensorT, 4> reconstructed_output = model_trainer.evaluateModel( this->model_decoder_, encoding_output, time_steps_1_input, encoding_nodes, model_logger, this->model_interpreter_decoder_); return reconstructed_output; } /* @brief Score a reconstruction or input using a similarity metric function @param[in] sample_group_name_expected */ std::pair<TensorT,TensorT> scoreReconstructionSimilarity(const std::string& sample_group_name_expected, const Eigen::Tensor<TensorT, 4>& reconstructed_output, MetricFunctionTensorOp<TensorT, Eigen::DefaultDevice>& metric_function, ModelTrainerDefaultDevice<TensorT>& model_trainer, ModelLogger<TensorT>& model_logger) { // Make the input nodes std::vector<std::string> input_nodes; for (int i = 0; i < this->n_input_nodes_; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } // Make the classification nodes std::vector<std::string> output_nodes_normalization; for (int i = 0; i < this->n_input_nodes_; ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes_normalization.push_back(name); } // generate the input for the expected Eigen::Tensor<TensorT, 4> condition_1_input(model_trainer.getBatchSize(), model_trainer.getMemorySize(), (int)input_nodes.size(), model_trainer.getNEpochsEvaluation()); Eigen::Tensor<TensorT, 3> time_steps_1_input(model_trainer.getBatchSize(), model_trainer.getMemorySize(), model_trainer.getNEpochsEvaluation()); this->metabolomics_data_.sample_group_name_ = sample_group_name_expected; this->metabolomics_data_.simulateEvaluationData(condition_1_input, time_steps_1_input); // score the decoded data using the classification model // TODO: currently just ignoring the `is_neg` nodes in the score Eigen::Tensor<TensorT, 3> expected = condition_1_input.chip(0, 1); Eigen::Tensor<TensorT, 2> predicted = reconstructed_output.chip(0, 3).chip(0, 1); //Eigen::Tensor<TensorT, 2> predicted = normalization_reconstruction.chip(0, 3).chip(0, 1); Eigen::Tensor<TensorT, 2> score_mean(1, 1); score_mean.setZero(); Eigen::Tensor<TensorT, 2> score_var(1, 1); score_var.setZero(); Eigen::DefaultDevice device; metric_function.setReductionFunc(std::string("Mean")); metric_function(predicted.data(), expected.data(), score_mean.data(), model_trainer.getBatchSize(), model_trainer.getMemorySize(), this->n_input_nodes_, 1, 0, 0, device); metric_function.setReductionFunc(std::string("Var")); metric_function(predicted.data(), expected.data(), score_var.data(), model_trainer.getBatchSize(), model_trainer.getMemorySize(), this->n_input_nodes_, 1, 0, 0, device); return std::make_pair(score_mean(0, 0), score_var(0, 0)); }; /* @brief Score the similarity between data sets using a similarity metric function TODO: refactor to allow for using the GPU @param[in] sample_group_name_expected */ std::pair<TensorT,TensorT> scoreDataSimilarity(const std::string& sample_group_name_expected, const std::string& sample_group_name_predicted, MetricFunctionTensorOp<TensorT, Eigen::DefaultDevice>& metric_function, ModelTrainerDefaultDevice<TensorT>& model_trainer, ModelLogger<TensorT>& model_logger) { // Make the input nodes std::vector<std::string> input_nodes; for (int i = 0; i < this->n_input_nodes_; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } // Make the classification nodes std::vector<std::string> output_nodes_normalization; for (int i = 0; i < this->n_input_nodes_; ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes_normalization.push_back(name); } // generate the input for the expected Eigen::Tensor<TensorT, 4> condition_1_input(model_trainer.getBatchSize(), model_trainer.getMemorySize(), (int)input_nodes.size(), model_trainer.getNEpochsEvaluation()); Eigen::Tensor<TensorT, 3> time_steps_1_input(model_trainer.getBatchSize(), model_trainer.getMemorySize(), model_trainer.getNEpochsEvaluation()); this->metabolomics_data_.sample_group_name_ = sample_group_name_expected; this->metabolomics_data_.simulateEvaluationData(condition_1_input, time_steps_1_input); // normalize the expected model_trainer.setLossOutputNodes({ output_nodes_normalization }); Eigen::Tensor<TensorT, 4> normalization_output_1 = model_trainer.evaluateModel( this->model_normalization_, condition_1_input, time_steps_1_input, input_nodes, model_logger, this->model_interpreter_normalization_); // generate the input for the predicted Eigen::Tensor<TensorT, 4> condition_2_input(model_trainer.getBatchSize(), model_trainer.getMemorySize(), (int)input_nodes.size(), model_trainer.getNEpochsEvaluation()); Eigen::Tensor<TensorT, 3> time_steps_2_input(model_trainer.getBatchSize(), model_trainer.getMemorySize(), model_trainer.getNEpochsEvaluation()); this->metabolomics_data_.sample_group_name_ = sample_group_name_predicted; this->metabolomics_data_.simulateEvaluationData(condition_2_input, time_steps_2_input); // normalize the expected model_trainer.setLossOutputNodes({ output_nodes_normalization }); Eigen::Tensor<TensorT, 4> normalization_output_2 = model_trainer.evaluateModel( this->model_normalization_, condition_2_input, time_steps_2_input, input_nodes, model_logger, this->model_interpreter_normalization_); // score the decoded data using the classification model // TODO: currently just ignoring the `is_neg` nodes in the score Eigen::Tensor<TensorT, 3> expected = normalization_output_1.chip(0, 1); Eigen::Tensor<TensorT, 2> predicted = normalization_output_2.chip(0, 3).chip(0, 1); Eigen::Tensor<TensorT, 2> score_mean(1, 1); score_mean.setZero(); Eigen::Tensor<TensorT, 2> score_var(1, 1); score_var.setZero(); Eigen::DefaultDevice device; metric_function.setReductionFunc(std::string("Mean")); metric_function(predicted.data(), expected.data(), score_mean.data(), model_trainer.getBatchSize(), model_trainer.getMemorySize(), this->n_input_nodes_, 1, 0, 0, device); metric_function.setReductionFunc(std::string("Var")); metric_function(predicted.data(), expected.data(), score_var.data(), model_trainer.getBatchSize(), model_trainer.getMemorySize(), this->n_input_nodes_, 1, 0, 0, device); return std::make_pair(score_mean(0,0), score_var(0,0)); }; int getNInputNodes() const { return n_input_nodes_; } int getNOutputNodes() const { return n_output_nodes_; } int encoding_size_ = 16; bool simulate_MARs_ = false; bool sample_concs_ = true; bool use_fold_change_ = false; std::string ref_fold_change_ = ""; protected: /// Defined in setMetabolomicsData MetDataSimLatentArithmetic<TensorT> metabolomics_data_; BiochemicalReactionModel<TensorT> reaction_model_; int n_input_nodes_ = -1; int n_output_nodes_ = -1; /// Defined in setEncDecModels Model<TensorT> model_decoder_; Model<TensorT> model_encoder_; ModelInterpreterDefaultDevice<TensorT> model_interpreter_decoder_; ModelInterpreterDefaultDevice<TensorT> model_interpreter_encoder_; }; /* @brief Compute the similarity between data sets TODO: refactor to allow for running on the GPU */ template<typename TensorT> void computeDataSimilarity(const std::vector<std::string>& predicted, const std::vector<std::string>& expected, LatentArithmetic<TensorT>& latentArithmetic, MetricFunctionTensorOp<TensorT, Eigen::DefaultDevice>& metric_function, ModelTrainerExt<TensorT>& model_trainer, ModelLogger<TensorT>& model_logger, const bool& init_interpreter) { assert(predicted.size() == expected.size()); for (int case_iter = 0; case_iter < predicted.size(); ++case_iter) { //// Determine when to initialize the model interpreter //if (case_iter == 0 && init_interpreter) { // model_trainer.setInterpretModel(true); //} //else { // model_trainer.setInterpretModel(false); //} //model_trainer.setResetModel(false); //model_trainer.setResetInterpreter(false); // Calculate the similarity std::pair<TensorT, TensorT> score = latentArithmetic.scoreDataSimilarity(expected.at(case_iter), predicted.at(case_iter), metric_function, model_trainer, model_logger); std::cout << expected.at(case_iter) << " -> " << predicted.at(case_iter) << ": " << score.first << " +/- " << score.second << std::endl; } } /* @brief Compute the similarity between the data and the generated data TODO: refactor to allow for running on the GPU */ template<typename TensorT> void computeGenerationSimilarity(const std::vector<std::string>& predicted, const std::vector<std::string>& expected, LatentArithmetic<TensorT>& latentArithmetic, MetricFunctionTensorOp<TensorT, Eigen::DefaultDevice>& metric_function, ModelTrainerExt<TensorT>& model_trainer, ModelLogger<TensorT>& model_logger, const bool& init_interpreter) { assert(predicted.size() == expected.size()); //// define the reconstruction output //Eigen::Tensor<TensorT, 4> reconstruction_output(model_trainer.getBatchSize(), model_trainer.getMemorySize(), latentArithmetic.getNInputNodes(), model_trainer.getNEpochsEvaluation()); for (int case_iter = 0; case_iter < predicted.size(); ++case_iter) { //// Determine when to initialize the model interpreter //if (case_iter == 0 && init_interpreter) { // model_trainer.setInterpretModel(true); //} //else { // model_trainer.setInterpretModel(false); //} //model_trainer.setResetModel(false); //model_trainer.setResetInterpreter(false); // Generate the encoding and decoding and score the result auto encoding_output = latentArithmetic.generateEncoding(predicted.at(case_iter), model_trainer, model_logger); auto reconstruction_output = latentArithmetic.generateReconstruction(encoding_output, model_trainer, model_logger); std::pair<TensorT, TensorT> score = latentArithmetic.scoreReconstructionSimilarity(expected.at(case_iter), reconstruction_output, metric_function, model_trainer, model_logger); std::cout << predicted.at(case_iter) << " -> " << expected.at(case_iter) << ": " << score.first << " +/- " << score.second << std::endl; } } /* @brief Compute the similarity between the data and the generated data after a latent arithmetic operation TODO: refactor to allow for running on the GPU */ template<typename TensorT> void computeLatentArithmeticSimilarity(const std::vector<std::string>& condition_1, const std::vector<std::string>& condition_2, const std::vector<std::string>& expected, LatentArithmetic<TensorT>& latentArithmetic, MetricFunctionTensorOp<TensorT, Eigen::DefaultDevice>& metric_function, ModelTrainerExt<TensorT>& model_trainer, ModelLogger<TensorT>& model_logger, const bool& init_interpreter, const std::string& latent_operation) { assert(condition_1.size() == condition_2.size()); assert(expected.size() == condition_2.size()); assert(condition_1.size() == expected.size()); for (int case_iter = 0; case_iter < condition_1.size(); ++case_iter) { //// Determine when to initialize the model interpreter //if (case_iter == 0) { // model_trainer.setInterpretModel(true); //} //else { // model_trainer.setInterpretModel(false); //} //model_trainer.setResetModel(false); //model_trainer.setResetInterpreter(false); // define the reconstruction output Eigen::Tensor<TensorT, 4> reconstruction_output(model_trainer.getBatchSize(), model_trainer.getMemorySize(), latentArithmetic.getNInputNodes(), model_trainer.getNEpochsEvaluation()); // Calculate the latent arithmetic auto encoding_output_1 = latentArithmetic.generateEncoding(condition_1.at(case_iter), model_trainer, model_logger); auto encoding_output_2 = latentArithmetic.generateEncoding(condition_2.at(case_iter), model_trainer, model_logger); if (latent_operation == "-") { Eigen::Tensor<TensorT, 4> encoding_output = encoding_output_1 - encoding_output_2; reconstruction_output = latentArithmetic.generateReconstruction(encoding_output, model_trainer, model_logger); } else if (latent_operation == "+") { Eigen::Tensor<TensorT, 4> encoding_output = encoding_output_1 + encoding_output_2; reconstruction_output = latentArithmetic.generateReconstruction(encoding_output, model_trainer, model_logger); } // Score the reconstruction similarity to the expected std::pair<TensorT, TensorT> score = latentArithmetic.scoreReconstructionSimilarity(expected.at(case_iter), reconstruction_output, metric_function, model_trainer, model_logger); std::cout << condition_1.at(case_iter) << " " << latent_operation << " " << condition_2.at(case_iter) << " -> " << expected.at(case_iter) << ": " << score.first << " +/- " << score.second << std::endl; } } /* @brief Compute the similarity between the data and the generated data after a latent interpolation TODO: refactor to allow for running on the GPU */ template<typename TensorT> void computeLatentInterpolationSimilarity(const std::vector<std::string>& condition_1, const std::vector<std::string>& condition_2, const std::vector<std::vector<std::string>>& expected, LatentArithmetic<TensorT>& latentArithmetic, MetricFunctionTensorOp<TensorT, Eigen::DefaultDevice>& metric_function, ModelTrainerExt<TensorT>& model_trainer, ModelLogger<TensorT>& model_logger, const bool& init_interpreter, const bool& interp_q1, const bool& interp_median, const bool& interp_q3) { assert(condition_1.size() == condition_2.size()); assert(expected.size() == condition_2.size()); assert(condition_1.size() == expected.size()); //// define the reconstruction output (is this leading to data corruption?) //Eigen::Tensor<TensorT, 4> reconstruction_output(model_trainer.getBatchSize(), model_trainer.getMemorySize(), latentArithmetic.getNInputNodes(), model_trainer.getNEpochsEvaluation()); for (int case_iter = 0; case_iter < condition_1.size(); ++case_iter) { // Determine when to initialize the model interpreter //if (case_iter == 0) { // model_trainer.setInterpretModel(true); //} //else { // model_trainer.setInterpretModel(false); //} //model_trainer.setResetModel(false); //model_trainer.setResetInterpreter(false); // Generate the encodings auto encoding_output_1 = latentArithmetic.generateEncoding(condition_1.at(case_iter), model_trainer, model_logger); auto encoding_output_2 = latentArithmetic.generateEncoding(condition_2.at(case_iter), model_trainer, model_logger); // Interpolations if (interp_q1) { Eigen::Tensor<TensorT, 4> encoding_output = encoding_output_1 * encoding_output_1.constant(0.25) + encoding_output_2 * encoding_output_2.constant(0.75); auto reconstruction_output = latentArithmetic.generateReconstruction(encoding_output, model_trainer, model_logger); for (int trial_iter = 0; trial_iter < expected.at(case_iter).size(); ++trial_iter) { std::pair<TensorT, TensorT> score = latentArithmetic.scoreReconstructionSimilarity(expected.at(case_iter).at(trial_iter), reconstruction_output, metric_function, model_trainer, model_logger); std::cout << "0.25 * " << condition_1.at(case_iter) << " + 0.75 * " << condition_2.at(case_iter) << " -> " << expected.at(case_iter).at(trial_iter) << ": " << score.first << " +/- " << score.second << std::endl; } } if (interp_median) { Eigen::Tensor<TensorT, 4> encoding_output = encoding_output_1 * encoding_output_1.constant(0.5) + encoding_output_2 * encoding_output_2.constant(0.5); auto reconstruction_output = latentArithmetic.generateReconstruction(encoding_output, model_trainer, model_logger); for (int trial_iter = 0; trial_iter < expected.at(case_iter).size(); ++trial_iter) { std::pair<TensorT, TensorT> score = latentArithmetic.scoreReconstructionSimilarity(expected.at(case_iter).at(trial_iter), reconstruction_output, metric_function, model_trainer, model_logger); std::cout << "0.5 * " << condition_1.at(case_iter) << " + 0.5 * " << condition_2.at(case_iter) << " -> " << expected.at(case_iter).at(trial_iter) << ": " << score.first << " +/- " << score.second << std::endl; } } if (interp_q3) { Eigen::Tensor<TensorT, 4> encoding_output = encoding_output_1 * encoding_output_1.constant(0.75) + encoding_output_2 * encoding_output_2.constant(0.25); auto reconstruction_output = latentArithmetic.generateReconstruction(encoding_output, model_trainer, model_logger); for (int trial_iter = 0; trial_iter < expected.at(case_iter).size(); ++trial_iter) { std::pair<TensorT, TensorT> score = latentArithmetic.scoreReconstructionSimilarity(expected.at(case_iter).at(trial_iter), reconstruction_output, metric_function, model_trainer, model_logger); std::cout << "0.75 * " << condition_1.at(case_iter) << " + 0.25 * " << condition_2.at(case_iter) << " -> " << expected.at(case_iter).at(trial_iter) << ": " << score.first << " +/- " << score.second << std::endl; } } } } /// KALE Latent arithmetic and interpolation script template<typename TensorT> void main_KALE(ModelInterpreterDefaultDevice<TensorT>& model_interpreter, ModelTrainerExt<TensorT>& model_trainer, ModelLogger<TensorT>& model_logger, LatentArithmetic<TensorT>& latentArithmetic, MetricFunctionTensorOp<TensorT, Eigen::DefaultDevice>& metric_function, const bool& compute_data_similarities, const bool& compute_generation_similarity, const bool& compute_latent_arithmetic, const bool& compute_latent_interpolation) { // NOTE: similarity metric of Manhattan distance used as per 10.1109/TCBB.2016.2586065 // that found the following similarity metrics to work well for metabolomic prfile data: // Minkowski distance, Euclidean distance, Manhattan distance, Jeffreys & Matusita distance, Dice’s coefficient, Jaccard similarity coefficient // and the following similarity metrics to be unsuitable for metabolomic profile data: // Canberra distance, relative distance, and cosine of angle std::vector<std::string> condition_1, condition_2, predicted, expected; if (compute_data_similarities) { // Reference similarity metrics //predicted = { "Evo04Evo01EP", "Evo04Evo02EP", "Evo04gndEvo01EP", "Evo04gndEvo02EP", "Evo04pgiEvo01EP", "Evo04pgiEvo02EP", // "Evo04ptsHIcrrEvo01EP", "Evo04ptsHIcrrEvo02EP", "Evo04sdhCBEvo01EP", "Evo04sdhCBEvo02EP", "Evo04tpiAEvo01EP", "Evo04tpiAEvo02EP" }; //expected = { "Evo04", "Evo04", "Evo04gnd", "Evo04gnd", "Evo04pgi", "Evo04pgi", // "Evo04ptsHIcrr", "Evo04ptsHIcrr", "Evo04sdhCB", "Evo04sdhCB", "Evo04tpiA", "Evo04tpiA" }; //computeDataSimilarity(predicted, expected, latentArithmetic, metric_function, model_trainer, model_logger, // true); //predicted = std::vector<std::string>({ "Evo04gnd", "Evo04pgi", "Evo04ptsHIcrr", "Evo04sdhCB", "Evo04tpiA" }); //expected = std::vector<std::string>({ "Evo04", "Evo04", "Evo04", "Evo04", "Evo04" }); //computeDataSimilarity(predicted, expected, latentArithmetic, metric_function, model_trainer, model_logger, // true); //predicted = std::vector<std::string>({ "Evo04Evo01EP", "Evo04Evo02EP", "Evo04gndEvo01EP", "Evo04gndEvo02EP", "Evo04pgiEvo01EP", "Evo04pgiEvo02EP", // "Evo04ptsHIcrrEvo01EP", "Evo04ptsHIcrrEvo02EP", "Evo04sdhCBEvo01EP", "Evo04sdhCBEvo02EP", "Evo04tpiAEvo01EP", "Evo04tpiAEvo02EP" }); //expected = std::vector<std::string>({ "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", // "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04" }); //computeDataSimilarity(predicted, expected, latentArithmetic, metric_function, model_trainer, model_logger, // true); predicted = std::vector<std::string>({ "Evo04", "Evo04Evo01EP", "Evo04Evo02EP", "Evo04gnd", "Evo04gndEvo01EP", "Evo04gndEvo02EP", "Evo04pgi", "Evo04pgiEvo01EP", "Evo04pgiEvo02EP", "Evo04ptsHIcrr", "Evo04ptsHIcrrEvo01EP", "Evo04ptsHIcrrEvo02EP", "Evo04sdhCB", "Evo04sdhCBEvo01EP", "Evo04sdhCBEvo02EP", "Evo04tpiA", "Evo04tpiAEvo01EP", "Evo04tpiAEvo02EP" }); expected = std::vector<std::string>({ "Evo04", "Evo04Evo01EP", "Evo04Evo02EP", "Evo04gnd", "Evo04gndEvo01EP", "Evo04gndEvo02EP", "Evo04pgi", "Evo04pgiEvo01EP", "Evo04pgiEvo02EP", "Evo04ptsHIcrr", "Evo04ptsHIcrrEvo01EP", "Evo04ptsHIcrrEvo02EP", "Evo04sdhCB", "Evo04sdhCBEvo01EP", "Evo04sdhCBEvo02EP", "Evo04tpiA", "Evo04tpiAEvo01EP", "Evo04tpiAEvo02EP" }); computeDataSimilarity(predicted, expected, latentArithmetic, metric_function, model_trainer, model_logger, true); } if (compute_generation_similarity) { predicted = { "Evo04", "Evo04Evo01EP", "Evo04Evo02EP", "Evo04gnd", "Evo04gndEvo01EP", "Evo04gndEvo02EP", "Evo04pgi", "Evo04pgiEvo01EP", "Evo04pgiEvo02EP", "Evo04ptsHIcrr", "Evo04ptsHIcrrEvo01EP", "Evo04ptsHIcrrEvo02EP", "Evo04sdhCB", "Evo04sdhCBEvo01EP", "Evo04sdhCBEvo02EP", "Evo04tpiA", "Evo04tpiAEvo01EP", "Evo04tpiAEvo02EP" }; expected = { "Evo04", "Evo04Evo01EP", "Evo04Evo02EP", "Evo04gnd", "Evo04gndEvo01EP", "Evo04gndEvo02EP", "Evo04pgi", "Evo04pgiEvo01EP", "Evo04pgiEvo02EP", "Evo04ptsHIcrr", "Evo04ptsHIcrrEvo01EP", "Evo04ptsHIcrrEvo02EP", "Evo04sdhCB", "Evo04sdhCBEvo01EP", "Evo04sdhCBEvo02EP", "Evo04tpiA", "Evo04tpiAEvo01EP", "Evo04tpiAEvo02EP" }; computeGenerationSimilarity(predicted, expected, latentArithmetic, metric_function, model_trainer, model_logger, true); } if (compute_latent_arithmetic) { // 1. EPi - KO -> Ref condition_1 = { "Evo04gndEvo01EP", "Evo04gndEvo02EP", "Evo04pgiEvo01EP", "Evo04pgiEvo02EP", "Evo04ptsHIcrrEvo01EP", "Evo04ptsHIcrrEvo02EP", "Evo04sdhCBEvo01EP", "Evo04sdhCBEvo02EP", "Evo04tpiAEvo01EP", "Evo04tpiAEvo02EP" }; condition_2 = { "Evo04gnd", "Evo04gnd", "Evo04pgi", "Evo04pgi", "Evo04ptsHIcrr", "Evo04ptsHIcrr", "Evo04sdhCB", "Evo04sdhCB", "Evo04tpiA", "Evo04tpiA" }; expected = { "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04" }; computeLatentArithmeticSimilarity(condition_1, condition_2, expected, latentArithmetic, metric_function, model_trainer, model_logger, true, "-"); // 2. EPi - Ref -> KO condition_1 = { "Evo04gndEvo01EP", "Evo04gndEvo02EP", "Evo04pgiEvo01EP", "Evo04pgiEvo02EP", "Evo04ptsHIcrrEvo01EP", "Evo04ptsHIcrrEvo02EP", "Evo04sdhCBEvo01EP", "Evo04sdhCBEvo02EP", "Evo04tpiAEvo01EP", "Evo04tpiAEvo02EP" }; condition_2 = { "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04" }; expected = { "Evo04gnd", "Evo04gnd", "Evo04pgi", "Evo04pgi", "Evo04ptsHIcrr", "Evo04ptsHIcrr", "Evo04sdhCB", "Evo04sdhCB", "Evo04tpiA", "Evo04tpiA" }; computeLatentArithmeticSimilarity(condition_1, condition_2, expected, latentArithmetic, metric_function, model_trainer, model_logger, true, "-"); // 3. KOi + Ref -> EPi condition_1 = { "Evo04gnd", "Evo04gnd", "Evo04pgi", "Evo04pgi", "Evo04ptsHIcrr", "Evo04ptsHIcrr", "Evo04sdhCB", "Evo04sdhCB", "Evo04tpiA", "Evo04tpiA" }; condition_2 = { "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04", "Evo04" }; expected = { "Evo04gndEvo01EP", "Evo04gndEvo02EP", "Evo04pgiEvo01EP", "Evo04pgiEvo02EP", "Evo04ptsHIcrrEvo01EP", "Evo04ptsHIcrrEvo02EP", "Evo04sdhCBEvo01EP", "Evo04sdhCBEvo02EP", "Evo04tpiAEvo01EP", "Evo04tpiAEvo02EP" }; computeLatentArithmeticSimilarity(condition_1, condition_2, expected, latentArithmetic, metric_function, model_trainer, model_logger, true, "+"); } if (compute_latent_interpolation) { const std::vector<std::string> condition_1 = { //"Evo04gnd", "Evo04gnd", "Evo04pgi", "Evo04pgi", "Evo04ptsHIcrr", "Evo04ptsHIcrr", //"Evo04sdhCB", "Evo04sdhCB", "Evo04tpiA", "Evo04tpiA" }; const std::vector<std::string> condition_2 = { //"Evo04gndEvo01EP", "Evo04gndEvo02EP", "Evo04pgiEvo01EP", "Evo04pgiEvo02EP", "Evo04ptsHIcrrEvo01EP", "Evo04ptsHIcrrEvo02EP", //"Evo04sdhCBEvo01EP", "Evo04sdhCBEvo02EP", "Evo04tpiAEvo01EP", "Evo04tpiAEvo02EP" }; const std::vector<std::vector<std::string>> expected = { //{"Evo04gnd", "Evo04gndEvo01EP"}, {"Evo04gnd", "Evo04gndEvo02EP"}, // Pgi: J01 and J02 for Evo01, J01, J02, and J03 for all others {"Evo04pgi", "Evo04pgiEvo01J01", "Evo04pgiEvo01J02", "Evo04pgiEvo01EP"}, {"Evo04pgi", "Evo04pgiEvo02J01", "Evo04pgiEvo02J02", "Evo04pgiEvo02J03", "Evo04pgiEvo02EP"}, // PtsHIcrr: J01 and J03 for Evo01 and EVo02, J01, J03, J04 for Evo03 and Ev04 {"Evo04ptsHIcrr", "Evo04ptsHIcrrEvo01J01", "Evo04ptsHIcrrEvo01J03", "Evo04ptsHIcrrEvo01EP"}, {"Evo04ptsHIcrr", "Evo04ptsHIcrrEvo02J01", "Evo04ptsHIcrrEvo02J03", "Evo04ptsHIcrrEvo02EP"}, //{"Evo04sdhCB", "Evo04sdhCBEvo01EP"}, {"Evo04sdhCB", "Evo04sdhCBEvo02EP"}, // TpiA: J01 and J03 for all {"Evo04tpiA", "Evo04tpiAEvo01J01", "Evo04tpiAEvo01J03", "Evo04tpiAEvo01EP"}, {"Evo04tpiA", "Evo04tpiAEvo02J01", "Evo04tpiAEvo02J03", "Evo04tpiAEvo02EP"} }; computeLatentInterpolationSimilarity(condition_1, condition_2, expected, latentArithmetic, metric_function, model_trainer, model_logger, true, true, true, true); } } /// Industrial strain reconstruction script template<typename TensorT> void main_IndustrialStrains(ModelInterpreterDefaultDevice<TensorT>& model_interpreter, ModelTrainerExt<TensorT>& model_trainer, ModelLogger<TensorT>& model_logger, LatentArithmetic<TensorT>& latentArithmetic, MetricFunctionTensorOp<TensorT, Eigen::DefaultDevice>& metric_function, const bool& compute_data_similarities, const bool& compute_generation_similarity) { // NOTE: similarity metric of Manhattan distance used as per 10.1109/TCBB.2016.2586065 // that found the following similarity metrics to work well for metabolomic prfile data: // Minkowski distance, Euclidean distance, Manhattan distance, Jeffreys & Matusita distance, Dice’s coefficient, Jaccard similarity coefficient // and the following similarity metrics to be unsuitable for metabolomic profile data: // Canberra distance, relative distance, and cosine of angle std::vector<std::string> condition_1, condition_2, predicted, expected; if (compute_data_similarities) { // Reference similarity metrics predicted = { "EColi_BL21","EColi_C","EColi_Crooks","EColi_DH5a","EColi_MG1655","EColi_W","EColi_W3110" }; expected = { "EColi_BL21","EColi_C","EColi_Crooks","EColi_DH5a","EColi_MG1655","EColi_W","EColi_W3110" }; computeDataSimilarity(predicted, expected, latentArithmetic, metric_function, model_trainer, model_logger, true); } if (compute_generation_similarity) { predicted = { "EColi_BL21","EColi_C","EColi_Crooks","EColi_DH5a","EColi_MG1655","EColi_W","EColi_W3110" }; expected = { "EColi_BL21","EColi_C","EColi_Crooks","EColi_DH5a","EColi_MG1655","EColi_W","EColi_W3110" }; computeGenerationSimilarity(predicted, expected, latentArithmetic, metric_function, model_trainer, model_logger, true); } } /// PLT time-course Latent arithmetic and interpolation script template<typename TensorT> void main_PLT(ModelInterpreterDefaultDevice<TensorT>& model_interpreter, ModelTrainerExt<TensorT>& model_trainer, ModelLogger<TensorT>& model_logger, LatentArithmetic<TensorT>& latentArithmetic, MetricFunctionTensorOp<TensorT, Eigen::DefaultDevice>& metric_function, const bool& compute_data_similarities, const bool& compute_generation_similarity, const bool& compute_latent_arithmetic, const bool& compute_latent_interpolation) { // NOTE: similarity metric of Manhattan distance used as per 10.1109/TCBB.2016.2586065 // that found the following similarity metrics to work well for metabolomic prfile data: // Minkowski distance, Euclidean distance, Manhattan distance, Jeffreys & Matusita distance, Dice’s coefficient, Jaccard similarity coefficient // and the following similarity metrics to be unsuitable for metabolomic profile data: // Canberra distance, relative distance, and cosine of angle std::vector<std::string> condition_1, condition_2, predicted, expected; if (compute_data_similarities) { // Reference similarity metrics predicted = { "S01_D01_PLT_37C_22hr", "S01_D01_PLT_25C_22hr", "S01_D01_PLT_25C_6.5hr", "S01_D01_PLT_25C_0hr", "S01_D02_PLT_37C_22hr", "S01_D02_PLT_25C_22hr", "S01_D02_PLT_25C_6.5hr", "S01_D02_PLT_25C_0hr", "S01_D05_PLT_37C_22hr", "S01_D05_PLT_25C_22hr", "S01_D05_PLT_25C_6.5hr", "S01_D05_PLT_25C_0hr" }; expected = { "S01_D01_PLT_25C_0hr", "S01_D01_PLT_25C_0hr", "S01_D01_PLT_25C_0hr", "S01_D01_PLT_25C_0hr", "S01_D02_PLT_25C_0hr", "S01_D02_PLT_25C_0hr","S01_D02_PLT_25C_0hr","S01_D02_PLT_25C_0hr", "S01_D05_PLT_25C_0hr", "S01_D05_PLT_25C_0hr", "S01_D05_PLT_25C_0hr", "S01_D05_PLT_25C_0hr" }; computeDataSimilarity(predicted, expected, latentArithmetic, metric_function, model_trainer, model_logger, true); predicted = { "S01_D02_PLT_37C_22hr", "S01_D02_PLT_25C_22hr", "S01_D02_PLT_25C_6.5hr", "S01_D02_PLT_25C_0hr", "S01_D05_PLT_37C_22hr", "S01_D05_PLT_25C_22hr", "S01_D05_PLT_25C_6.5hr", "S01_D05_PLT_25C_0hr" }; expected = { "S01_D01_PLT_37C_22hr", "S01_D01_PLT_25C_22hr", "S01_D01_PLT_25C_6.5hr", "S01_D01_PLT_25C_0hr", "S01_D01_PLT_37C_22hr", "S01_D01_PLT_25C_22hr", "S01_D01_PLT_25C_6.5hr", "S01_D01_PLT_25C_0hr" }; computeDataSimilarity(predicted, expected, latentArithmetic, metric_function, model_trainer, model_logger, false); predicted = { "S01_D01_PLT_37C_22hr", "S01_D01_PLT_25C_22hr", "S01_D01_PLT_25C_6.5hr", "S01_D01_PLT_25C_0hr", "S01_D02_PLT_37C_22hr", "S01_D02_PLT_25C_22hr", "S01_D02_PLT_25C_6.5hr", "S01_D02_PLT_25C_0hr", "S01_D05_PLT_37C_22hr", "S01_D05_PLT_25C_22hr", "S01_D05_PLT_25C_6.5hr", "S01_D05_PLT_25C_0hr" }; expected = { "S01_D01_PLT_37C_22hr", "S01_D01_PLT_25C_22hr", "S01_D01_PLT_25C_6.5hr", "S01_D01_PLT_25C_0hr", "S01_D02_PLT_37C_22hr", "S01_D02_PLT_25C_22hr", "S01_D02_PLT_25C_6.5hr", "S01_D02_PLT_25C_0hr", "S01_D05_PLT_37C_22hr", "S01_D05_PLT_25C_22hr", "S01_D05_PLT_25C_6.5hr", "S01_D05_PLT_25C_0hr" }; computeDataSimilarity(predicted, expected, latentArithmetic, metric_function, model_trainer, model_logger, false); } if (compute_generation_similarity) { predicted = { "S01_D01_PLT_37C_22hr", "S01_D01_PLT_25C_22hr", "S01_D01_PLT_25C_6.5hr", "S01_D01_PLT_25C_0hr", "S01_D02_PLT_37C_22hr", "S01_D02_PLT_25C_22hr", "S01_D02_PLT_25C_6.5hr", "S01_D02_PLT_25C_0hr", "S01_D05_PLT_37C_22hr", "S01_D05_PLT_25C_22hr", "S01_D05_PLT_25C_6.5hr", "S01_D05_PLT_25C_0hr" }; expected = { "S01_D01_PLT_37C_22hr", "S01_D01_PLT_25C_22hr", "S01_D01_PLT_25C_6.5hr", "S01_D01_PLT_25C_0hr", "S01_D02_PLT_37C_22hr", "S01_D02_PLT_25C_22hr", "S01_D02_PLT_25C_6.5hr", "S01_D02_PLT_25C_0hr", "S01_D05_PLT_37C_22hr", "S01_D05_PLT_25C_22hr", "S01_D05_PLT_25C_6.5hr", "S01_D05_PLT_25C_0hr" }; computeGenerationSimilarity(predicted, expected, latentArithmetic, metric_function, model_trainer, model_logger, true); } if (compute_latent_arithmetic) { // 1. drug + degradation -> drug & degradation condition_1 = { "S01_D02_PLT_25C_0hr", "S01_D05_PLT_25C_0hr" }; condition_2 = { "S01_D01_PLT_37C_22hr", "S01_D01_PLT_37C_22hr" }; expected = { "S01_D02_PLT_37C_22hr", "S01_D05_PLT_37C_22hr" }; computeLatentArithmeticSimilarity(condition_1, condition_2, expected, latentArithmetic, metric_function, model_trainer, model_logger, true, "+"); // 2. drug + metabolic -> drug & metabolic condition_1 = { "S01_D02_PLT_25C_0hr", "S01_D05_PLT_25C_0hr" }; condition_2 = { "S01_D01_PLT_25C_22hr", "S01_D01_PLT_25C_22hr" }; expected = { "S01_D02_PLT_25C_22hr", "S01_D05_PLT_25C_22hr" }; computeLatentArithmeticSimilarity(condition_1, condition_2, expected, latentArithmetic, metric_function, model_trainer, model_logger, true, "+"); // 3. drug & metabolic - metabolic -> drug condition_1 = { "S01_D02_PLT_25C_22hr", "S01_D05_PLT_25C_22hr" }; condition_2 = { "S01_D01_PLT_25C_22hr", "S01_D01_PLT_25C_22hr" }; expected = { "S01_D02_PLT_25C_0hr", "S01_D05_PLT_25C_0hr" }; computeLatentArithmeticSimilarity(condition_1, condition_2, expected, latentArithmetic, metric_function, model_trainer, model_logger, true, "-"); // 4. drug & metabolic - drug -> metabolic condition_1 = { "S01_D02_PLT_25C_22hr", "S01_D05_PLT_25C_22hr" }; condition_2 = { "S01_D02_PLT_25C_0hr", "S01_D05_PLT_25C_0hr" }; expected = { "S01_D01_PLT_25C_22hr", "S01_D01_PLT_25C_22hr" }; computeLatentArithmeticSimilarity(condition_1, condition_2, expected, latentArithmetic, metric_function, model_trainer, model_logger, true, "-"); // 5. drug & degradation - degradation -> drug condition_1 = { "S01_D02_PLT_37C_22hr", "S01_D05_PLT_37C_22hr" }; condition_2 = { "S01_D01_PLT_37C_22hr", "S01_D01_PLT_37C_22hr" }; expected = { "S01_D02_PLT_25C_0hr", "S01_D05_PLT_25C_0hr" }; computeLatentArithmeticSimilarity(condition_1, condition_2, expected, latentArithmetic, metric_function, model_trainer, model_logger, true, "-"); } if (compute_latent_interpolation) { condition_1 = { "S01_D01_PLT_25C_0hr", "S01_D02_PLT_25C_0hr", "S01_D05_PLT_25C_0hr" }; condition_2 = { "S01_D01_PLT_25C_22hr", "S01_D02_PLT_25C_22hr", "S01_D05_PLT_25C_22hr" }; const std::vector<std::vector<std::string>> expected = { {"S01_D01_PLT_25C_0hr", "S01_D01_PLT_25C_2hr", "S01_D01_PLT_25C_6.5hr", "S01_D01_PLT_25C_22hr"}, {"S01_D02_PLT_25C_0hr", "S01_D02_PLT_25C_2hr", "S01_D02_PLT_25C_6.5hr", "S01_D02_PLT_25C_22hr"}, {"S01_D05_PLT_25C_0hr", "S01_D05_PLT_25C_2hr", "S01_D05_PLT_25C_6.5hr", "S01_D05_PLT_25C_22hr"}, }; computeLatentInterpolationSimilarity(condition_1, condition_2, expected, latentArithmetic, metric_function, model_trainer, model_logger, true, true, true, true); } } // Main int main(int argc, char** argv) { /// KALE and Industrial strains //// Set the data directories //const std::string data_dir = "C:/Users/dmccloskey/Dropbox (UCSD SBRG)/Metabolomics_KALE/"; const std::string data_dir = "C:/Users/domccl/Dropbox (UCSD SBRG)/Metabolomics_KALE/"; //const std::string data_dir = "/home/user/Data/"; // Set the biochemical reaction filenames const std::string biochem_rxns_filename = data_dir + "iJO1366.csv"; // Set the model filenames const std::string model_encoder_weights_filename = data_dir + "TrainTestData/SampledArithmeticMath/VAE_weights.csv"; const std::string model_decoder_weights_filename = data_dir + "TrainTestData/SampledArithmeticMath/VAE_weights.csv"; // NOTE: be sure to re-name the Input_000000000000-LinearScale_to_... weights to Input_000000000000_to_... // using regex "-LinearScale_to_FC0" with "_to_FC0" const std::string model_classifier_weights_filename = data_dir + "TrainTestData/SampledArithmeticMath/Classifier_5000_weights.csv"; // ALEsKOs01 const std::string metabo_data_filename_train = data_dir + "ALEsKOs01_Metabolomics_train.csv"; const std::string meta_data_filename_train = data_dir + "ALEsKOs01_MetaData_train.csv"; const std::string metabo_data_filename_test = data_dir + "ALEsKOs01_Metabolomics_test.csv"; const std::string meta_data_filename_test = data_dir + "ALEsKOs01_MetaData_test.csv"; //// IndustrialStrains0103 //const std::string metabo_data_filename_train = data_dir + "IndustrialStrains0103_Metabolomics_train.csv"; //const std::string meta_data_filename_train = data_dir + "IndustrialStrains0103_MetaData_train.csv"; //const std::string metabo_data_filename_test = data_dir + "IndustrialStrains0103_Metabolomics_test.csv"; //const std::string meta_data_filename_test = data_dir + "IndustrialStrains0103_MetaData_test.csv"; /// PLTs //// Set the data directories ////const std::string data_dir = "C:/Users/dmccloskey/Dropbox (UCSD SBRG)/Metabolomics_RBC_Platelet/"; //const std::string data_dir = "C:/Users/domccl/Dropbox (UCSD SBRG)/Metabolomics_RBC_Platelet/"; ////const std::string data_dir = "/home/user/Data/"; //// Set the biochemical reaction filenames //const std::string biochem_rxns_filename = data_dir + "iAT_PLT_636.csv"; ////const std::string biochem_rxns_filename = data_dir + "iAB_RBC_283.csv"; //// Set the model filenames //const std::string model_encoder_weights_filename = data_dir + "VAE_weights.csv"; //const std::string model_decoder_weights_filename = data_dir + "VAE_weights.csv"; //// Platelets //const std::string metabo_data_filename_train = data_dir + "PLT_timeCourse_Metabolomics_train.csv"; //const std::string meta_data_filename_train = data_dir + "PLT_timeCourse_MetaData_train.csv"; //const std::string metabo_data_filename_test = data_dir + "PLT_timeCourse_Metabolomics_test.csv"; //const std::string meta_data_filename_test = data_dir + "PLT_timeCourse_MetaData_test.csv"; // Define the model trainers and resources for the trainers ModelResources model_resources = { ModelDevice(0, 1) }; ModelInterpreterDefaultDevice<float> model_interpreter(model_resources); ModelTrainerExt<float> model_trainer; model_trainer.setBatchSize(512); //model_trainer.setBatchSize(1); // Logging only model_trainer.setMemorySize(1); model_trainer.setNEpochsEvaluation(1); model_trainer.setVerbosityLevel(1); model_trainer.setLogging(false, false, false); //model_trainer.setLogging(false, false, true); // Logging only model_trainer.setFindCycles(false); model_trainer.setFastInterpreter(true); model_trainer.setPreserveOoO(true); // Define the model logger ModelLogger<float> model_logger(false, false, false, false, false, true, false, true); // Read in the metabolomics data and models LatentArithmetic<float> latentArithmetic(16, false, true, true, std::string("Evo04")); latentArithmetic.setMetabolomicsData(biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test); latentArithmetic.setEncDecModels(model_trainer, model_encoder_weights_filename, model_decoder_weights_filename, 64, 64, 0, 64, 64, 0); latentArithmetic.setEncDecModelInterpreters(model_interpreter, model_interpreter); // Run the script //main_KALE(model_interpreter, model_trainer, model_logger, latentArithmetic, PercentDifferenceTensorOp<float, Eigen::DefaultDevice>(), false, true, false, false); //main_KALE(model_interpreter, model_trainer, model_logger, latentArithmetic, EuclideanDistTensorOp<float, Eigen::DefaultDevice>(), false, true, false, false); //main_KALE(model_interpreter, model_trainer, model_logger, latentArithmetic, PearsonRTensorOp<float, Eigen::DefaultDevice>(), false, true, false, false); main_KALE(model_interpreter, model_trainer, model_logger, latentArithmetic, ManhattanDistTensorOp<float, Eigen::DefaultDevice>(), false, true, false, false); //main_KALE(model_interpreter, model_trainer, model_logger, latentArithmetic, LogarithmicDistTensorOp<float, Eigen::DefaultDevice>(), false, true, false, false); //main_KALE(model_interpreter, model_trainer, model_logger, latentArithmetic, JeffreysAndMatusitaDistTensorOp<float, Eigen::DefaultDevice>(), false, true, false, false); //main_IndustrialStrains(model_interpreter, model_trainer, model_logger, latentArithmetic, true, false); //main_PLT(model_interpreter, model_trainer, model_logger, latentArithmetic, false, true, true, true); return 0; }<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_PARAMETERS_H #define EVONET_PARAMETERS_H // .h #include <tuple> #include <string> // .cpp #include <EvoNet/io/csv.h> #include <EvoNet/io/CSVWriter.h> namespace EvoNet { /// Implementation of `std::invoke` for C++11 on CUDA /// Referece: https://stackoverflow.com/questions/34668720/stdapply-may-not-be-properly-implemented template <typename F, typename Tuple, size_t... I> decltype(auto) apply_impl(F&& f, Tuple&& t, std::index_sequence<I...>) { return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...); } template <typename F, typename Tuple> decltype(auto) apply(F&& f, Tuple&& t) { using Indices = std::make_index_sequence<std::tuple_size<std::decay_t<Tuple>>::value>; return apply_impl(std::forward<F>(f), std::forward<Tuple>(t), Indices{}); } /// List of all available parameters and their types namespace EvoNetParameters { /** @brief Parameter */ template<typename T> struct Parameter { std::string name_; std::string s_; T value_; Parameter(const std::string& name, const T& value) : name_(name), value_(value) { std::stringstream ss; ss << value; ss >> s_; }; void set() { if (!s_.empty()) { std::stringstream ss; ss << s_; ss >> value_; } } T get() { return value_; } friend std::ostream& operator<<(std::ostream& os, const Parameter& parameter) { os << parameter.name_ << ": " << parameter.value_; return os; } }; namespace General { struct ID : Parameter<int> { using Parameter::Parameter; }; struct DataDir : Parameter<std::string> { using Parameter::Parameter; }; struct OutputDir : Parameter<std::string> { using Parameter::Parameter; }; struct InputDir : Parameter<std::string> { using Parameter::Parameter; }; } namespace Main { struct MakeModel : Parameter<bool> { using Parameter::Parameter; }; struct TrainModel : Parameter<bool> { using Parameter::Parameter; }; struct EvolveModel : Parameter<bool> { using Parameter::Parameter; }; struct EvaluateModel : Parameter<bool> { using Parameter::Parameter; }; struct EvaluateModels : Parameter<bool> { using Parameter::Parameter; }; struct LoadModelCsv : Parameter<bool> { using Parameter::Parameter; }; struct LoadModelBinary : Parameter<bool> { using Parameter::Parameter; }; struct ModelName : Parameter<std::string> { using Parameter::Parameter; }; struct DeviceId : Parameter<int> { using Parameter::Parameter; }; } namespace PopulationTrainer { struct PopulationName : Parameter<std::string> { using Parameter::Parameter; }; struct PopulationSize : Parameter<int> { using Parameter::Parameter; }; struct NInterpreters : Parameter<int> { using Parameter::Parameter; }; struct NTop : Parameter<int> { using Parameter::Parameter; }; struct NRandom : Parameter<int> { using Parameter::Parameter; }; struct NReplicatesPerModel : Parameter<int> { using Parameter::Parameter; }; struct Logging : Parameter<bool> { using Parameter::Parameter; }; struct RemoveIsolatedNodes : Parameter<bool> { using Parameter::Parameter; }; struct PruneModelNum : Parameter<int> { using Parameter::Parameter; }; struct CheckCompleteModelInputToOutput : Parameter<bool> { using Parameter::Parameter; }; //struct SelectModels : Parameter<bool> { using Parameter::Parameter; }; struct ResetModelCopyWeights : Parameter<bool> { using Parameter::Parameter; }; struct ResetModelTemplateWeights : Parameter<bool> { using Parameter::Parameter; }; struct NGenerations : Parameter<int> { using Parameter::Parameter; }; struct SetPopulationSizeFixed : Parameter<bool> { using Parameter::Parameter; }; struct SetPopulationSizeDoubling : Parameter<bool> { using Parameter::Parameter; }; struct SetTrainingStepsByModelSize : Parameter<bool> { using Parameter::Parameter; }; } namespace ModelReplicator { struct NNodeDownAdditionsLB : Parameter<int> { using Parameter::Parameter; }; struct NNodeRightAdditionsLB : Parameter<int> { using Parameter::Parameter; }; struct NNodeDownCopiesLB : Parameter<int> { using Parameter::Parameter; }; struct NNodeRightCopiesLB : Parameter<int> { using Parameter::Parameter; }; struct NLinkAdditionsLB : Parameter<int> { using Parameter::Parameter; }; struct NLinkCopiesLB : Parameter<int> { using Parameter::Parameter; }; struct NNodeDeletionsLB : Parameter<int> { using Parameter::Parameter; }; struct NLinkDeletionsLB : Parameter<int> { using Parameter::Parameter; }; struct NNodeActivationChangesLB : Parameter<int> { using Parameter::Parameter; }; struct NNodeIntegrationChangesLB : Parameter<int> { using Parameter::Parameter; }; struct NModuleAdditionsLB : Parameter<int> { using Parameter::Parameter; }; struct NModuleCopiesLB : Parameter<int> { using Parameter::Parameter; }; struct NModuleDeletionsLB : Parameter<int> { using Parameter::Parameter; }; struct NNodeDownAdditionsUB : Parameter<int> { using Parameter::Parameter; }; struct NNodeRightAdditionsUB : Parameter<int> { using Parameter::Parameter; }; struct NNodeDownCopiesUB : Parameter<int> { using Parameter::Parameter; }; struct NNodeRightCopiesUB : Parameter<int> { using Parameter::Parameter; }; struct NLinkAdditionsUB : Parameter<int> { using Parameter::Parameter; }; struct NLinkCopiesUB : Parameter<int> { using Parameter::Parameter; }; struct NNodeDeletionsUB : Parameter<int> { using Parameter::Parameter; }; struct NLinkDeletionsUB : Parameter<int> { using Parameter::Parameter; }; struct NNodeActivationChangesUB : Parameter<int> { using Parameter::Parameter; }; struct NNodeIntegrationChangesUB : Parameter<int> { using Parameter::Parameter; }; struct NModuleAdditionsUB : Parameter<int> { using Parameter::Parameter; }; struct NModuleCopiesUB : Parameter<int> { using Parameter::Parameter; }; struct NModuleDeletionsUB : Parameter<int> { using Parameter::Parameter; }; struct SetModificationRateFixed : Parameter<bool> { using Parameter::Parameter; }; struct SetModificationRateByPrevError : Parameter<bool> { using Parameter::Parameter; }; } namespace ModelTrainer { struct BatchSize : Parameter<int> { using Parameter::Parameter; }; struct MemorySize : Parameter<int> { using Parameter::Parameter; }; struct NEpochsTraining : Parameter<int> { using Parameter::Parameter; }; struct NEpochsValidation : Parameter<int> { using Parameter::Parameter; }; struct NEpochsEvaluation : Parameter<int> { using Parameter::Parameter; }; struct Verbosity : Parameter<int> { using Parameter::Parameter; }; struct LoggingTraining : Parameter<bool> { using Parameter::Parameter; }; struct LoggingValidation : Parameter<bool> { using Parameter::Parameter; }; struct LoggingEvaluation : Parameter<bool> { using Parameter::Parameter; }; struct NTBTTSteps : Parameter<int> { using Parameter::Parameter; }; struct NTETTSteps : Parameter<int> { using Parameter::Parameter; }; struct FindCycles : Parameter<bool> { using Parameter::Parameter; }; struct FastInterpreter : Parameter<bool> { using Parameter::Parameter; }; struct PreserveOoO : Parameter<bool> { using Parameter::Parameter; }; struct InterpretModel : Parameter<bool> { using Parameter::Parameter; }; struct ResetModel : Parameter<bool> { using Parameter::Parameter; }; struct ResetInterpreter : Parameter<bool> { using Parameter::Parameter; }; struct LossFunction : Parameter<std::string> { using Parameter::Parameter; }; /// Model building struct NHidden0 : Parameter<int> { using Parameter::Parameter; }; struct NHidden1 : Parameter<int> { using Parameter::Parameter; }; struct NHidden2 : Parameter<int> { using Parameter::Parameter; }; struct LossFncWeight0 : Parameter<float> { using Parameter::Parameter; }; struct LossFncWeight1 : Parameter<float> { using Parameter::Parameter; }; struct LossFncWeight2 : Parameter<float> { using Parameter::Parameter; }; struct AddGaussian : Parameter<bool> { using Parameter::Parameter; }; struct AddMixedGaussian : Parameter<bool> { using Parameter::Parameter; }; struct AddCategorical : Parameter<bool> { using Parameter::Parameter; }; struct LearningRate : Parameter<float> { using Parameter::Parameter; }; struct GradientClipping : Parameter<float> { using Parameter::Parameter; }; struct KLDivergenceWarmup : Parameter<bool> { using Parameter::Parameter; }; struct NEncodingsContinuous : Parameter<int> { using Parameter::Parameter; }; struct NEncodingsCategorical : Parameter<int> { using Parameter::Parameter; }; struct BetaC : Parameter<float> { using Parameter::Parameter; }; struct BetaD : Parameter<float> { using Parameter::Parameter; }; struct CapacityC : Parameter<float> { using Parameter::Parameter; }; struct CapacityD : Parameter<float> { using Parameter::Parameter; }; } namespace Examples { struct NMask : Parameter<int> { using Parameter::Parameter; }; struct SequenceLength : Parameter<int> { using Parameter::Parameter; }; struct SimulationType : Parameter<std::string> { using Parameter::Parameter; }; struct ModelType : Parameter<std::string> { using Parameter::Parameter; }; struct BiochemicalRxnsFilename : Parameter<std::string> { using Parameter::Parameter; }; struct SupervisionWarmup : Parameter<bool> { using Parameter::Parameter; }; struct SupervisionPercent : Parameter<float> { using Parameter::Parameter; }; } } /// Helper method to statically deduce the size of a tuple template<class Tuple> constexpr size_t sizeOfParameters(const Tuple& t) { return std::tuple_size<Tuple>::value; } /* @brief Helper method to parse the command line arguments @param[in] argc @param[in] argv @param[in,out] id @param[in,out] parameters_file */ void parseCommandLineArguments(int argc, char** argv, int& id, std::string& parameters_file) { if (argc >= 2) { try { id = std::stoi(argv[1]); } catch (std::exception & e) { std::cout << e.what() << std::endl; } } if (argc >= 3) { parameters_file = std::string(argv[2]); } } /* @brief Struct to load parameters from csv */ struct LoadParametersFromCsv { LoadParametersFromCsv(const int& id, const std::string& parameters_filename) :id_(id), parameters_filename_(parameters_filename) {} int id_; std::string parameters_filename_; /* @brief Load the parameters from file @param[in] argc @param[in] argv @param[in,out] id @param[in,out] parameters_file */ template<class ...ParameterTypes> std::tuple<ParameterTypes...> operator()(ParameterTypes&... args) { auto parameters = std::make_tuple(args...); // Read in the parameters io::CSVReader<sizeOfParameters(parameters)> parameters_in(parameters_filename_); EvoNet::apply([&parameters_in](auto&& ...args) { parameters_in.read_header(io::ignore_extra_column, args.name_ ...); }, parameters); while (EvoNet::apply([&parameters_in](auto&& ...args) { return parameters_in.read_row(args.s_ ...); }, parameters)) { if (std::to_string(id_) == std::get<EvoNetParameters::General::ID>(parameters).s_) { //EvoNet::apply([](auto&& ...args) {((args.set()), ...); }, parameters); // C++17 EvoNet::apply([](auto&& ...args) { // C++11/CUDA using expander = int[]; (void)expander {0, (void(args.set()), 0)...}; }, parameters); break; } } // Print the read in parameters to the screen //EvoNet::apply([](auto&&... args) {((std::cout << args << std::endl), ...); }, parameters); // C++17 EvoNet::apply([](auto&&... args) { // C++11/CUDA using expander = int[]; (void)expander { 0, (void(std::cout << args << std::endl), 0)... }; }, parameters); return parameters; } }; /// Helper method to set the PopulationTrainer parameters template<typename PopulationTrainerT, class ...ParameterTypes> void setPopulationTrainerParameters(PopulationTrainerT& population_trainer, const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); // set the population trainer parameters population_trainer.setNGenerations(std::get<EvoNetParameters::PopulationTrainer::NGenerations>(parameters).get()); population_trainer.setPopulationSize(std::get<EvoNetParameters::PopulationTrainer::PopulationSize>(parameters).get()); population_trainer.setNReplicatesPerModel(std::get<EvoNetParameters::PopulationTrainer::NReplicatesPerModel>(parameters).get()); population_trainer.setNTop(std::get<EvoNetParameters::PopulationTrainer::NTop>(parameters).get()); population_trainer.setNRandom(std::get<EvoNetParameters::PopulationTrainer::NRandom>(parameters).get()); population_trainer.setLogging(std::get<EvoNetParameters::PopulationTrainer::Logging>(parameters).get()); population_trainer.setRemoveIsolatedNodes(std::get<EvoNetParameters::PopulationTrainer::RemoveIsolatedNodes>(parameters).get()); population_trainer.setPruneModelNum(std::get<EvoNetParameters::PopulationTrainer::PruneModelNum>(parameters).get()); population_trainer.setCheckCompleteModelInputToOutput(std::get<EvoNetParameters::PopulationTrainer::CheckCompleteModelInputToOutput>(parameters).get()); population_trainer.setResetModelCopyWeights(std::get<EvoNetParameters::PopulationTrainer::ResetModelCopyWeights>(parameters).get()); population_trainer.setResetModelTemplateWeights(std::get<EvoNetParameters::PopulationTrainer::ResetModelTemplateWeights>(parameters).get()); population_trainer.set_population_size_fixed_ = std::get<EvoNetParameters::PopulationTrainer::SetPopulationSizeFixed>(parameters).get(); population_trainer.set_population_size_doubling_ = std::get<EvoNetParameters::PopulationTrainer::SetPopulationSizeDoubling>(parameters).get(); population_trainer.set_training_steps_by_model_size_ = std::get<EvoNetParameters::PopulationTrainer::SetTrainingStepsByModelSize>(parameters).get(); } /// Helper method to set the ModelReplicator parameters template<typename ModelReplicatorT, class ...ParameterTypes> void setModelReplicatorParameters(ModelReplicatorT& model_replicator, const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); // set the model replicator parameters model_replicator.setNodeActivations({ std::make_pair(std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>())), std::make_pair(std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>())), std::make_pair(std::make_shared<ELUOp<float>>(ELUOp<float>()), std::make_shared<ELUGradOp<float>>(ELUGradOp<float>())), std::make_pair(std::make_shared<SigmoidOp<float>>(SigmoidOp<float>()), std::make_shared<SigmoidGradOp<float>>(SigmoidGradOp<float>())), std::make_pair(std::make_shared<TanHOp<float>>(TanHOp<float>()), std::make_shared<TanHGradOp<float>>(TanHGradOp<float>()))//, //std::make_pair(std::make_shared<ExponentialOp<float>>(ExponentialOp<float>()), std::make_shared<ExponentialGradOp<float>>(ExponentialGradOp<float>())), //std::make_pair(std::make_shared<LogOp<float>>(LogOp<float>()), std::make_shared<LogGradOp<float>>(LogGradOp<float>())), //std::make_pair(std::shared_ptr<ActivationOp<float>>(new InverseOp<float>()), std::shared_ptr<ActivationOp<float>>(new InverseGradOp<float>())) }); model_replicator.setNodeIntegrations({ std::make_tuple(std::make_shared<ProdOp<float>>(ProdOp<float>()), std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()), std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>())), std::make_tuple(std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())), //std::make_tuple(std::make_shared<MeanOp<float>>(MeanOp<float>()), std::make_shared<MeanErrorOp<float>>(MeanErrorOp<float>()), std::make_shared<MeanWeightGradO<float>>(MeanWeightGradOp<float>())), //std::make_tuple(std::make_shared<VarModOp<float>>(VarModOp<float>()), std::make_shared<VarModErrorOp<float>>(VarModErrorOp<float>()), std::make_shared<VarModWeightGradOp<float>>(VarModWeightGradOp<float>())), //std::make_tuple(std::make_shared<CountOp<float>>(CountOp<float>()), std::make_shared<CountErrorOp<float>>(CountErrorOp<float>()), std::make_shared<CountWeightGradOp<float>>(CountWeightGradOp<float>())) }); model_replicator.set_modification_rate_by_prev_error_ = std::get<EvoNetParameters::ModelReplicator::SetModificationRateByPrevError>(parameters).get(); model_replicator.set_modification_rate_fixed_ = std::get<EvoNetParameters::ModelReplicator::SetModificationRateFixed>(parameters).get(); model_replicator.setRandomModifications( std::make_pair(std::get<EvoNetParameters::ModelReplicator::NNodeDownAdditionsLB>(parameters).get(), std::get<EvoNetParameters::ModelReplicator::NNodeDownAdditionsUB>(parameters).get()), std::make_pair(std::get<EvoNetParameters::ModelReplicator::NNodeRightAdditionsLB>(parameters).get(), std::get<EvoNetParameters::ModelReplicator::NNodeRightAdditionsUB>(parameters).get()), std::make_pair(std::get<EvoNetParameters::ModelReplicator::NNodeDownCopiesLB>(parameters).get(), std::get<EvoNetParameters::ModelReplicator::NNodeDownCopiesUB>(parameters).get()), std::make_pair(std::get<EvoNetParameters::ModelReplicator::NNodeRightCopiesLB>(parameters).get(), std::get<EvoNetParameters::ModelReplicator::NNodeRightCopiesUB>(parameters).get()), std::make_pair(std::get<EvoNetParameters::ModelReplicator::NLinkAdditionsLB>(parameters).get(), std::get<EvoNetParameters::ModelReplicator::NLinkAdditionsUB>(parameters).get()), std::make_pair(std::get<EvoNetParameters::ModelReplicator::NLinkCopiesLB>(parameters).get(), std::get<EvoNetParameters::ModelReplicator::NLinkCopiesUB>(parameters).get()), std::make_pair(std::get<EvoNetParameters::ModelReplicator::NNodeDeletionsLB>(parameters).get(), std::get<EvoNetParameters::ModelReplicator::NNodeDeletionsUB>(parameters).get()), std::make_pair(std::get<EvoNetParameters::ModelReplicator::NLinkDeletionsLB>(parameters).get(), std::get<EvoNetParameters::ModelReplicator::NLinkDeletionsUB>(parameters).get()), std::make_pair(std::get<EvoNetParameters::ModelReplicator::NNodeActivationChangesLB>(parameters).get(), std::get<EvoNetParameters::ModelReplicator::NNodeActivationChangesUB>(parameters).get()), std::make_pair(std::get<EvoNetParameters::ModelReplicator::NNodeIntegrationChangesLB>(parameters).get(), std::get<EvoNetParameters::ModelReplicator::NNodeIntegrationChangesUB>(parameters).get()), std::make_pair(std::get<EvoNetParameters::ModelReplicator::NModuleAdditionsLB>(parameters).get(), std::get<EvoNetParameters::ModelReplicator::NModuleAdditionsUB>(parameters).get()), std::make_pair(std::get<EvoNetParameters::ModelReplicator::NModuleCopiesLB>(parameters).get(), std::get<EvoNetParameters::ModelReplicator::NModuleCopiesUB>(parameters).get()), std::make_pair(std::get<EvoNetParameters::ModelReplicator::NModuleDeletionsLB>(parameters).get(), std::get<EvoNetParameters::ModelReplicator::NModuleDeletionsUB>(parameters).get())); } /// Helper method to set the number of threads and assign the resources for the model interpreters template<typename ModelInterpreterT, class ...ParameterTypes> void setModelInterpreterParameters(std::vector<ModelInterpreterT>& model_interpreters, const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); // define the multithreading parameters const int n_hard_threads = std::thread::hardware_concurrency(); const int n_threads = (std::get<EvoNetParameters::PopulationTrainer::NInterpreters>(parameters).get() > n_hard_threads) ? n_hard_threads : std::get<EvoNetParameters::PopulationTrainer::NInterpreters>(parameters).get(); // the number of threads // define the model trainers and resources for the trainers for (size_t i = 0; i < n_threads; ++i) { ModelResources model_resources = { ModelDevice(std::get<EvoNetParameters::Main::DeviceId>(parameters).get(), 1) }; ModelInterpreterT model_interpreter(model_resources); model_interpreters.push_back(model_interpreter); } } /// Helper method to set the ModelTrainer parameters template<typename ModelTrainerT, class ...ParameterTypes> void setModelTrainerParameters(ModelTrainerT& model_trainer, const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); // set the model trainer model_trainer.setBatchSize(std::get<EvoNetParameters::ModelTrainer::BatchSize>(parameters).get()); model_trainer.setMemorySize(std::get<EvoNetParameters::ModelTrainer::MemorySize>(parameters).get()); model_trainer.setNEpochsTraining(std::get<EvoNetParameters::ModelTrainer::NEpochsTraining>(parameters).get()); model_trainer.setNEpochsValidation(std::get<EvoNetParameters::ModelTrainer::NEpochsValidation>(parameters).get()); model_trainer.setNEpochsEvaluation(std::get<EvoNetParameters::ModelTrainer::NEpochsEvaluation>(parameters).get()); model_trainer.setNTBPTTSteps(std::get<EvoNetParameters::ModelTrainer::NTBTTSteps>(parameters).get()); model_trainer.setNTETTSteps(std::get<EvoNetParameters::ModelTrainer::NTETTSteps>(parameters).get()); model_trainer.setVerbosityLevel(std::get<EvoNetParameters::ModelTrainer::Verbosity>(parameters).get()); model_trainer.setLogging(std::get<EvoNetParameters::ModelTrainer::LoggingTraining>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::LoggingValidation>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::LoggingEvaluation>(parameters).get()); model_trainer.setFindCycles(std::get<EvoNetParameters::ModelTrainer::FindCycles>(parameters).get()); model_trainer.setFastInterpreter(std::get<EvoNetParameters::ModelTrainer::FastInterpreter>(parameters).get()); model_trainer.setPreserveOoO(std::get<EvoNetParameters::ModelTrainer::PreserveOoO>(parameters).get()); model_trainer.setInterpretModel(std::get<EvoNetParameters::ModelTrainer::InterpretModel>(parameters).get()); model_trainer.setResetModel(std::get<EvoNetParameters::ModelTrainer::ResetModel>(parameters).get()); model_trainer.setResetInterpreter(std::get<EvoNetParameters::ModelTrainer::ResetInterpreter>(parameters).get()); } /// Helper method to read in a trained model template<typename ModelT, typename InterpreterT, typename ModelFileT, typename InterpreterFileT, class ...ParameterTypes> void loadModelFromParameters(ModelT& model, InterpreterT& interpreter, ModelFileT& model_file, InterpreterFileT& interpreter_file, const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); // read in the trained model if (std::get<EvoNetParameters::Main::LoadModelBinary>(parameters).get()) { std::cout << "Reading in the model from binary..." << std::endl; model_file.loadModelBinary(std::get<EvoNetParameters::General::OutputDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_model.binary", model); model.setId(1); interpreter_file.loadModelInterpreterBinary(std::get<EvoNetParameters::General::OutputDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_interpreter.binary", interpreter); } else if (std::get<EvoNetParameters::Main::LoadModelCsv>(parameters).get()) { // read in the trained model std::cout << "Reading in the model from csv..." << std::endl; model_file.loadModelCsv(std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_nodes.csv", std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_links.csv", std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_weights.csv", model, true, true, true); model.setId(1); } } /// Helper method to train, evaluate, or evolve from parameters template<typename TensorT, typename ModelT, typename InterpreterT, typename ModelTrainerT, typename PopulationTrainerT, typename ModelReplicatorT, typename DataSimulatorT, typename ModelLoggerT, typename PopulationLoggerT, class ...ParameterTypes> void runTrainEvalEvoFromParameters(ModelT& model, std::vector<InterpreterT>& model_interpreters, ModelTrainerT& model_trainer, PopulationTrainerT& population_trainer, ModelReplicatorT& model_replicator, DataSimulatorT& data_simulator, ModelLoggerT& model_logger, PopulationLoggerT& population_logger, const std::vector<std::string>& input_nodes, const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); if (std::get<EvoNetParameters::Main::TrainModel>(parameters).get()) { // Train the model model.setName(model.getName() + "_train"); std::pair<std::vector<TensorT>, std::vector<TensorT>> model_errors = model_trainer.trainModel(model, data_simulator, input_nodes, model_logger, model_interpreters.front()); } else if (std::get<EvoNetParameters::Main::EvolveModel>(parameters).get()) { // Evolve the population std::vector<ModelT> population = { model }; std::vector<std::vector<std::tuple<int, std::string, TensorT>>> models_validation_errors_per_generation = population_trainer.evolveModels( population, std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::PopulationTrainer::PopulationName>(parameters).get(), //So that all output will be written to a specific directory model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, population_logger, input_nodes); //// Write the evolved population to disk //PopulationTrainerFile<float> population_trainer_file; //population_trainer_file.storeModels(population, std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::PopulationTrainer::PopulationName>(parameters).get()); //population_trainer_file.storeModelValidations(std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::PopulationTrainer::PopulationName>(parameters).get() + "Errors.csv", models_validation_errors_per_generation); } else if (std::get<EvoNetParameters::Main::EvaluateModel>(parameters).get()) { // Evaluate the model model.setName(model.getName() + "_evaluation"); Eigen::Tensor<TensorT, 4> model_output = model_trainer.evaluateModel(model, data_simulator, input_nodes, model_logger, model_interpreters.front()); } else if (std::get<EvoNetParameters::Main::EvaluateModels>(parameters).get()) { // Evaluate the population std::vector<Model<TensorT>> population = { model }; population_trainer.evaluateModels(population, std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::PopulationTrainer::PopulationName>(parameters).get(), model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, input_nodes); } } } #endif //EVONET_PARAMETERS_H<file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/PopulationTrainerExperimentalDefaultDevice.h> #include <EvoNet/ml/ModelTrainerExperimentalDefaultDevice.h> #include <EvoNet/ml/ModelReplicatorExperimental.h> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/ml/Model.h> #include <EvoNet/simulator/AddProbSimulator.h> #include <EvoNet/io/Parameters.h> #include <unsupported/Eigen/CXX11/Tensor> using namespace EvoNet; /* @brief Add problem genetic + deep learning algorithm examples Experiments: 1. addProb and single model training with the solution model initialized to the correct weights 2. addProb and single model training with solution model and weight dev from the correct weights 3. addProb and population training with the solution model as the population seed 4. addProb and population training with the minimal model as the population seed 5. addProb and single model training with the LSTM architecture 6. addProb and population training with the LSTM model as the population seed Hyper parameters: 1. Adam solver with a learning rate of 0.001 2. Batch size of 32 3. 5000 epochs (single model training); 50 epochs (population training) 4. 25 epochs testing */ template<typename TensorT> class DataSimulatorExt : public AddProbSimulator<TensorT> { public: void simulateData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); //// generate a new sequence //// TODO: ensure that the this->sequence_length_ >= memory_size! //Eigen::Tensor<TensorT, 1> random_sequence(this->sequence_length_); //Eigen::Tensor<TensorT, 1> mask_sequence(this->sequence_length_); //float result = this->AddProb(random_sequence, mask_sequence, this->n_mask_); // Generate the input and output data for training [BUG FREE] for (int batch_iter = 0; batch_iter<batch_size; ++batch_iter) { for (int epochs_iter = 0; epochs_iter<n_epochs; ++epochs_iter) { // generate a new sequence // TODO: ensure that the this->sequence_length_ >= memory_size! Eigen::Tensor<float, 1> random_sequence(this->sequence_length_); Eigen::Tensor<float, 1> mask_sequence(this->sequence_length_); float result = this->AddProb(random_sequence, mask_sequence, this->n_mask_); Eigen::Tensor<float, 1> cumulative(this->sequence_length_); cumulative.setZero(); float result_cumulative = 0.0; for (int memory_iter = 0; memory_iter<memory_size; ++memory_iter) { // determine the cumulative vector result_cumulative += random_sequence(memory_iter) * mask_sequence(memory_iter); cumulative(memory_iter) = result_cumulative; //std::cout << "result cumulative: " << result_cumulative << std::endl; // [TESTS: convert to a test!] } //for (int memory_iter = memory_size - 1; memory_iter >= 0; --memory_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { // assign the input sequences input_data(batch_iter, memory_iter, 0, epochs_iter) = random_sequence(memory_size - memory_iter - 1); // random sequence input_data(batch_iter, memory_iter, 1, epochs_iter) = mask_sequence(memory_size - memory_iter - 1); // mask sequence // assign the output output_data(batch_iter, memory_iter, 0, epochs_iter) = cumulative(memory_size - memory_iter - 1); //if (memory_iter == 0) // output_data(batch_iter, memory_iter, 0, epochs_iter) = result; //else // output_data(batch_iter, memory_iter, 0, epochs_iter) = 0.0; } } } //std::cout << "Input data: " << input_data << std::endl; // [TESTS: convert to a test!] //std::cout << "Output data: " << output_data << std::endl; // [TESTS: convert to a test!] time_steps.setConstant(1.0f); } void simulateTrainingData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) override { simulateData(input_data, output_data, time_steps); } void simulateValidationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) override { simulateData(input_data, output_data, time_steps); } void simulateEvaluationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 3>& time_steps)override {}; void simulateData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 3>& metric_data, Eigen::Tensor<TensorT, 2>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); // Generate the input and output data for training [BUG FREE] for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { // generate a new sequence // TODO: ensure that the this->sequence_length_ >= memory_size! Eigen::Tensor<float, 1> random_sequence(this->sequence_length_); Eigen::Tensor<float, 1> mask_sequence(this->sequence_length_); float result = this->AddProb(random_sequence, mask_sequence, this->n_mask_); Eigen::Tensor<float, 1> cumulative(this->sequence_length_); cumulative.setZero(); float result_cumulative = 0.0; for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { // determine the cumulative vector result_cumulative += random_sequence(memory_iter) * mask_sequence(memory_iter); cumulative(memory_iter) = result_cumulative; } //for (int memory_iter = memory_size - 1; memory_iter >= 0; --memory_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { // assign the input sequences input_data(batch_iter, memory_iter, 0) = random_sequence(memory_size - memory_iter - 1); // random sequence input_data(batch_iter, memory_iter, 1) = mask_sequence(memory_size - memory_iter - 1); // mask sequence // assign the output output_data(batch_iter, memory_iter, 0) = cumulative(memory_size - memory_iter - 1); metric_data(batch_iter, memory_iter, 0) = cumulative(memory_size - memory_iter - 1); } } time_steps.setConstant(1.0f); } void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 3>& metric_data, Eigen::Tensor<TensorT, 2>& time_steps)override { simulateData(input_data, output_data, metric_data, time_steps); } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 3>& metric_data, Eigen::Tensor<TensorT, 2>& time_steps)override { simulateData(input_data, output_data, metric_data, time_steps); } void simulateEvaluationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& metric_data, Eigen::Tensor<TensorT, 2>& time_steps)override { simulateData(input_data, metric_data, Eigen::Tensor<TensorT, 3>(), time_steps); } }; // Extended classes template<typename TensorT> class ModelTrainerExt : public ModelTrainerExperimentalDefaultDevice<TensorT> { public: /* @brief Minimal network */ void makeModelMinimal(Model<TensorT>& model) { Node<TensorT> i_rand, i_mask, h, o, output, h_bias, o_bias; Link Link_i_rand_to_h, Link_i_mask_to_h, Link_h_to_o, Link_o_to_output, Link_h_bias_to_h, Link_o_bias_to_o; Weight<TensorT> Weight_i_rand_to_h, Weight_i_mask_to_h, Weight_h_to_o, Weight_o_to_output, Weight_h_bias_to_h, Weight_o_bias_to_o; // Nodes i_rand = Node<TensorT>("Input_000000000000", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); i_mask = Node<TensorT>("Input_000000000001", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); h = Node<TensorT>("h", NodeType::hidden, NodeStatus::deactivated, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); o = Node<TensorT>("o", NodeType::unmodifiable, NodeStatus::deactivated, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); output = Node<TensorT>("Output_000000000000", NodeType::output, NodeStatus::deactivated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); h_bias = Node<TensorT>("h_bias", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); o_bias = Node<TensorT>("o_bias", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); output.setLayerName("Output"); // weights std::shared_ptr<WeightInitOp<TensorT>> weight_init = std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(1.0)); std::shared_ptr<SolverOp<TensorT>> solver = std::make_shared<SGDOp<TensorT>>(SGDOp<TensorT>(1e-3, 0.9, 10)); Weight_i_rand_to_h = Weight<TensorT>("Weight_i_rand_to_h", weight_init, solver); Weight_i_mask_to_h = Weight<TensorT>("Weight_i_mask_to_h", weight_init, solver); Weight_h_to_o = Weight<TensorT>("Weight_h_to_o", weight_init, solver); Weight_o_to_output = Weight<TensorT>("Weight_o_to_output", std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); Weight_h_bias_to_h = Weight<TensorT>("Weight_h_bias_to_h", weight_init, solver); Weight_o_bias_to_o = Weight<TensorT>("Weight_o_bias_to_o", std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0.0)), solver); weight_init.reset(); solver.reset(); // links Link_i_rand_to_h = Link("Link_i_rand_to_h", "Input_000000000000", "h", "Weight_i_rand_to_h"); Link_i_mask_to_h = Link("Link_i_mask_to_h", "Input_000000000001", "h", "Weight_i_mask_to_h"); Link_h_to_o = Link("Link_h_to_o", "h", "o", "Weight_h_to_o"); Link_o_to_output = Link("Link_o_to_output", "o", "Output_000000000000", "Weight_o_to_output"); Link_h_bias_to_h = Link("Link_h_bias_to_h", "h_bias", "h", "Weight_h_bias_to_h"); Link_o_bias_to_o = Link("Link_o_bias_to_o", "o_bias", "o", "Weight_o_bias_to_o"); // add nodes, links, and weights to the model model.setName("MemoryCell"); model.addNodes({ i_rand, i_mask, h, o, output//, h_bias, o_bias }); model.addWeights({ Weight_i_rand_to_h, Weight_i_mask_to_h, Weight_h_to_o, Weight_o_to_output//, Weight_h_bias_to_h, Weight_o_bias_to_o }); model.addLinks({ Link_i_rand_to_h, Link_i_mask_to_h, Link_h_to_o, Link_o_to_output//, Link_h_bias_to_h, Link_o_bias_to_o }); model.setInputAndOutputNodes(); } /* @brief Minimal network required to solve the addition problem */ void makeModelSolution(Model<TensorT>& model, bool init_weight_soln = true) { Node<TensorT> i_rand, i_mask, h, m, mr, o, output, h_bias, m_bias, o_bias; Link Link_i_rand_to_h, Link_i_mask_to_h, Link_h_to_m, Link_m_to_o, Link_m_to_mr, Link_mr_to_m, Link_h_bias_to_h, Link_m_bias_to_m, Link_o_bias_to_o, Link_o_to_output; Weight<TensorT> Weight_i_rand_to_h, Weight_i_mask_to_h, Weight_h_to_m, Weight_m_to_o, Weight_m_to_mr, Weight_mr_to_m, Weight_h_bias_to_h, Weight_m_bias_to_m, Weight_o_bias_to_o, Weight_o_to_output; // Nodes i_rand = Node<TensorT>("Input_000000000000", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); i_mask = Node<TensorT>("Input_000000000001", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); h = Node<TensorT>("h", NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<ProdOp<TensorT>>(ProdOp<TensorT>()),std::make_shared<ProdErrorOp<TensorT>>(ProdErrorOp<TensorT>()), std::make_shared<ProdWeightGradOp<TensorT>>(ProdWeightGradOp<TensorT>())); m = Node<TensorT>("m", NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); mr = Node<TensorT>("mr", NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); o = Node<TensorT>("o", NodeType::unmodifiable, NodeStatus::deactivated, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); output = Node<TensorT>("Output_000000000000", NodeType::output, NodeStatus::deactivated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); h_bias = Node<TensorT>("h_bias", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); m_bias = Node<TensorT>("m_bias", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); o_bias = Node<TensorT>("o_bias", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); output.setLayerName("Output"); // weights std::shared_ptr<WeightInitOp<TensorT>> weight_init; auto solver = std::make_shared<SGDOp<TensorT>>(SGDOp<TensorT>(1e-3, 0.9, 10)); if (init_weight_soln) { weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); //solution } else { weight_init = std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(1.0)); // will not converge //weight_init = std::make_shared<RangeWeightInitOp<TensorT>>(RangeWeightInitOp<TensorT>(0.5, 1.5)); // will converge with ADAM learning_rate < 1e-6 } Weight_i_rand_to_h = Weight<TensorT>("Weight_i_rand_to_h", weight_init, solver); Weight_i_mask_to_h = Weight<TensorT>("Weight_i_mask_to_h", weight_init, solver); Weight_h_to_m = Weight<TensorT>("Weight_h_to_m", weight_init, solver); Weight_m_to_mr = Weight<TensorT>("Weight_m_to_mr", std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); Weight_mr_to_m = Weight<TensorT>("Weight_mr_to_m", std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); Weight_m_to_o = Weight<TensorT>("Weight_m_to_o", weight_init, solver); Weight_o_to_output = Weight<TensorT>("Weight_o_to_output", std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); Weight_h_bias_to_h = Weight<TensorT>("Weight_h_bias_to_h", std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0.0)), solver); Weight_m_bias_to_m = Weight<TensorT>("Weight_m_bias_to_m", std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0.0)), solver); Weight_o_bias_to_o = Weight<TensorT>("Weight_o_bias_to_o", std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(0.0)), solver); weight_init.reset(); solver.reset(); // links Link_i_rand_to_h = Link("Link_i_rand_to_h", "Input_000000000000", "h", "Weight_i_rand_to_h"); Link_i_mask_to_h = Link("Link_i_mask_to_h", "Input_000000000001", "h", "Weight_i_mask_to_h"); Link_h_to_m = Link("Link_h_to_m", "h", "m", "Weight_h_to_m"); Link_m_to_o = Link("Link_m_to_o", "m", "o", "Weight_m_to_o"); Link_o_to_output = Link("Link_o_to_output", "o", "Output_000000000000", "Weight_o_to_output"); Link_m_to_mr = Link("Link_m_to_mr", "m", "mr", "Weight_m_to_mr"); Link_mr_to_m = Link("Link_mr_to_m", "mr", "m", "Weight_mr_to_m"); //Link_m_to_m = Link("Link_m_to_m", "m", "m", "Weight_m_to_m"); Link_h_bias_to_h = Link("Link_h_bias_to_h", "h_bias", "h", "Weight_h_bias_to_h"); Link_m_bias_to_m = Link("Link_m_bias_to_m", "m_bias", "m", "Weight_m_bias_to_m"); Link_o_bias_to_o = Link("Link_o_bias_to_o", "o_bias", "o", "Weight_o_bias_to_o"); // add nodes, links, and weights to the model model.setName("MemoryCell"); model.addNodes({ i_rand, i_mask, h, m, mr, o, output//, //h_bias, m_bias, o_bias }); model.addWeights({ Weight_i_rand_to_h, Weight_i_mask_to_h, Weight_h_to_m, Weight_m_to_o, Weight_m_to_mr, Weight_mr_to_m, Weight_o_to_output//, //Weight_h_bias_to_h, //Weight_m_bias_to_m, //Weight_o_bias_to_o }); model.addLinks({ Link_i_rand_to_h, Link_i_mask_to_h, Link_h_to_m, Link_m_to_o, Link_m_to_mr, Link_mr_to_m, Link_o_to_output//, //Link_h_bias_to_h, //Link_m_bias_to_m, //Link_o_bias_to_o }); model.setInputAndOutputNodes(); } /* @brief LSTM implementation References: Hochreiter et al. "Long Short-Term Memory". Neural Computation 9, 1735–1780 (1997) Chung et al. "Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling". 2014. arXiv:1412.3555v1 GRU implementation References: Cho et al. "Learning Phrase Representations using RNN Encoder–Decoder for Statistical Machine Translation". 2014. arXiv:1406.1078v3 Chung et al. "Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling". 2014. arXiv:1412.3555v1 */ void makeModelLSTM(Model<TensorT>& model, const int& n_inputs, int n_blocks = 2, int n_cells = 2, bool add_forget_gate = false, bool specify_layers = false) { model.setId(0); model.setName("LSTM"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layers); // Define the activation std::shared_ptr<ActivationOp<TensorT>> activation = std::make_shared<TanHOp<TensorT>>(TanHOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_grad = std::make_shared<TanHGradOp<TensorT>>(TanHGradOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_output = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_output_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<SGDOp<TensorT>>(SGDOp<TensorT>(1e-3, 0.9, 10)); // Add the LSTM layer(s) std::vector<std::string> node_names = model_builder.addLSTM(model, "LSTM-01", "LSTM-01", node_names_input, n_blocks, n_cells, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_input.size() + n_blocks) / 2, 1)), solver_op, 0.0f, 0.0f, true, add_forget_gate, 1, specify_layers); // Add a final output layer node_names = model_builder.addFullyConnected(model, "FC-Out", "FC-Out", node_names, 1, activation_output, activation_output_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 2)), solver_op, 0.0f, 0.0f, false, true); node_names = model_builder.addSinglyConnected(model, "Output", "Output", node_names, 1, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } }; template<typename TensorT> class ModelReplicatorExt : public ModelReplicatorExperimental<TensorT> {}; template<typename TensorT> class PopulationTrainerExt : public PopulationTrainerExperimentalDefaultDevice<TensorT> {}; template<class ...ParameterTypes> void main_(const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); // define the population trainer parameters PopulationTrainerExt<float> population_trainer; setPopulationTrainerParameters(population_trainer, args...); // define the population logger PopulationLogger<float> population_logger(true, true); // define the input/output nodes std::vector<std::string> input_nodes = { "Input_000000000000", "Input_000000000001" }; std::vector<std::string> output_nodes = { "Output_000000000000" }; // define the data simulator DataSimulatorExt<float> data_simulator; data_simulator.n_mask_ = std::get<EvoNetParameters::Examples::NMask>(parameters).get(); data_simulator.sequence_length_ = std::get<EvoNetParameters::Examples::SequenceLength>(parameters).get(); // define the model interpreters std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; setModelInterpreterParameters(model_interpreters, args...); // define the model trainer ModelTrainerExt<float> model_trainer; setModelTrainerParameters(model_trainer, args...); std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper2; loss_function_helper2.output_nodes_ = output_nodes; loss_function_helper2.loss_functions_ = { std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-24, 1.0)) }; loss_function_helper2.loss_function_grads_ = { std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-24, 1.0)) }; loss_function_helpers.push_back(loss_function_helper2); model_trainer.setLossFunctionHelpers(loss_function_helpers); std::vector<MetricFunctionHelper<float>> metric_function_helpers; MetricFunctionHelper<float> metric_function_helper1; metric_function_helper1.output_nodes_ = output_nodes; metric_function_helper1.metric_functions_ = { std::make_shared<EuclideanDistOp<float>>(EuclideanDistOp<float>("Mean")), std::make_shared<EuclideanDistOp<float>>(EuclideanDistOp<float>("Var")) }; metric_function_helper1.metric_names_ = { "EuclideanDist-Mean", "EuclideanDist-Var" }; metric_function_helpers.push_back(metric_function_helper1); model_trainer.setMetricFunctionHelpers(metric_function_helpers); // define the model logger ModelLogger<float> model_logger(true, true, false, false, false, false, false); // define the model replicator for growth mode ModelReplicatorExt<float> model_replicator; setModelReplicatorParameters(model_replicator, args...); // define the initial population Model<float> model; if (std::get<EvoNetParameters::Main::MakeModel>(parameters).get()) { std::cout << "Making the model..." << std::endl; if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "Minimal") model_trainer.makeModelMinimal(model); else if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "Solution") model_trainer.makeModelSolution(model, false); else if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "LSTM") model_trainer.makeModelLSTM(model, input_nodes.size(), 1, 1, false); model.setId(0); } else { ModelFile<float> model_file; ModelInterpreterFileDefaultDevice<float> model_interpreter_file; loadModelFromParameters(model, model_interpreters.at(0), model_file, model_interpreter_file, args...); } model.setName(std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get()); //So that all output will be written to a specific directory // Run the training, evaluation, or evolution runTrainEvalEvoFromParameters<float>(model, model_interpreters, model_trainer, population_trainer, model_replicator, data_simulator, model_logger, population_logger, input_nodes, args...); } // Main int main(int argc, char** argv) { // Parse the user commands int id_int = -1; std::string parameters_filename = ""; parseCommandLineArguments(argc, argv, id_int, parameters_filename); // Set the parameter names and defaults EvoNetParameters::General::ID id("id", -1); EvoNetParameters::General::DataDir data_dir("data_dir", std::string("")); EvoNetParameters::Main::DeviceId device_id("device_id", 0); EvoNetParameters::Main::ModelName model_name("model_name", ""); EvoNetParameters::Main::MakeModel make_model("make_model", true); EvoNetParameters::Main::LoadModelCsv load_model_csv("load_model_csv", false); EvoNetParameters::Main::LoadModelBinary load_model_binary("load_model_binary", false); EvoNetParameters::Main::TrainModel train_model("train_model", true); EvoNetParameters::Main::EvolveModel evolve_model("evolve_model", false); EvoNetParameters::Main::EvaluateModel evaluate_model("evaluate_model", false); EvoNetParameters::Main::EvaluateModels evaluate_models("evaluate_models", false); EvoNetParameters::Examples::NMask n_mask("n_mask", 2); EvoNetParameters::Examples::SequenceLength sequence_length("sequence_length", 25); EvoNetParameters::Examples::ModelType model_type("model_type", "Solution"); EvoNetParameters::Examples::SimulationType simulation_type("simulation_type", ""); EvoNetParameters::Examples::BiochemicalRxnsFilename biochemical_rxns_filename("biochemical_rxns_filename", "iJO1366.csv"); EvoNetParameters::PopulationTrainer::PopulationName population_name("population_name", ""); EvoNetParameters::PopulationTrainer::NGenerations n_generations("n_generations", 1); EvoNetParameters::PopulationTrainer::NInterpreters n_interpreters("n_interpreters", 1); EvoNetParameters::PopulationTrainer::PruneModelNum prune_model_num("prune_model_num", 10); EvoNetParameters::PopulationTrainer::RemoveIsolatedNodes remove_isolated_nodes("remove_isolated_nodes", true); EvoNetParameters::PopulationTrainer::CheckCompleteModelInputToOutput check_complete_model_input_to_output("check_complete_model_input_to_output", true); EvoNetParameters::PopulationTrainer::PopulationSize population_size("population_size", 128); EvoNetParameters::PopulationTrainer::NTop n_top("n_top", 8); EvoNetParameters::PopulationTrainer::NRandom n_random("n_random", 8); EvoNetParameters::PopulationTrainer::NReplicatesPerModel n_replicates_per_model("n_replicates_per_model", 1); EvoNetParameters::PopulationTrainer::ResetModelCopyWeights reset_model_copy_weights("reset_model_copy_weights", true); EvoNetParameters::PopulationTrainer::ResetModelTemplateWeights reset_model_template_weights("reset_model_template_weights", true); EvoNetParameters::PopulationTrainer::Logging population_logging("population_logging", true); EvoNetParameters::PopulationTrainer::SetPopulationSizeFixed set_population_size_fixed("set_population_size_fixed", false); EvoNetParameters::PopulationTrainer::SetPopulationSizeDoubling set_population_size_doubling("set_population_size_doubling", true); EvoNetParameters::PopulationTrainer::SetTrainingStepsByModelSize set_training_steps_by_model_size("set_training_steps_by_model_size", false); EvoNetParameters::ModelTrainer::BatchSize batch_size("batch_size", 32); EvoNetParameters::ModelTrainer::MemorySize memory_size("memory_size", 64); EvoNetParameters::ModelTrainer::NEpochsTraining n_epochs_training("n_epochs_training", 1000); EvoNetParameters::ModelTrainer::NEpochsValidation n_epochs_validation("n_epochs_validation", 25); EvoNetParameters::ModelTrainer::NEpochsEvaluation n_epochs_evaluation("n_epochs_evaluation", 10); EvoNetParameters::ModelTrainer::NTBTTSteps n_tbtt_steps("n_tbtt_steps", 64); EvoNetParameters::ModelTrainer::NTETTSteps n_tett_steps("n_tett_steps", 64); EvoNetParameters::ModelTrainer::Verbosity verbosity("verbosity", 1); EvoNetParameters::ModelTrainer::LoggingTraining logging_training("logging_training", true); EvoNetParameters::ModelTrainer::LoggingValidation logging_validation("logging_validation", false); EvoNetParameters::ModelTrainer::LoggingEvaluation logging_evaluation("logging_evaluation", true); EvoNetParameters::ModelTrainer::FindCycles find_cycles("find_cycles", true); EvoNetParameters::ModelTrainer::FastInterpreter fast_interpreter("fast_interpreter", true); EvoNetParameters::ModelTrainer::PreserveOoO preserve_ooo("preserve_ooo", true); EvoNetParameters::ModelTrainer::InterpretModel interpret_model("interpret_model", true); EvoNetParameters::ModelTrainer::ResetModel reset_model("reset_model", false); EvoNetParameters::ModelTrainer::ResetInterpreter reset_interpreter("reset_interpreter", true); EvoNetParameters::ModelReplicator::NNodeDownAdditionsLB n_node_down_additions_lb("n_node_down_additions_lb", 0); EvoNetParameters::ModelReplicator::NNodeRightAdditionsLB n_node_right_additions_lb("n_node_right_additions_lb", 0); EvoNetParameters::ModelReplicator::NNodeDownCopiesLB n_node_down_copies_lb("n_node_down_copies_lb", 0); EvoNetParameters::ModelReplicator::NNodeRightCopiesLB n_node_right_copies_lb("n_node_right_copies_lb", 0); EvoNetParameters::ModelReplicator::NLinkAdditionsLB n_link_additons_lb("n_link_additons_lb", 0); EvoNetParameters::ModelReplicator::NLinkCopiesLB n_link_copies_lb("n_link_copies_lb", 0); EvoNetParameters::ModelReplicator::NNodeDeletionsLB n_node_deletions_lb("n_node_deletions_lb", 0); EvoNetParameters::ModelReplicator::NLinkDeletionsLB n_link_deletions_lb("n_link_deletions_lb", 0); EvoNetParameters::ModelReplicator::NNodeActivationChangesLB n_node_activation_changes_lb("n_node_activation_changes_lb", 0); EvoNetParameters::ModelReplicator::NNodeIntegrationChangesLB n_node_integration_changes_lb("n_node_integration_changes_lb", 0); EvoNetParameters::ModelReplicator::NModuleAdditionsLB n_module_additions_lb("n_module_additions_lb", 0); EvoNetParameters::ModelReplicator::NModuleCopiesLB n_module_copies_lb("n_module_copies_lb", 0); EvoNetParameters::ModelReplicator::NModuleDeletionsLB n_module_deletions_lb("n_module_deletions_lb", 0); EvoNetParameters::ModelReplicator::NNodeDownAdditionsUB n_node_down_additions_ub("n_node_down_additions_ub", 0); EvoNetParameters::ModelReplicator::NNodeRightAdditionsUB n_node_right_additions_ub("n_node_right_additions_ub", 0); EvoNetParameters::ModelReplicator::NNodeDownCopiesUB n_node_down_copies_ub("n_node_down_copies_ub", 0); EvoNetParameters::ModelReplicator::NNodeRightCopiesUB n_node_right_copies_ub("n_node_right_copies_ub", 0); EvoNetParameters::ModelReplicator::NLinkAdditionsUB n_link_additons_ub("n_link_additons_ub", 0); EvoNetParameters::ModelReplicator::NLinkCopiesUB n_link_copies_ub("n_link_copies_ub", 0); EvoNetParameters::ModelReplicator::NNodeDeletionsUB n_node_deletions_ub("n_node_deletions_ub", 0); EvoNetParameters::ModelReplicator::NLinkDeletionsUB n_link_deletions_ub("n_link_deletions_ub", 0); EvoNetParameters::ModelReplicator::NNodeActivationChangesUB n_node_activation_changes_ub("n_node_activation_changes_ub", 0); EvoNetParameters::ModelReplicator::NNodeIntegrationChangesUB n_node_integration_changes_ub("n_node_integration_changes_ub", 0); EvoNetParameters::ModelReplicator::NModuleAdditionsUB n_module_additions_ub("n_module_additions_ub", 0); EvoNetParameters::ModelReplicator::NModuleCopiesUB n_module_copies_ub("n_module_copies_ub", 0); EvoNetParameters::ModelReplicator::NModuleDeletionsUB n_module_deletions_ub("n_module_deletions_ub", 0); EvoNetParameters::ModelReplicator::SetModificationRateFixed set_modification_rate_fixed("set_modification_rate_fixed", false); EvoNetParameters::ModelReplicator::SetModificationRateByPrevError set_modification_rate_by_prev_error("set_modification_rate_by_prev_error", false); auto parameters = std::make_tuple(id, data_dir, device_id, model_name, make_model, load_model_csv, load_model_binary, train_model, evolve_model, evaluate_model, evaluate_models, n_mask, sequence_length, model_type, simulation_type, biochemical_rxns_filename, population_name, n_generations, n_interpreters, prune_model_num, remove_isolated_nodes, check_complete_model_input_to_output, population_size, n_top, n_random, n_replicates_per_model, reset_model_copy_weights, reset_model_template_weights, population_logging, set_population_size_fixed, set_population_size_doubling, set_training_steps_by_model_size, batch_size, memory_size, n_epochs_training, n_epochs_validation, n_epochs_evaluation, n_tbtt_steps, n_tett_steps, verbosity, logging_training, logging_validation, logging_evaluation, find_cycles, fast_interpreter, preserve_ooo, interpret_model, reset_model, reset_interpreter, n_node_down_additions_lb, n_node_right_additions_lb, n_node_down_copies_lb, n_node_right_copies_lb, n_link_additons_lb, n_link_copies_lb, n_node_deletions_lb, n_link_deletions_lb, n_node_activation_changes_lb, n_node_integration_changes_lb, n_module_additions_lb, n_module_copies_lb, n_module_deletions_lb, n_node_down_additions_ub, n_node_right_additions_ub, n_node_down_copies_ub, n_node_right_copies_ub, n_link_additons_ub, n_link_copies_ub, n_node_deletions_ub, n_link_deletions_ub, n_node_activation_changes_ub, n_node_integration_changes_ub, n_module_additions_ub, n_module_copies_ub, n_module_deletions_ub, set_modification_rate_fixed, set_modification_rate_by_prev_error); // Read in the parameters LoadParametersFromCsv loadParametersFromCsv(id_int, parameters_filename); parameters = EvoNet::apply([&loadParametersFromCsv](auto&& ...args) { return loadParametersFromCsv(args...); }, parameters); // Run the application EvoNet::apply([](auto&& ...args) { main_(args ...); }, parameters); return 0; }<file_sep>Build EvoNet ============================================================================= EvoNet is built using CMAKE. Please refer to the CMAKE documentation for your particular platform. Please refer to the CUDA documentation for your GPU architecture. <file_sep># -------------------------------------------------------------------------- # EvoNet: an evolutionary approach to optimize any task # -------------------------------------------------------------------------- # Copyright The EvoNet Team # Center for Biosustainability, Technical University of Denmark 2018-2021. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING # INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # -------------------------------------------------------------------------- # $Maintainer: <NAME> $ # $Authors: <NAME> $ # -------------------------------------------------------------------------- import sys, datetime, os, os.path file_name = 'Help' destination = '../src/smartpeak/include/EvoNet/ui' header_path = os.path.join(destination, file_name + '.hh') #TODO # writing to file with indentation def write_to_file(file_name, indent_count, source_str): while (indent_count > 0): source_str = ' ' + source_str indent_count -= 1 file_name.write(source_str) # writing copyright notice def include_copyright_notice(file_name, filename): file_name.write( '// -----------------------------------------------------------------------------\n//\ EvoNet: an evolutionary approach to optimize any task\n//\ --------------------------------------------------------------------------\n//\ Copyright The EvoNet Team\n//\ 2018-{current_year}.\n//\ \n//\ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"\n//\ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n//\ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n//\ ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING\n//\ INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n//\ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n//\ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n//\ OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n//\ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n//\ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n//\ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\ \n//\ --------------------------------------------------------------------------\n//\ THIS FILE IS AUTO GENERATED.\n//\ --------------------------------------------------------------------------\n\n' .format(current_year=str(datetime.datetime.now().year))) print ('Writing ' + header_path) with open(header_path, 'w') as header_file: t = 0 # writing copyright notice include_copyright_notice(header_file, file_name + '.h') # writing directives/includes header_file.write('#pragma once\n') header_file.write('#include <string>\n') header_file.write('#include <unordered_map>\n\n') # writing help write_to_file(header_file, t, 'namespace SmartPeak\n') write_to_file(header_file, t, '{\n') write_to_file(header_file, t + 1, '/**\n') write_to_file(header_file, t + 2, '@brief Documentation/Help for SmartPeakGUI\n') write_to_file(header_file, t + 1, '*/\n') write_to_file(header_file, t + 1, 'static std::unordered_map<std::string, std::string> tooltip_info\n') write_to_file(header_file, t + 1, '{\n') #TODO write_to_file(header_file, t + 2, '{') write_to_file(header_file, t + 1, '}') write_to_file(header_file, t, ',\n') write_to_file(header_file, t + 1, '};\n') write_to_file(header_file, t, '}\n') <file_sep> #------------------------------------------------------------------------------ # This cmake file enables the STL debug mode if (CMAKE_COMPILER_IS_GNUCXX) if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") # add compiler flag add_definitions(/D_GLIBCXX_DEBUG) message(STATUS "STL debug mode: ${STL_DEBUG}") else() message(WARNING "STL debug mode is supported for EvoNet debug mode only") endif() else() message(WARNING "STL debug mode is supported for compiler GCC only") endif() <file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/PopulationTrainerDefaultDevice.h> #include <EvoNet/ml/ModelTrainerDefaultDevice.h> #include <EvoNet/ml/ModelReplicator.h> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/ml/Model.h> #include <EvoNet/io/PopulationTrainerFile.h> #include <EvoNet/io/ModelInterpreterFileDefaultDevice.h> #include <EvoNet/io/ModelFile.h> #include <EvoNet/simulator/MNISTSimulator.h> #include <unsupported/Eigen/CXX11/Tensor> using namespace EvoNet; // Extended classes template<typename TensorT> class ModelTrainerExt : public ModelTrainerDefaultDevice<TensorT> { public: /* @brief Convolution classifier References: https://github.com/pytorch/examples/blob/master/mnist/main.py @param model The network model @param n_depth_1 The number of filters in the first Cov layer @param n_depth_2 The number of filters to create from each individual filter in the first Cov layer e.g., n_depth_1 = 32 and n_depth_2 = 2 the first Cov layer will have 32 filters and the second will have 64 layers @param n_fc The length of each fully connected layer @param add_feature_norm Optional normalization layer after each convolution */ void makeCovNet(Model<TensorT>& model, const int& n_inputs, const int& n_outputs, const int& n_depth_1 = 32, const int& n_depth_2 = 2, const int& n_depth_3 = 2, const int& n_fc_1 = 128, const int& n_fc_2 = 32, const int& filter_size = 5, const int& filter_stride = 1, const int& pool_size = 2, const int& pool_stride = 2, const bool& add_pool = true, const bool& add_feature_norm = false, const bool& specify_layers = false, const bool& share_weights = true) { model.setId(0); model.setName("CovNet"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layers); // Define the activation based on `add_feature_norm` std::shared_ptr<ActivationOp<TensorT>> activation, activation_grad; if (add_feature_norm) { activation = std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()); activation_grad = std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()); } else { activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); } // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8, 10)); // Add the first convolution -> max pool -> ReLU layers std::vector<std::vector<std::string>> node_names_l0; for (size_t d = 0; d < n_depth_1; ++d) { std::vector<std::string> node_names; std::string conv_name = "Conv0-" + std::to_string(d); node_names = model_builder.addConvolution(model, conv_name, conv_name, node_names_input, sqrt(node_names_input.size()), sqrt(node_names_input.size()), 0, 0, filter_size, filter_size, filter_stride, 0, 0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(filter_size * filter_size, 2)), solver_op, 0.0f, 0.0f, false, specify_layers, share_weights); if (add_feature_norm) { std::string norm_name = "Norm0-" + std::to_string(d); node_names = model_builder.addNormalization(model, norm_name, norm_name, node_names, specify_layers); std::string gain_name = "Gain0-" + std::to_string(d); node_names = model_builder.addSinglyConnected(model, gain_name, gain_name, node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } if (add_pool) { std::string pool_name = "Pool0-" + std::to_string(d); node_names = model_builder.addConvolution(model, pool_name, pool_name, node_names, sqrt(node_names.size()), sqrt(node_names.size()), 1, 1, pool_size, pool_size, pool_stride, 0, 0, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<MaxOp<TensorT>>(MaxOp<TensorT>()), std::make_shared<MaxErrorOp<TensorT>>(MaxErrorOp<TensorT>()), std::make_shared<MaxWeightGradOp<TensorT>>(MaxWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, 0.0, false, specify_layers); } node_names_l0.push_back(node_names); } // Add the second convolution -> max pool -> ReLU layers std::vector<std::vector<std::string>> node_names_l1; int l_cnt = 0; for (const std::vector<std::string>& node_names_l : node_names_l0) { for (size_t d = 0; d < n_depth_2; ++d) { std::vector<std::string> node_names; std::string conv_name = "Conv1-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addConvolution(model, conv_name, conv_name, node_names_l, sqrt(node_names_l.size()), sqrt(node_names_l.size()), 0, 0, filter_size, filter_size, filter_stride, 0, 0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(filter_size * filter_size, 2)), solver_op, 0.0f, 0.0f, false, specify_layers, share_weights); if (add_feature_norm) { std::string norm_name = "Norm1-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addNormalization(model, norm_name, norm_name, node_names, specify_layers); std::string gain_name = "Gain1-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addSinglyConnected(model, gain_name, gain_name, node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } if (add_pool) { std::string pool_name = "Pool1-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addConvolution(model, pool_name, pool_name, node_names, sqrt(node_names.size()), sqrt(node_names.size()), 1, 1, pool_size, pool_size, pool_stride, 0, 0, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<MaxOp<TensorT>>(MaxOp<TensorT>()), std::make_shared<MaxErrorOp<TensorT>>(MaxErrorOp<TensorT>()), std::make_shared<MaxWeightGradOp<TensorT>>(MaxWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, 0.0, false, specify_layers); } node_names_l1.push_back(node_names); } ++l_cnt; } // Add the third convolution -> max pool -> ReLU layers std::vector<std::vector<std::string>> node_names_l2; l_cnt = 0; for (const std::vector<std::string>& node_names_l : node_names_l1) { for (size_t d = 0; d < n_depth_3; ++d) { std::vector<std::string> node_names; std::string conv_name = "Conv2-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addConvolution(model, conv_name, conv_name, node_names_l, sqrt(node_names_l.size()), sqrt(node_names_l.size()), 0, 0, filter_size, filter_size, filter_stride, 0, 0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(filter_size * filter_size, 2)), solver_op, 0.0f, 0.0f, false, specify_layers, share_weights); if (add_feature_norm) { std::string norm_name = "Norm2-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addNormalization(model, norm_name, norm_name, node_names, specify_layers); std::string gain_name = "Gain2-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addSinglyConnected(model, gain_name, gain_name, node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } if (add_pool) { std::string pool_name = "Pool2-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addConvolution(model, pool_name, pool_name, node_names, sqrt(node_names.size()), sqrt(node_names.size()), 1, 1, pool_size, pool_size, pool_stride, 0, 0, std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), std::make_shared<MaxOp<TensorT>>(MaxOp<TensorT>()), std::make_shared<MaxErrorOp<TensorT>>(MaxErrorOp<TensorT>()), std::make_shared<MaxWeightGradOp<TensorT>>(MaxWeightGradOp<TensorT>()), std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, 0.0, false, specify_layers); } node_names_l1.push_back(node_names); } ++l_cnt; } // Linearize the node names std::vector<std::string> node_names; if (node_names_l2.size()) { for (const std::vector<std::string>& node_names_l : node_names_l2) { for (const std::string& node_name : node_names_l) { node_names.push_back(node_name); } } } if (node_names_l1.size()) { for (const std::vector<std::string>& node_names_l : node_names_l1) { for (const std::string& node_name : node_names_l) { node_names.push_back(node_name); } } } else { for (const std::vector<std::string>& node_names_l : node_names_l0) { for (const std::string& node_name : node_names_l) { node_names.push_back(node_name); } } } // Add the first FC layer if (n_fc_1 > 0) { node_names = model_builder.addFullyConnected(model, "FC0", "FC0", node_names, n_fc_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size() + n_fc_1, 2)), solver_op, 0.0f, 0.0f, false, specify_layers); if (add_feature_norm) { node_names = model_builder.addNormalization(model, "FC0-FeatureNorm", "FC0-FeatureNorm", node_names, true); node_names = model_builder.addSinglyConnected(model, "FC0-FeatureNorm-gain", "FC0-FeatureNorm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } } // Add the second FC layer if (n_fc_2 > 0) { node_names = model_builder.addFullyConnected(model, "FC1", "FC1", node_names, n_fc_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size() + n_fc_2, 2)), solver_op, 0.0f, 0.0f, false, specify_layers); if (add_feature_norm) { node_names = model_builder.addNormalization(model, "FC1-FeatureNorm", "FC1-FeatureNorm", node_names, true); node_names = model_builder.addSinglyConnected(model, "FC1-FeatureNorm-gain", "FC1-FeatureNorm-gain", node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } } // Add the final output layer node_names = model_builder.addFullyConnected(model, "FC1-Output", "FC1-Output", node_names, n_outputs, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 2)), solver_op, 0.0f, 0.0f, false, true); // Add the dummy output layer node_names = model_builder.addSinglyConnected(model, "Output", "Output", node_names, n_outputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); // Manually define the output nodes for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } /* @brief Basic Fully Connected baseline model with Xavier-like initialization @param[in, out] model The network model @param[in] n_inputs The number of input pixels @param[in] n_outputs The number of output labels @param[in] n_hidden The length of the hidden layers @param[in] specify_layers Whether to give the `ModelInterpreter` "hints" as to the correct network structure during graph to tensor compilation */ void makeFullyConn(Model<TensorT>& model, const int& n_inputs = 784, const int& n_outputs = 10, const int& n_hidden_0 = 512, const int& n_hidden_1 = 512, const int& n_hidden_2 = 512, const bool& add_feature_norm = false, const bool& add_batch_norm = false, const bool& specify_layers = false) { model.setId(0); model.setName("FullyConnectedClassifier"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layers); // Define the activation based on `add_feature_norm` std::shared_ptr<ActivationOp<TensorT>> activation, activation_grad; if (add_feature_norm || add_batch_norm) { activation = std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()); activation_grad = std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()); } else { activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); } std::shared_ptr<ActivationOp<TensorT>> activation_batch_norm, activation_batch_norm_grad; if (add_feature_norm) { activation_batch_norm = std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()); activation_batch_norm_grad = std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()); } else { activation_batch_norm = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); activation_batch_norm_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); } std::shared_ptr<ActivationOp<TensorT>> activation_feature_norm = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_feature_norm_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-3, 0.9, 0.999, 1e-8, 100)); // Add the 1st FC layer if (n_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "EN0", "EN0", node_names, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); if (add_batch_norm) { node_names = model_builder.addSinglyConnected(model, "EN0-BatchNorm", "EN0-BatchNorm", node_names, node_names.size(), std::make_shared<BatchNormOp<TensorT>>(BatchNormOp<TensorT>()), std::make_shared<BatchNormGradOp<TensorT>>(BatchNormGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); node_names = model_builder.addSinglyConnected(model, "EN0-BatchNorm-gain", "EN0-BatchNorm-gain", node_names, node_names.size(), activation_batch_norm, activation_batch_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } if (add_feature_norm) { node_names = model_builder.addNormalization(model, "EN0-FeatureNorm", "EN0-FeatureNorm", node_names, true); node_names = model_builder.addSinglyConnected(model, "EN0-FeatureNorm-gain", "EN0-FeatureNorm-gain", node_names, node_names.size(), activation_feature_norm, activation_feature_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } } // Add the 2nd FC layer if (n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "EN1", "EN1", node_names, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); if (add_batch_norm) { node_names = model_builder.addSinglyConnected(model, "EN1-BatchNorm", "EN1-BatchNorm", node_names, node_names.size(), std::make_shared<BatchNormOp<TensorT>>(BatchNormOp<TensorT>()), std::make_shared<BatchNormGradOp<TensorT>>(BatchNormGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); node_names = model_builder.addSinglyConnected(model, "EN1-BatchNorm-gain", "EN1-BatchNorm-gain", node_names, node_names.size(), activation_batch_norm, activation_batch_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } if (add_feature_norm) { node_names = model_builder.addNormalization(model, "EN1-FeatureNorm", "EN1-FeatureNorm", node_names, true); node_names = model_builder.addSinglyConnected(model, "EN1-FeatureNorm-gain", "EN1-FeatureNorm-gain", node_names, node_names.size(), activation_feature_norm, activation_feature_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } } // Add the 3nd FC layer if (n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "EN2", "EN2", node_names, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); if (add_batch_norm) { node_names = model_builder.addSinglyConnected(model, "EN2-BatchNorm", "EN2-BatchNorm", node_names, node_names.size(), std::make_shared<BatchNormOp<TensorT>>(BatchNormOp<TensorT>()), std::make_shared<BatchNormGradOp<TensorT>>(BatchNormGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); node_names = model_builder.addSinglyConnected(model, "EN2-BatchNorm-gain", "EN2-BatchNorm-gain", node_names, node_names.size(), activation_batch_norm, activation_batch_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } if (add_feature_norm) { node_names = model_builder.addNormalization(model, "EN2-FeatureNorm", "EN2-FeatureNorm", node_names, true); node_names = model_builder.addSinglyConnected(model, "EN2-FeatureNorm-gain", "EN2-FeatureNorm-gain", node_names, node_names.size(), activation_feature_norm, activation_feature_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } } node_names = model_builder.addFullyConnected(model, "DE-Output", "DE-Output", node_names, n_outputs, //std::make_shared<SigmoidOp<TensorT>>(SigmoidOp<TensorT>()), //std::make_shared<SigmoidGradOp<TensorT>>(SigmoidGradOp<TensorT>()), activation_feature_norm, activation_feature_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 1)), solver_op, 0.0f, 0.0f, false, true); // Add the actual output nodes node_names = model_builder.addSinglyConnected(model, "Output", "Output", node_names, n_outputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names) model.nodes_.at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } void adaptiveTrainerScheduler( const int& n_generations, const int& n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, const std::vector<float>& model_errors) { if (n_epochs % 10 == 0) { //if (n_epochs % 1000 == 0 && n_epochs != 0) { // save the model every 1000 epochs model_interpreter.getModelResults(model, false, true, false, false); ModelFile<TensorT> data; // Save weights to .csv data.storeModelCsv(model.getName() + "_" + std::to_string(n_epochs) + "_nodes.csv", model.getName() + "_" + std::to_string(n_epochs) + "_links.csv", model.getName() + "_" + std::to_string(n_epochs) + "_weights.csv", model, false, false, true); //// Save to binary //data.storeModelBinary(model.getName() + "_" + std::to_string(n_epochs) + "_model.binary", model); //ModelInterpreterFileDefaultDevice<TensorT> interpreter_data; //interpreter_data.storeModelInterpreterBinary(model.getName() + "_" + std::to_string(n_epochs) + "_interpreter.binary", model_interpreter); } } void trainingModelLogger(const int& n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error_train, const TensorT& model_error_test, const Eigen::Tensor<TensorT, 1>& model_metrics_train, const Eigen::Tensor<TensorT, 1>& model_metrics_test) override { // Set the defaults model_logger.setLogTimeEpoch(true); model_logger.setLogTrainValMetricEpoch(true); model_logger.setLogExpectedEpoch(false); model_logger.setLogNodeOutputsEpoch(false); model_logger.setLogNodeInputsEpoch(false); // initialize all logs if (n_epochs == 0) { model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_logger.initLogs(model); } // Per n epoch logging if (n_epochs % 1000 == 0) { model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_interpreter.getModelResults(model, true, false, false, false); } // Create the metric headers and data arrays std::vector<std::string> log_train_headers = { "Train_Error" }; std::vector<std::string> log_test_headers = { "Test_Error" }; std::vector<TensorT> log_train_values = { model_error_train }; std::vector<TensorT> log_test_values = { model_error_test }; int metric_iter = 0; for (const std::string& metric_name : this->metric_names_) { log_train_headers.push_back(metric_name); log_test_headers.push_back(metric_name); log_train_values.push_back(model_metrics_train(metric_iter)); log_test_values.push_back(model_metrics_test(metric_iter)); ++metric_iter; } model_logger.writeLogs(model, n_epochs, log_train_headers, log_test_headers, log_train_values, log_test_values, output_nodes, expected_values, {}, output_nodes, {}, input_nodes, {}); } }; template<typename TensorT> class DataSimulatorExt : public MNISTSimulator<TensorT> { public: void simulateTrainingData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); assert(n_output_nodes == this->validation_labels.dimension(1)); assert(n_input_nodes == this->validation_data.dimension(1)); // make a vector of sample_indices Eigen::Tensor<int, 1> sample_indices = this->getTrainingIndices(batch_size, n_epochs); // Reformat the input data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int epochs_iter = 0; epochs_iter < n_epochs; ++epochs_iter) { for (int nodes_iter = 0; nodes_iter < this->training_data.dimension(1); ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = this->training_data(sample_indices[epochs_iter * batch_size + batch_iter], nodes_iter); //input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = this->training_data(sample_indices[0], nodes_iter); // test on only 1 sample } for (int nodes_iter = 0; nodes_iter < this->training_labels.dimension(1); ++nodes_iter) { output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = (TensorT)this->training_labels(sample_indices[epochs_iter * batch_size + batch_iter], nodes_iter); //output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = (TensorT)this->training_labels(sample_indices[0], nodes_iter); // test on only 1 sample } } } } time_steps.setConstant(1.0f); } void simulateValidationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); assert(n_output_nodes == this->validation_labels.dimension(1)); assert(n_input_nodes == this->validation_data.dimension(1)); // make the start and end sample indices Eigen::Tensor<int, 1> sample_indices = this->getValidationIndices(batch_size, n_epochs); // Reformat the input data for validation for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int epochs_iter = 0; epochs_iter < n_epochs; ++epochs_iter) { for (int nodes_iter = 0; nodes_iter < this->validation_data.dimension(1); ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = this->validation_data(sample_indices[epochs_iter * batch_size + batch_iter], nodes_iter); } for (int nodes_iter = 0; nodes_iter < this->validation_labels.dimension(1); ++nodes_iter) { output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = (TensorT)this->validation_labels(sample_indices[epochs_iter * batch_size + batch_iter], nodes_iter); } } } } time_steps.setConstant(1.0f); } void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); assert(n_output_nodes == 2 * this->training_labels.dimension(1)); assert(n_metric_output_nodes == this->training_labels.dimension(1)); assert(n_input_nodes == 784); assert(memory_size == 1); // make the start and end sample indices [BUG FREE] Eigen::Tensor<int, 1> sample_indices = this->getTrainingIndices(batch_size, 1); // Reformat the input data for training [BUG FREE] for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = this->training_data(sample_indices[batch_iter], nodes_iter); } for (int nodes_iter = 0; nodes_iter < this->training_labels.dimension(1); ++nodes_iter) { loss_output_data(batch_iter, memory_iter, nodes_iter) = (TensorT)this->training_labels(sample_indices[batch_iter], nodes_iter); loss_output_data(batch_iter, memory_iter, nodes_iter + this->training_labels.dimension(1)) = (TensorT)this->training_labels(sample_indices[batch_iter], nodes_iter); metric_output_data(batch_iter, memory_iter, nodes_iter) = (TensorT)this->training_labels(sample_indices[batch_iter], nodes_iter); } } } } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); assert(n_output_nodes == 2 * this->validation_labels.dimension(1)); assert(n_metric_output_nodes == this->validation_labels.dimension(1)); assert(n_input_nodes == 784); assert(memory_size == 1); // make the start and end sample indices Eigen::Tensor<int, 1> sample_indices = this->getValidationIndices(batch_size, 1); // Reformat the input data for validation for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = this->validation_data(sample_indices[batch_iter], nodes_iter); } for (int nodes_iter = 0; nodes_iter < this->validation_labels.dimension(1); ++nodes_iter) { loss_output_data(batch_iter, memory_iter, nodes_iter) = (TensorT)this->validation_labels(sample_indices[batch_iter], nodes_iter); loss_output_data(batch_iter, memory_iter, nodes_iter + this->validation_labels.dimension(1)) = (TensorT)this->validation_labels(sample_indices[batch_iter], nodes_iter); metric_output_data(batch_iter, memory_iter, nodes_iter) = (TensorT)this->validation_labels(sample_indices[batch_iter], nodes_iter); } } } } }; template<typename TensorT> class ModelReplicatorExt : public ModelReplicator<TensorT> {}; template<typename TensorT> class PopulationTrainerExt : public PopulationTrainerDefaultDevice<TensorT> {}; /** @brief Image classification MNIST example whereby all pixels are linearized and read into the model. The model then attempts to classify the image using a CovNet architecture Data processing: - whole image pixels (linearized) 28x28 normalized to 0 to 1 - classifier (1 hot vector from 0 to 9) */ void main_MNIST(const std::string& data_dir, const bool& make_model, const bool& train_model) { const int n_hard_threads = std::thread::hardware_concurrency(); const int n_threads = 1; // define the populatin trainer PopulationTrainerExt<float> population_trainer; population_trainer.setNGenerations(1); population_trainer.setLogging(false); // define the population logger PopulationLogger<float> population_logger(true, true); // define the model logger ModelLogger<float> model_logger(true, true, false, false, false, false, false, false); // define the data simulator const std::size_t input_size = 784; const std::size_t training_data_size = 60000; //60000; const std::size_t validation_data_size = 10000; //10000; DataSimulatorExt<float> data_simulator; // read in the training data std::string training_data_filename = data_dir + "train-images.idx3-ubyte"; std::string training_labels_filename = data_dir + "train-labels.idx1-ubyte"; data_simulator.readData(training_data_filename, training_labels_filename, true, training_data_size, input_size); // read in the validation data std::string validation_data_filename = data_dir + "t10k-images.idx3-ubyte"; std::string validation_labels_filename = data_dir + "t10k-labels.idx1-ubyte"; data_simulator.readData(validation_data_filename, validation_labels_filename, false, validation_data_size, input_size); data_simulator.unitScaleData(); // Make the input nodes std::vector<std::string> input_nodes; for (int i = 0; i < input_size; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } // Make the output nodes std::vector<std::string> output_nodes; for (int i = 0; i < data_simulator.mnist_labels.size(); ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes.push_back(name); } // define the model trainers and resources for the trainers std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; for (size_t i = 0; i < n_threads; ++i) { ModelResources model_resources = { ModelDevice(1, 1) }; ModelInterpreterDefaultDevice<float> model_interpreter(model_resources); model_interpreters.push_back(model_interpreter); } ModelTrainerExt<float> model_trainer; model_trainer.setBatchSize(128); model_trainer.setMemorySize(1); model_trainer.setNEpochsTraining(100001); model_trainer.setVerbosityLevel(1); model_trainer.setLogging(true); model_trainer.setFindCycles(false); model_trainer.setFastInterpreter(true); model_trainer.setLossFunctions({ std::make_shared<CrossEntropyWithLogitsLossOp<float>>(CrossEntropyWithLogitsLossOp<float>(1e-24, 1.0)), std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-24, 0.0)) }); model_trainer.setLossFunctionGrads({ std::make_shared<CrossEntropyWithLogitsLossGradOp<float>>(CrossEntropyWithLogitsLossGradOp<float>(1e-24, 1.0)), std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-24, 0.0)) }); model_trainer.setLossOutputNodes({ output_nodes, output_nodes }); model_trainer.setMetricFunctions({ std::make_shared<PrecisionMCMicroOp<float>>(PrecisionMCMicroOp<float>()) }); model_trainer.setMetricOutputNodes({ output_nodes }); model_trainer.setMetricNames({ "PrecisionMCMicro" }); // define the model replicator ModelReplicatorExt<float> model_replicator; // define the initial population Model<float> model; if (make_model) { std::cout << "Making the model..." << std::endl; //model_trainer.makeFullyConn(model, input_nodes.size(), output_nodes.size(), 512, 0, 0, true, false, true); // Baseline //model_trainer.makeCovNet(model, input_nodes.size(), output_nodes.size(), 2, 2, 0, 32, 4, 7, 1, 2, 2, false, true, true); // Sanity test model_trainer.makeCovNet(model, input_nodes.size(), output_nodes.size(), 1, 0, 0, 1, 0, 7, 1, 2, 2, false, false, true, true); // Sanity test //model_trainer.makeCovNet(model, input_nodes.size(), output_nodes.size(), 32, 2, 0, 512, 32, 5, 1, 2, 2, true, false, true, true); // Recommended model //model_trainer.makeCovNet(model, input_nodes.size(), output_nodes.size(), 32, 2, 0, 512, 32, 7, 1, 2, 2, false, false, true, true); // Recommended model } else { // read in the trained model std::cout << "Reading in the model..." << std::endl; const std::string model_filename = data_dir + "CovNet_model.binary"; const std::string interpreter_filename = data_dir + "CovNet_interpreter.binary"; ModelFile<float> model_file; model_file.loadModelBinary(model_filename, model); model.setId(1); model.setName("CovNet1"); ModelInterpreterFileDefaultDevice<float> model_interpreter_file; model_interpreter_file.loadModelInterpreterBinary(interpreter_filename, model_interpreters[0]); //// Modify the learning rate //std::cout << "Modifying the learning rate..." << std::endl; //for (auto& weight_map : model.weights_) { // if (weight_map.second->getSolverOpShared()->getName() == "AdamOp") { // weight_map.second->getSolverOpShared()->setLearningRate(1e-5); // } //} } //std::vector<Model<float>> population = { model }; if (train_model) { // Train the model std::cout << "Training the model..." << std::endl; std::pair<std::vector<float>, std::vector<float>> model_errors = model_trainer.trainModel(model, data_simulator, input_nodes, model_logger, model_interpreters.front()); //// Evolve the population //std::vector<std::vector<std::tuple<int, std::string, float>>> models_validation_errors_per_generation = population_trainer.evolveModels( // population, model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, population_logger, input_nodes); //PopulationTrainerFile<float> population_trainer_file; //population_trainer_file.storeModels(population, "MNIST"); //population_trainer_file.storeModelValidations("MNISTErrors.csv", models_validation_errors_per_generation); } else { //// Evaluate the population //std::cout << "Evaluating the model..." << std::endl; //population_trainer.evaluateModels( // population, model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, input_nodes); } } /// MNIST_CovNet_example C:/Users/dmccloskey/Documents/GitHub/mnist/ true true int main(int argc, char** argv) { // Parse the user commands std::string data_dir = "C:/Users/dmccloskey/Documents/GitHub/mnist/"; //std::string data_dir = "/home/user/data/"; //std::string data_dir = "C:/Users/domccl/GitHub/mnist/"; bool make_model = true, train_model = true; if (argc >= 2) { data_dir = argv[1]; } if (argc >= 3) { make_model = (argv[2] == std::string("true")) ? true : false; } if (argc >= 4) { train_model = (argv[3] == std::string("true")) ? true : false; } // run the application main_MNIST(data_dir, make_model, train_model); return 0; }<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_LOSSFUNCTIONTENSOR_H #define EVONET_LOSSFUNCTIONTENSOR_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> #endif #include <EvoNet/core/Preprocessing.h> #include <unsupported/Eigen/CXX11/Tensor> #include <iostream> namespace EvoNet { /** @brief Base class loss function. */ template<typename TensorT, typename DeviceT> class LossFunctionTensorOp { public: LossFunctionTensorOp() = default; LossFunctionTensorOp(const TensorT& eps, const TensorT& scale) : eps_(eps), scale_(scale) {}; virtual ~LossFunctionTensorOp() = default; virtual std::string getName() = 0; virtual void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const = 0; protected: TensorT eps_ = TensorT(1e-24); TensorT scale_ = TensorT(1.0); TensorT reward_ = TensorT(10.0); TensorT min_ = TensorT(-1e9); TensorT max_ = TensorT(1e9); }; /** @brief Base class loss function gradient. */ template<typename TensorT, typename DeviceT> class LossFunctionGradTensorOp { public: LossFunctionGradTensorOp() = default; LossFunctionGradTensorOp(const TensorT& eps, const TensorT& scale) : eps_(eps), scale_(scale) {}; ~LossFunctionGradTensorOp() = default; virtual std::string getName() = 0; virtual void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const = 0; protected: TensorT eps_ = TensorT(1e-24); TensorT scale_ = TensorT(1.0); TensorT reward_ = TensorT(10.0); TensorT min_ = TensorT(-1e9); TensorT max_ = TensorT(1e9); }; /** @brief Manhattan loss function. */ template<typename TensorT, typename DeviceT> class ManhattanDistanceLossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: using LossFunctionTensorOp<TensorT, DeviceT>::LossFunctionTensorOp; std::string getName() { return "ManhattanDistanceLossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); error_tensor.chip(time_step, 1).device(device) += (((expected_tensor - predicted_chip).pow(TensorT(2)).sqrt()).sum(Eigen::array<int, 1>({ 1 })) * error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); }; }; /** @brief Manhattan distance loss function gradient. */ template<typename TensorT, typename DeviceT> class ManhattanDistanceLossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: using LossFunctionGradTensorOp<TensorT, DeviceT>::LossFunctionGradTensorOp; std::string getName() { return "ManhattanDistanceLossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto result = (expected_tensor - predicted_chip == predicted_chip.constant(TensorT(0))).select( predicted_chip.constant(TensorT(0)), ((expected_tensor - predicted_chip) / ((expected_tensor - predicted_chip).pow(TensorT(2)).sqrt()))*error_tensor.chip(time_step, 1).constant(this->scale_)); error_tensor.chip(time_step, 1).device(device) += result.clip(this->min_, this->max_); }; }; /** @brief L2Norm loss function. */ template<typename TensorT, typename DeviceT> class L2NormLossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: using LossFunctionTensorOp<TensorT, DeviceT>::LossFunctionTensorOp; std::string getName() { return "L2NormLossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); error_tensor.chip(time_step, 1).device(device) += (((expected_tensor - (predicted_chip).pow(TensorT(2))) * expected_tensor.constant(TensorT(0.5))).sum(Eigen::array<int, 1>({ 1 })) * error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); // modified to simplify the derivative }; }; /** @brief L2Norm loss function gradient. */ template<typename TensorT, typename DeviceT> class L2NormLossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: using LossFunctionGradTensorOp<TensorT, DeviceT>::LossFunctionGradTensorOp; std::string getName() { return "L2NormLossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); error_tensor.chip(time_step, 1).device(device) += ((expected_tensor - predicted_chip) * error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); // modified to exclude the 0.5 }; }; /** @brief Binary Cross Entropy loss function. */ template<typename TensorT, typename DeviceT> class BCELossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: using LossFunctionTensorOp<TensorT, DeviceT>::LossFunctionTensorOp; std::string getName() { return "BCELossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto tmp = -( expected_tensor * predicted_chip.clip(this->eps_, TensorT(1)).log() + (expected_tensor.constant(TensorT(1)) - expected_tensor) * (expected_tensor.constant(TensorT(1)) - predicted_chip).clip(this->eps_, TensorT(1)).log() ); error_tensor.chip(time_step, 1).device(device) += (tmp.sum(Eigen::array<int, 1>({ 1 })) * error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); }; }; /** @brief Binary Cross Entropy loss function gradient. The derivative of -(z * log(x) + (1 - z)*log(1-x)) is the following = (1-z)/(1-x) - z/x = -(x-z)/((x-1)*x) */ template<typename TensorT, typename DeviceT> class BCELossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: using LossFunctionGradTensorOp<TensorT, DeviceT>::LossFunctionGradTensorOp; std::string getName() { return "BCELossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto term1 = expected_tensor / predicted_chip.clip(this->eps_, TensorT(1)); auto term2 = (expected_tensor.constant(TensorT(1)) - expected_tensor) / (expected_tensor.constant(TensorT(1)) - predicted_chip.clip(TensorT(0), TensorT(1) - this->eps_)); auto result = term1 - term2; //auto result = (predicted_chip - expected_tensor) / ((predicted_chip - expected_tensor.constant(TensorT(1))) * predicted_chip); error_tensor.chip(time_step, 1).device(device) += (result*error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); }; }; /** @brief NegativeLogLikelihood loss function. */ template<typename TensorT, typename DeviceT> class NegativeLogLikelihoodLossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: using LossFunctionTensorOp<TensorT, DeviceT>::LossFunctionTensorOp; void setN(const TensorT& n) { n_ = n; } std::string getName() { return "NegativeLogLikelihoodLossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); error_tensor.chip(time_step, 1).device(device) += ((-expected_tensor * (predicted_chip.clip(TensorT(1e-6),TensorT(this->max_)).log())) * expected_tensor.constant(TensorT(1) / TensorT(layer_size))).sum(Eigen::array<int, 1>({ 1 })) * error_tensor.chip(time_step, 1).constant(this->scale_); }; private: TensorT n_ = TensorT(1); ///< the number of total classifiers }; /** @brief NegativeLogLikelihood loss function gradient. */ template<typename TensorT, typename DeviceT> class NegativeLogLikelihoodLossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: using LossFunctionGradTensorOp<TensorT, DeviceT>::LossFunctionGradTensorOp; void setN(const TensorT& n) { n_ = n; } std::string getName() { return "NegativeLogLikelihoodLossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); // NOTE: added - so that the gradient is - error_tensor.chip(time_step, 1).device(device) -= ((expected_tensor / (predicted_chip + expected_tensor.constant(TensorT(this->eps_))) / expected_tensor.constant(TensorT(layer_size))) *error_tensor.chip(time_step, 1).constant(TensorT(this->scale_))).clip(this->min_, this->max_); }; private: TensorT n_ = TensorT(1.0); ///< the number of total classifiers }; /** @brief MSE Mean Squared Error loss function. */ template<typename TensorT, typename DeviceT> class MSELossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: using LossFunctionTensorOp<TensorT, DeviceT>::LossFunctionTensorOp; std::string getName() { return "MSELossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); error_tensor.chip(time_step, 1).device(device) += (((expected_tensor - predicted_chip).pow(TensorT(2)) * expected_tensor.constant(TensorT(0.5)) / expected_tensor.constant(TensorT(layer_size))).sum(Eigen::array<int, 1>({ 1 })) *error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); }; }; /** @brief MSE Mean Squared Error loss function gradient. */ template<typename TensorT, typename DeviceT> class MSELossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: using LossFunctionGradTensorOp<TensorT, DeviceT>::LossFunctionGradTensorOp; std::string getName() { return "MSELossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); error_tensor.chip(time_step, 1).device(device) += (((expected_tensor - predicted_chip) / expected_tensor.constant(TensorT(layer_size))) *error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); }; }; /** @brief MAE Mean Absolute Error loss function. */ template<typename TensorT, typename DeviceT> class MAELossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: using LossFunctionTensorOp<TensorT, DeviceT>::LossFunctionTensorOp; std::string getName() { return "MAELossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); error_tensor.chip(time_step, 1).device(device) += (((expected_tensor - predicted_chip).pow(TensorT(2)).sqrt() / expected_tensor.constant(TensorT(layer_size))).sum(Eigen::array<int, 1>({ 1 })) *error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); }; }; /** @brief MAE Mean Absolute Error loss function gradient. */ template<typename TensorT, typename DeviceT> class MAELossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: using LossFunctionGradTensorOp<TensorT, DeviceT>::LossFunctionGradTensorOp; std::string getName() { return "MAELossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto result = (expected_tensor - predicted_chip == predicted_chip.constant(TensorT(0))).select( predicted_chip.constant(TensorT(0)), (((expected_tensor - predicted_chip) / (expected_tensor - predicted_chip).pow(TensorT(2)).sqrt() / expected_tensor.constant(TensorT(layer_size)))*error_tensor.chip(time_step, 1).constant(this->scale_)) ); error_tensor.chip(time_step, 1).device(device) += result.clip(this->min_, this->max_); }; }; /** @brief MRSE Mean Root Squared Error loss function. WIP. Based on the following references: https://stats.stackexchange.com/questions/102810/pros-of-jeffries-matusita-distance https://en.wikipedia.org/wiki/Bhattacharyya_distance */ template<typename TensorT, typename DeviceT> class MRSELossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: using LossFunctionTensorOp<TensorT, DeviceT>::LossFunctionTensorOp; std::string getName() { return "MRSELossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto min_offset = predicted_chip.chip(0, 2) - predicted_chip.minimum(Eigen::array<Eigen::Index, 1>({1})).broadcast(Eigen::array<Eigen::Index, 2>({ 1, layer_size })); error_tensor.chip(time_step, 1).device(device) += (((expected_tensor.sqrt() - min_offset.sqrt()).pow(TensorT(2)) / expected_tensor.constant(TensorT(layer_size))).sum(Eigen::array<int, 1>({ 1 })) *error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); }; }; /** @brief MRSE Mean Root Squared Error loss function gradient. WIP. */ template<typename TensorT, typename DeviceT> class MRSELossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: using LossFunctionGradTensorOp<TensorT, DeviceT>::LossFunctionGradTensorOp; std::string getName() { return "MRSELossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto min_offset = predicted_chip.chip(0, 2) - predicted_chip.minimum(Eigen::array<Eigen::Index, 1>({ 1 })).broadcast(Eigen::array<Eigen::Index, 2>({ 1, layer_size })); error_tensor.chip(time_step, 1).device(device) += (((expected_tensor.sqrt() - min_offset.sqrt()) / (min_offset.sqrt() - expected_tensor.constant(this->eps_)) / expected_tensor.constant(TensorT(layer_size))) *error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); }; }; /** @brief MLE Mean Root Squared Error loss function. */ template<typename TensorT, typename DeviceT> class MLELossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: using LossFunctionTensorOp<TensorT, DeviceT>::LossFunctionTensorOp; std::string getName() { return "MLELossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> expected_tensor(expected, batch_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto diff = expected_tensor - predicted_chip; auto min_offset = diff.chip(0, 2) - diff.minimum(Eigen::array<Eigen::Index, 1>({ 1 })).broadcast(Eigen::array<Eigen::Index, 2>({ 1, layer_size })) + diff.chip(0, 2).constant(TensorT(1)); error_tensor.chip(time_step, 1).device(device) += ((min_offset.log() / expected_tensor.chip(0, 2).constant(TensorT(layer_size))).sum(Eigen::array<int, 1>({ 1 })) *error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); }; }; /** @brief MLE Mean Root Squared Error loss function gradient. */ template<typename TensorT, typename DeviceT> class MLELossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: using LossFunctionGradTensorOp<TensorT, DeviceT>::LossFunctionGradTensorOp; std::string getName() { return "MLELossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> expected_tensor(expected, batch_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto diff = expected_tensor - predicted_chip; auto min_offset = diff.chip(0, 2) - diff.minimum(Eigen::array<Eigen::Index, 1>({ 1 })).broadcast(Eigen::array<Eigen::Index, 2>({ 1, layer_size })) + diff.chip(0, 2).constant(TensorT(1)); // TODO: change to (min_offset == min_offset.constant(TensorT(0))).select(min_offset.constant(TensorT(0)), ((expected_tensor.chip(0, 2).constant(TensorT(1)) / min_offset / expected_tensor.chip(0, 2).constant(TensorT(layer_size)))*error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_)); error_tensor.chip(time_step, 1).device(device) += ((expected_tensor.chip(0, 2).constant(TensorT(1)) / (min_offset - expected_tensor.chip(0, 2).constant(this->eps_)) / expected_tensor.chip(0, 2).constant(TensorT(layer_size))) *error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); }; }; /** @brief KLDivergenceMu loss function. References <NAME> Welling. Auto-Encoding Variational Bayes. ICLR, 2014 https://arxiv.org/abs/1312.6114 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD = -0.5 * torch.sum(1 + logvar - mu.pow(TensorT(2)) - logvar.exp()) */ template<typename TensorT, typename DeviceT> class KLDivergenceMuLossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: KLDivergenceMuLossTensorOp() = default; KLDivergenceMuLossTensorOp(const TensorT & eps, const TensorT & scale, const TensorT & capacity) : LossFunctionTensorOp<TensorT, DeviceT>(eps, scale), capacity_(capacity) {}; ~KLDivergenceMuLossTensorOp() = default; std::string getName() { return "KLDivergenceMuLossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto kl_div = (-expected_tensor.constant(TensorT(0.5)) + expected_tensor.constant(TensorT(0.5)) * predicted_chip.pow(TensorT(2))).sum(Eigen::array<int, 1>({ 1 })); auto kl_div_cap = kl_div - error_tensor.chip(time_step, 1).constant(this->capacity_); auto result = kl_div_cap * error_tensor.chip(time_step, 1).constant(this->scale_); error_tensor.chip(time_step, 1).device(device) += (result == result).select(result.clip(this->min_, this->max_), result.constant(TensorT(0))); }; private: TensorT capacity_ = TensorT(0); }; /** @brief KLDivergenceMu loss function gradient. */ template<typename TensorT, typename DeviceT> class KLDivergenceMuLossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: KLDivergenceMuLossGradTensorOp() = default; KLDivergenceMuLossGradTensorOp(const TensorT & eps, const TensorT & scale, const TensorT & capacity) : LossFunctionGradTensorOp<TensorT, DeviceT>(eps, scale), capacity_(capacity) {}; ~KLDivergenceMuLossGradTensorOp() = default; std::string getName() { return "KLDivergenceMuLossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto kl_div = expected_tensor.constant(TensorT(2)) * predicted_chip; auto kl_div_cap = kl_div - expected_tensor.constant(this->capacity_); // NOTE: changed to -= to ensure a negative gradient error_tensor.chip(time_step, 1).device(device) -= (kl_div_cap * error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); }; private: TensorT capacity_ = TensorT(0); }; /** @brief KLDivergenceLogVar loss function. References <NAME> Welling. Auto-Encoding Variational Bayes. ICLR, 2014 https://arxiv.org/abs/1312.6114 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD = -0.5 * torch.sum(1 + logvar - mu.pow(TensorT(2)) - logvar.exp()) */ template<typename TensorT, typename DeviceT> class KLDivergenceLogVarLossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: KLDivergenceLogVarLossTensorOp() = default; KLDivergenceLogVarLossTensorOp(const TensorT & eps, const TensorT & scale, const TensorT & capacity) : LossFunctionTensorOp<TensorT, DeviceT>(eps, scale), capacity_(capacity) {}; ~KLDivergenceLogVarLossTensorOp() = default; std::string getName() { return "KLDivergenceLogVarLossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto kl_div = (-expected_tensor.constant(TensorT(0.5)) - expected_tensor.constant(TensorT(0.5)) * predicted_chip + (expected_tensor.constant(TensorT(0.5)) * predicted_chip).exp()).sum(Eigen::array<int, 1>({ 1 })); auto kl_div_cap = kl_div - error_tensor.chip(time_step, 1).constant(this->capacity_); auto result = kl_div_cap * error_tensor.chip(time_step, 1).constant(this->scale_); error_tensor.chip(time_step, 1).device(device) += (result == result).select(result.clip(this->min_, this->max_), result.constant(TensorT(0))); }; private: TensorT capacity_ = TensorT(0); }; /** @brief KLDivergenceLogVar loss function gradient. */ template<typename TensorT, typename DeviceT> class KLDivergenceLogVarLossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: KLDivergenceLogVarLossGradTensorOp() = default; KLDivergenceLogVarLossGradTensorOp(const TensorT & eps, const TensorT & scale, const TensorT & capacity) : LossFunctionGradTensorOp<TensorT, DeviceT>(eps, scale), capacity_(capacity) {}; ~KLDivergenceLogVarLossGradTensorOp() = default; std::string getName() { return "KLDivergenceLogVarLossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto kl_div = -expected_tensor.constant(TensorT(0.5)) + (expected_tensor.constant(TensorT(0.5)) * predicted_chip).exp(); auto kl_div_cap = kl_div - expected_tensor.constant(this->capacity_); auto result = kl_div_cap * error_tensor.chip(time_step, 1).constant(this->scale_); // NOTE: changed to -= to ensure a negative gradient error_tensor.chip(time_step, 1).device(device) -= (result == result).select(result.clip(this->min_, this->max_), result.constant(TensorT(0))); }; private: TensorT capacity_ = TensorT(0); }; /** @brief BCEWithLogits loss function. Binary Cross Entropy with integrated sigmoid layer z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) = (1 - z) * x + log(1 + exp(-x)) = x - x * z + log(1 + exp(-x)) References: https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss PyTorch implementation: max_val = (-input).clamp(min=0) loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log() TensorFlow implementation: max(x, 0) - x * z + log(1 + exp(-abs(x))) */ template<typename TensorT, typename DeviceT> class BCEWithLogitsLossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: using LossFunctionTensorOp<TensorT, DeviceT>::LossFunctionTensorOp; std::string getName() { return "BCEWithLogitsLossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); //auto max_values = (-predicted_chip).cwiseMax(expected_tensor.constant(TensorT(0))); // pytorch version //auto max_values = predicted_chip.cwiseMax(expected_tensor.constant(TensorT(0))); // tensorFlow version auto max_values = (expected_tensor < expected_tensor.constant(TensorT(0))).select(predicted_chip.cwiseMin(expected_tensor.constant(TensorT(0))), predicted_chip.cwiseMax(expected_tensor.constant(TensorT(0)))); // custom version auto abs_values = -(predicted_chip.abs()); // tensorFlow and custom versions // Temporary memory for computation TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 1>> result(tmp_data, batch_size); //result.device(device) = ( // predicted_chip - predicted_chip * expected_tensor + max_values + ((-max_values).exp() + (-predicted_chip - max_values).exp()).log() // ).sum(Eigen::array<int, 1>({ 1 })) * error_tensor.chip(time_step, 1).constant(this->scale_); // pytorch version //result.device(device) = ( // max_values - predicted_chip * expected_tensor + (expected_tensor.constant(TensorT(1)) + abs_values.exp()).log() // ).sum(Eigen::array<int, 1>({ 1 })) * error_tensor.chip(time_step, 1).constant(this->scale_); // tensorFlow version result.device(device) = ( max_values - predicted_chip * expected_tensor.abs() + (expected_tensor.constant(TensorT(1)) + abs_values.exp()).log() ).sum(Eigen::array<int, 1>({ 1 })) * error_tensor.chip(time_step, 1).constant(this->scale_); // custom version error_tensor.chip(time_step, 1).device(device) += (result == result).select(result.clip(this->min_, this->max_), result.constant(TensorT(0))); // Deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; }; /** @brief BCEWithLogits loss function gradient. Starting from the following BCEWithLogits formula maxOrMin[z](x, 0) - x * abs(z) + log(1 + exp(-abs(x))) The derivative with respect to x can be formulated as -x*exp(-abs(x)) / ((exp(-abs(x)) + 1) * abs(x)) - abs(z) + maxOrMin[z](x/abs(x), 0) */ template<typename TensorT, typename DeviceT> class BCEWithLogitsLossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: using LossFunctionGradTensorOp<TensorT, DeviceT>::LossFunctionGradTensorOp; std::string getName() { return "BCEWithLogitsLossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto predicted_dir = predicted_chip / predicted_chip.abs(); auto max_values = (expected_tensor < expected_tensor.constant(TensorT(0))).select(predicted_dir.cwiseMin(expected_tensor.constant(TensorT(0))), predicted_dir.cwiseMax(expected_tensor.constant(TensorT(0)))); // custom version auto abs_values = -(predicted_chip.abs()); // tensorFlow and custom versions // Temporary memory for computation TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size * layer_size]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * layer_size * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> result(tmp_data, batch_size, layer_size); //auto result = ((expected_tensor - expected_tensor.constant(TensorT(1))) * predicted_chip.exp() + expected_tensor) / (predicted_chip.exp() + expected_tensor.constant(TensorT(1))); result.device(device) = (-predicted_chip * abs_values.exp() / ((abs_values.exp() + expected_tensor.constant(TensorT(1))) * predicted_chip.abs())) - expected_tensor.abs() + max_values; auto result_scale = result * error_tensor.chip(time_step, 1).constant(this->scale_); error_tensor.chip(time_step, 1).device(device) -= (result_scale == result_scale).select(result_scale.clip(this->min_, this->max_), result_scale.constant(TensorT(0))); // Deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; }; /** @brief Softmax + Cross Entropy loss function. NOTES: implemented as the following: def stable_softmax(X): exps = np.exp(X - np.max(X)) return exps / np.sum(exps) def cross_entropy(p,y): """ p is the output from softmax layer (num_examples x num_classes) y is labels (num_examples x 1) """ m = y.shape[0] log_likelihood = -np.log(p[range(m),y]) loss = np.sum(log_likelihood) / m return loss */ template<typename TensorT, typename DeviceT> class CrossEntropyWithLogitsLossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: using LossFunctionTensorOp<TensorT, DeviceT>::LossFunctionTensorOp; std::string getName() { return "CrossEntropyWithLogitsLossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 5>> predicted_tensor(predicted, batch_size, memory_size, layer_size, 1, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); // 4 dims auto exps = (predicted_chip.chip(0, 3) - predicted_chip.maximum(Eigen::array<int, 1>({ 1 })).broadcast(Eigen::array<int, 3>({1, layer_size, 1}))).exp(); // 3 dims auto stable_softmax = exps.chip(0, 2) / exps.sum(Eigen::array<int, 1>({ 1 })).broadcast(Eigen::array<int, 2>({ 1, layer_size })); // 2 dims // Temporary memory for computation TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 1>> result(tmp_data, batch_size); result.device(device) = ((-expected_tensor * (stable_softmax.clip(this->eps_, TensorT(1)).log())) * expected_tensor.constant(TensorT(1) / TensorT(layer_size))).sum(Eigen::array<int, 1>({ 1 })) * error_tensor.chip(time_step, 1).constant(this->scale_); error_tensor.chip(time_step, 1).device(device) += (result == result).select(result.clip(this->min_, this->max_), result.constant(TensorT(0))); // Deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; }; /** @brief Softmax + Cross Entropy loss function gradient. See for derivations: https://math.stackexchange.com/questions/945871/derivative-of-softmax-loss-function */ template<typename TensorT, typename DeviceT> class CrossEntropyWithLogitsLossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: using LossFunctionGradTensorOp<TensorT, DeviceT>::LossFunctionGradTensorOp; std::string getName() { return "CrossEntropyWithLogitsLossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { // Option 1: from derivation Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> expected_tensor(expected, batch_size, layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto expected_sum = expected_tensor.sum(Eigen::array<Eigen::Index, 1>({ 1 })).broadcast(Eigen::array<Eigen::Index, 2>({ 1, layer_size })); auto result = (((predicted_chip * expected_sum - expected_tensor.chip(0, 2)) / error_tensor.chip(time_step, 1).constant(TensorT(layer_size))) * error_tensor.chip(time_step, 1).constant(this->scale_)); error_tensor.chip(time_step, 1).device(device) -= (result == result).select(result.clip(this->min_, this->max_), result.constant(TensorT(0))); }; }; /** @brief MSERangeUB Mean Squared Error loss function. */ template<typename TensorT, typename DeviceT> class MSERangeUBLossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: using LossFunctionTensorOp<TensorT, DeviceT>::LossFunctionTensorOp; std::string getName() { return "MSERangeUBLossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto mse = ((expected_tensor - predicted_chip).pow(TensorT(2)) * expected_tensor.constant(TensorT(0.5)) / expected_tensor.constant(TensorT(layer_size))); auto in_range = predicted_chip > expected_tensor; auto result = in_range.select(mse, predicted_chip.constant((TensorT)0)); error_tensor.chip(time_step, 1).device(device) += (result.sum(Eigen::array<int, 1>({ 1 })) * error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); }; }; /** @brief MSERangeUB Mean Squared Error loss function gradient. */ template<typename TensorT, typename DeviceT> class MSERangeUBLossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: using LossFunctionGradTensorOp<TensorT, DeviceT>::LossFunctionGradTensorOp; std::string getName() { return "MSERangeUBLossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto mse_grad = (((expected_tensor - predicted_chip) / expected_tensor.constant(TensorT(layer_size))) *error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); auto in_range = predicted_chip > expected_tensor; auto result = in_range.select(mse_grad, predicted_chip.constant((TensorT)0)); error_tensor.chip(time_step, 1).device(device) += result; }; }; /** @brief MSERangeLB Mean Squared Error loss function. */ template<typename TensorT, typename DeviceT> class MSERangeLBLossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: using LossFunctionTensorOp<TensorT, DeviceT>::LossFunctionTensorOp; std::string getName() { return "MSERangeLBLossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto mse = ((expected_tensor - predicted_chip).pow(TensorT(2)) * expected_tensor.constant(TensorT(0.5)) / expected_tensor.constant(TensorT(layer_size))); auto in_range = predicted_chip < expected_tensor; auto result = in_range.select(mse, predicted_chip.constant((TensorT)0)); error_tensor.chip(time_step, 1).device(device) += (result.sum(Eigen::array<int, 1>({ 1 })) *error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); }; }; /** @brief MSERangeLB Mean Squared Error loss function gradient. */ template<typename TensorT, typename DeviceT> class MSERangeLBLossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: using LossFunctionGradTensorOp<TensorT, DeviceT>::LossFunctionGradTensorOp; std::string getName() { return "MSERangeLBLossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto mse_grad = (((expected_tensor - predicted_chip) / expected_tensor.constant(TensorT(layer_size))) *error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); auto in_range = predicted_chip < expected_tensor; auto result = in_range.select(mse_grad, predicted_chip.constant((TensorT)0)); error_tensor.chip(time_step, 1).device(device) += result; }; }; /** @brief KLDivergenceCat loss function. See implementation: https://github.com/Schlumberger/joint-vae/blob/master/jointvae/training.py#L311 KLD = alpha * log(alpha) + log(n) where n is the number of categories */ template<typename TensorT, typename DeviceT> class KLDivergenceCatLossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: KLDivergenceCatLossTensorOp() = default; KLDivergenceCatLossTensorOp(const TensorT & eps, const TensorT & scale, const TensorT & capacity) : LossFunctionTensorOp<TensorT, DeviceT>(eps, scale), capacity_(capacity) {}; ~KLDivergenceCatLossTensorOp() = default; std::string getName() { return "KLDivergenceCatLossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto neg_entropy = (predicted_chip * predicted_chip.log()).sum(Eigen::array<int, 1>({ 1 })); //auto neg_entropy = (predicted_chip.exp() * predicted_chip).sum(Eigen::array<int, 1>({ 1 })); // Previous where input was logAlpha auto log_cat = error_tensor.chip(time_step, 1).constant(layer_size).log(); auto kl_div_cap = neg_entropy + log_cat - (error_tensor.chip(time_step, 1).constant(this->capacity_)).cwiseMin(log_cat); auto result = kl_div_cap * error_tensor.chip(time_step, 1).constant(this->scale_); error_tensor.chip(time_step, 1).device(device) += (result == result).select(result.clip(this->min_, this->max_), result.constant(TensorT(0))); }; private: TensorT capacity_ = TensorT(0); }; /** @brief KLDivergenceCat loss function gradient. d/dx of x*log(x) + log(a) = log(x) + 1 */ template<typename TensorT, typename DeviceT> class KLDivergenceCatLossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: KLDivergenceCatLossGradTensorOp() = default; KLDivergenceCatLossGradTensorOp(const TensorT & eps, const TensorT & scale, const TensorT & capacity) : LossFunctionGradTensorOp<TensorT, DeviceT>(eps, scale), capacity_(capacity) {}; ~KLDivergenceCatLossGradTensorOp() = default; std::string getName() { return "KLDivergenceCatLossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); auto kl_div = predicted_chip.log() + predicted_chip.constant(TensorT(1)); auto log_cat = expected_tensor.constant(layer_size).log(); auto kl_div_cap = kl_div - (error_tensor.chip(time_step, 1).constant(this->capacity_)).cwiseMin(log_cat); auto result = kl_div_cap * error_tensor.chip(time_step, 1).constant(this->scale_); // NOTE: changed to -= to ensure a negative gradient error_tensor.chip(time_step, 1).device(device) -= (result == result).select(result.clip(this->min_, this->max_), result.constant(TensorT(0))); }; private: TensorT capacity_ = TensorT(0); }; /** @brief MAPE Mean Absolute Percent Error loss function. */ template<typename TensorT, typename DeviceT> class MAPELossTensorOp : public LossFunctionTensorOp<TensorT, DeviceT> { public: using LossFunctionTensorOp<TensorT, DeviceT>::LossFunctionTensorOp; std::string getName() { return "MAPELossTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> error_tensor(error, batch_size, memory_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); //auto result = (expected_tensor == expected_tensor.constant(TensorT(0))).select(expected_tensor.constant(TensorT(0)), // ((expected_tensor - predicted_chip) / expected_tensor).abs() / expected_tensor.constant(TensorT(layer_size)) //); auto result = ((expected_tensor - predicted_chip) / (expected_tensor + expected_tensor.constant(TensorT(this->eps_)))).abs() / expected_tensor.constant(TensorT(layer_size)); error_tensor.chip(time_step, 1).device(device) += (result.sum(Eigen::array<int, 1>({ 1 }))*error_tensor.chip(time_step, 1).constant(this->scale_)).clip(this->min_, this->max_); }; }; /** @brief MAPE Mean Absolute Percent Error loss function gradient. */ template<typename TensorT, typename DeviceT> class MAPELossGradTensorOp : public LossFunctionGradTensorOp<TensorT, DeviceT> { public: using LossFunctionGradTensorOp<TensorT, DeviceT>::LossFunctionGradTensorOp; std::string getName() { return "MAPELossGradTensorOp"; } void operator()(TensorT* predicted, TensorT* expected, TensorT* error, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> expected_tensor(expected, batch_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> predicted_tensor(predicted, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> error_tensor(error, batch_size, memory_size, layer_size); auto predicted_chip = predicted_tensor.chip(time_step, 1); //auto result = (expected_tensor - predicted_chip) / (expected_tensor - predicted_chip).abs() / expected_tensor.abs() / expected_tensor.constant(TensorT(layer_size)); //auto result_selected = ((expected_tensor - predicted_chip) == expected_tensor.constant(TensorT(0)) || expected_tensor == expected_tensor.constant(TensorT(0))).select( // expected_tensor.constant(TensorT(0)), result); auto result_selected = (expected_tensor - predicted_chip) / (expected_tensor - predicted_chip + expected_tensor.constant(TensorT(this->eps_))).abs() / (expected_tensor + expected_tensor.constant(TensorT(this->eps_))).abs() / expected_tensor.constant(TensorT(layer_size)); error_tensor.chip(time_step, 1).device(device) += result_selected * expected_tensor.constant(this->scale_).clip(this->min_, this->max_); }; }; /** @brief Hinge loss function. Typically used for classification NOTES: implemented as the following: def Hinge(yHat, y): error_tensor.chip(time_step, 1).device(device) += np.max(0, 1 - yHat * y) */ } #endif //EVONET_LOSSFUNCTIONTENSOR_H<file_sep>/**TODO: Add copyright*/ #include <EvoNet/io/LinkFile.h> #include <EvoNet/io/csv.h> #include <EvoNet/io/CSVWriter.h> #include <cereal/archives/binary.hpp> #include <fstream> #include <cereal/types/memory.hpp> #include <cereal/types/map.hpp> namespace EvoNet { bool LinkFile::loadLinksBinary(const std::string& filename, std::map<std::string, std::shared_ptr<Link>>& links) { std::ofstream ofs(filename, std::ios::binary); if (ofs.is_open() == false) { cereal::BinaryOutputArchive oarchive(ofs); oarchive(links); ofs.close(); } return true; } bool LinkFile::loadLinksCsv(const std::string& filename, std::map<std::string, std::shared_ptr<Link>>& links) { links.clear(); io::CSVReader<5> links_in(filename); links_in.read_header(io::ignore_extra_column, "link_name", "source_node_name", "sink_node_name", "weight_name", "module_name"); std::string link_name, source_node_name, sink_node_name, weight_name, module_name = ""; while(links_in.read_row(link_name, source_node_name, sink_node_name, weight_name, module_name)) { std::shared_ptr<Link> link(new Link(link_name, source_node_name, sink_node_name, weight_name)); link->setModuleName(module_name); links.emplace(link_name, link); } return true; } bool LinkFile::storeLinksBinary(const std::string& filename, std::map<std::string, std::shared_ptr<Link>>& links) { std::ifstream ifs(filename, std::ios::binary); if (ifs.is_open()) { cereal::BinaryInputArchive iarchive(ifs); iarchive(links); ifs.close(); }return true; } bool LinkFile::storeLinksCsv(const std::string& filename, std::map<std::string, std::shared_ptr<Link>>& links) { CSVWriter csvwriter(filename); // write the headers to the first line const std::vector<std::string> headers = {"link_name", "source_node_name", "sink_node_name", "weight_name", "module_name" }; csvwriter.writeDataInRow(headers.begin(), headers.end()); for (const auto& link: links) { std::vector<std::string> row; row.push_back(link.second->getName()); row.push_back(link.second->getSourceNodeName()); row.push_back(link.second->getSinkNodeName()); row.push_back(link.second->getWeightName()); row.push_back(link.second->getModuleName()); // write to file csvwriter.writeDataInRow(row.begin(), row.end()); } return true; } }<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_METABOLOMICSRECONSTRUCTIONDATASIMULATOR_H #define EVONET_METABOLOMICSRECONSTRUCTIONDATASIMULATOR_H // .h #include <EvoNet/simulator/BiochemicalDataSimulator.h> namespace EvoNet { template<typename TensorT> class MetabolomicsReconstructionDataSimulator : public BiochemicalDataSimulator<TensorT> { public: void makeTrainingDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_training, const std::vector<std::string>& labels_training, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) override; void makeValidationDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_validation, const std::vector<std::string>& labels_validation, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) override; void readAndProcessMetabolomicsTrainingAndValidationData(int& n_reaction_ids_training, int& n_labels_training, int& n_component_group_names_training, int& n_reaction_ids_validation, int& n_labels_validation, int& n_component_group_names_validation, const std::string& biochem_rxns_filename, const std::string& metabo_data_filename_train, const std::string& meta_data_filename_train, const std::string& metabo_data_filename_test, const std::string& meta_data_filename_test, const bool& use_concentrations, const bool& use_MARs, const bool& sample_values, const bool& iter_values, const bool& fill_sampling, const bool& fill_mean, const bool& fill_zero, const bool& apply_fold_change, const std::string& fold_change_ref, const TensorT& fold_change_log_base, const bool& offline_linear_scale_input, const bool& offline_log_transform_input, const bool& offline_standardize_input, const bool& online_linear_scale_input, const bool& online_log_transform_input, const bool& online_standardize_input, int& n_reps_per_sample, const bool& randomize_sample_group_names, const bool& shuffle_data_and_labels, const int& n_epochs, const int& batch_size, const int& memory_size) override; /* Get the non randomized training data from the cache corresponding to a single label Assumes that the data has not been randomized nor shuffled. i.e., randomize_sample_group_names = false and shuffle_data_and_labels = false Assumes that the expansion factor was 0 i.e., n_reps_per_sample = -1 @param[in] label The label to get data for */ void getNonRandomizedEncoderTrainingInputFromCacheByLabel(const std::string& label, const int& n_features, Eigen::Tensor<TensorT, 4>& input_data); /* Get the non randomized training data from the cache corresponding to a single label Assumes that the data has not been randomized nor shuffled. i.e., randomize_sample_group_names = false and shuffle_data_and_labels = false Assumes that the expansion factor was 0 i.e., n_reps_per_sample = -1 @param[in] label The label to get data for */ void getNonRandomizedDecoderTrainingOutputFromCacheByLabel(const std::string& label, const int& n_features, Eigen::Tensor<TensorT, 4>& output_data); }; template<typename TensorT> inline void MetabolomicsReconstructionDataSimulator<TensorT>::makeTrainingDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_training, const std::vector<std::string>& labels_training, const int & n_epochs, const int & batch_size, const int & memory_size, const int & n_input_nodes, const int & n_loss_output_nodes, const int & n_metric_output_nodes, const bool& shuffle_data_and_labels) { // infer the input sizes const int input_nodes = data_training.dimension(0); assert(n_input_nodes == input_nodes + this->n_encodings_continuous_ + 2*this->n_encodings_discrete_); assert(n_loss_output_nodes == input_nodes + 2*this->n_encodings_continuous_ + this->n_encodings_discrete_ + this->labels_training_.size()); assert(n_metric_output_nodes == input_nodes + this->labels_training_.size()); assert(data_training.dimension(0) == features.size()); assert(data_training.dimension(1) == labels_training.size()); assert(this->n_encodings_continuous_ > 0); assert(this->n_encodings_discrete_ > 0); // Gaussian Sampler Eigen::Tensor<TensorT, 4> gaussian_samples = GaussianSampler<TensorT>(batch_size, memory_size, this->n_encodings_continuous_, n_epochs); // Concrete Sampler Eigen::Tensor<TensorT, 4> categorical_samples = GumbelSampler<TensorT>(batch_size, memory_size, this->n_encodings_discrete_, n_epochs); TensorT inverse_tau = 3.0 / 2.0; //1.0 / 0.5; // Madison 2017 recommended 2/3 for tau // Dummy data for the KL divergence losses Eigen::Tensor<TensorT, 4> KL_losses_continuous(batch_size, memory_size, this->n_encodings_continuous_, n_epochs); KL_losses_continuous.setZero(); Eigen::Tensor<TensorT, 4> KL_losses_discrete(batch_size, memory_size, this->n_encodings_discrete_, n_epochs); KL_losses_discrete.setZero(); // initialize the Tensors this->input_data_training_.resize(batch_size, memory_size, n_input_nodes, n_epochs); this->loss_output_data_training_.resize(batch_size, memory_size, n_loss_output_nodes, n_epochs); this->metric_output_data_training_.resize(batch_size, memory_size, n_metric_output_nodes, n_epochs); this->time_steps_training_.resize(batch_size, memory_size, n_epochs); // expand the training data to fit into the requested input size const int expansion_factor = maxFunc(std::ceil(TensorT(batch_size * n_epochs) / TensorT(data_training.dimension(1))), 1); const int over_expanded = data_training.dimension(1)*expansion_factor - batch_size * n_epochs; assert(batch_size * memory_size * n_epochs == data_training.dimension(1)*expansion_factor - over_expanded); Eigen::Tensor<TensorT, 2> data_training_expanded(data_training.dimension(0), data_training.dimension(1)*expansion_factor); Eigen::Tensor<std::string, 2> labels_training_expanded(data_training.dimension(1)*expansion_factor, 1); for (int i = 0; i < expansion_factor; ++i) { // Slices for the data Eigen::array<Eigen::Index, 2> offset1 = { 0, i*data_training.dimension(1) }; Eigen::array<Eigen::Index, 2> span1 = { data_training.dimension(0), data_training.dimension(1) }; data_training_expanded.slice(offset1, span1) = data_training; // Slices for the labels for (int j = 0; j < data_training.dimension(1); ++j) { labels_training_expanded(i*data_training.dimension(1) + j, 0) = labels_training.at(j); } //Eigen::array<Eigen::Index, 2> offset2 = { i*data_training.dimension(1), 0 }; //Eigen::array<Eigen::Index, 2> span2 = { data_training.dimension(1), 1 }; //Eigen::TensorMap<Eigen::Tensor<std::string, 2>> labels_2d(labels_training.data(), data_training.dimension(1), 1); //labels_training_expanded.slice(offset2, span2) = labels_2d; } // make the one-hot encodings Eigen::Tensor<TensorT, 2> one_hot_vec = OneHotEncoder<std::string, TensorT>(labels_training_expanded, this->labels_training_); //Eigen::Tensor<TensorT, 2> one_hot_vec_smoothed = one_hot_vec.unaryExpr(LabelSmoother<TensorT>(0.01, 0.01)); // optionally shuffle the data and labels if (shuffle_data_and_labels) { MakeShuffleMatrix<TensorT> shuffleMatrix(data_training.dimension(1) * expansion_factor, true); shuffleMatrix(data_training_expanded, true); shuffleMatrix.setShuffleMatrix(false); // re-orient for column with the same random indices shuffleMatrix(one_hot_vec, false); } // assign the input tensors auto data_training_expanded_4d = data_training_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_training.dimension(0), data_training.dimension(1)*expansion_factor - over_expanded }) ).reshape(Eigen::array<Eigen::Index, 4>({ data_training.dimension(0), batch_size, memory_size, n_epochs }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 1,2,0,3 })); this->input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, input_nodes, n_epochs })) = data_training_expanded_4d; this->input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = gaussian_samples; this->input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes + this->n_encodings_continuous_, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = categorical_samples; this->input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes + this->n_encodings_continuous_ + this->n_encodings_discrete_, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = categorical_samples.constant(inverse_tau); //// Check that values of the data and input tensors are correctly aligned //Eigen::Tensor<TensorT, 1> data_training_head = data_training_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), // Eigen::array<Eigen::Index, 2>({ data_training.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_training.dimension(0) })); //Eigen::Tensor<TensorT, 1> data_training_tail = data_training_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), // Eigen::array<Eigen::Index, 2>({ data_training.dimension(0), data_training.dimension(1)*expansion_factor - over_expanded }) //).slice(Eigen::array<Eigen::Index, 2>({ 0, batch_size * memory_size * n_epochs - 1 }), // Eigen::array<Eigen::Index, 2>({ data_training.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_training.dimension(0) })); //Eigen::Tensor<TensorT, 1> input_training_head = this->input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), // Eigen::array<Eigen::Index, 4>({ 1, 1, data_training.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_training.dimension(0) })); //Eigen::Tensor<TensorT, 1> input_training_tail = this->input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), // Eigen::array<Eigen::Index, 4>({ 1, 1, data_training.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_training.dimension(0) })); //std::cout << "data_training_head\n" << data_training_head << std::endl; //std::cout << "data_training_tail\n" << data_training_tail << std::endl; //for (int i = 0; i < data_training.dimension(0); ++i) { // assert(data_training_head(i) == input_training_head(i)); // assert(data_training_tail(i) == input_training_tail(i)); //} // assign the loss tensors auto one_hot_vec_4d = one_hot_vec.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_training.dimension(1) * expansion_factor - over_expanded, one_hot_vec.dimension(1) }) ).reshape(Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, n_epochs, int(labels_training_.size()) }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 0,1,3,2 })); this->loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, input_nodes, n_epochs })) = data_training_expanded_4d; this->loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = KL_losses_continuous; this->loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes + this->n_encodings_continuous_, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = KL_losses_continuous; this->loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes + 2 * this->n_encodings_continuous_, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = KL_losses_discrete; this->loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes + 2 * this->n_encodings_continuous_ + this->n_encodings_discrete_, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, int(labels_training_.size()), n_epochs })) = one_hot_vec_4d; // assign the metric tensors this->metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, input_nodes, n_epochs })) = data_training_expanded_4d; this->metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, int(labels_training_.size()), n_epochs })) = one_hot_vec_4d; } template<typename TensorT> inline void MetabolomicsReconstructionDataSimulator<TensorT>::makeValidationDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_validation, const std::vector<std::string>& labels_validation, const int & n_epochs, const int & batch_size, const int & memory_size, const int & n_input_nodes, const int & n_loss_output_nodes, const int & n_metric_output_nodes, const bool& shuffle_data_and_labels) { // infer the input sizes const int input_nodes = data_validation.dimension(0); assert(n_input_nodes == input_nodes + this->n_encodings_continuous_ + 2 * this->n_encodings_discrete_); assert(n_loss_output_nodes == input_nodes + 2 * this->n_encodings_continuous_ + this->n_encodings_discrete_ + this->labels_validation_.size()); assert(n_metric_output_nodes == input_nodes + this->labels_validation_.size()); assert(data_validation.dimension(0) == features.size()); assert(data_validation.dimension(1) == labels_validation.size()); assert(this->n_encodings_continuous_ > 0); assert(this->n_encodings_discrete_ > 0); // Gaussian Sampler Eigen::Tensor<TensorT, 4> gaussian_samples = GaussianSampler<TensorT>(batch_size, memory_size, this->n_encodings_continuous_, n_epochs); // Concrete Sampler Eigen::Tensor<TensorT, 4> categorical_samples = GumbelSampler<TensorT>(batch_size, memory_size, this->n_encodings_discrete_, n_epochs); TensorT inverse_tau = 3.0 / 2.0; //1.0 / 0.5; // Madison 2017 recommended 2/3 for tau // Dummy data for the KL divergence losses Eigen::Tensor<TensorT, 4> KL_losses_continuous(batch_size, memory_size, this->n_encodings_continuous_, n_epochs); KL_losses_continuous.setZero(); Eigen::Tensor<TensorT, 4> KL_losses_discrete(batch_size, memory_size, this->n_encodings_discrete_, n_epochs); KL_losses_discrete.setZero(); // initialize the Tensors this->input_data_validation_.resize(batch_size, memory_size, n_input_nodes, n_epochs); this->loss_output_data_validation_.resize(batch_size, memory_size, n_loss_output_nodes, n_epochs); this->metric_output_data_validation_.resize(batch_size, memory_size, n_metric_output_nodes, n_epochs); this->time_steps_validation_.resize(batch_size, memory_size, n_epochs); // expand the validation data to fit into the requested input size const int expansion_factor = maxFunc(std::ceil(TensorT(batch_size * n_epochs) / TensorT(data_validation.dimension(1))), 1); const int over_expanded = data_validation.dimension(1) * expansion_factor - batch_size * n_epochs; assert(batch_size * memory_size * n_epochs == data_validation.dimension(1) * expansion_factor - over_expanded); Eigen::Tensor<TensorT, 2> data_validation_expanded(data_validation.dimension(0), data_validation.dimension(1) * expansion_factor); Eigen::Tensor<std::string, 2> labels_validation_expanded(data_validation.dimension(1) * expansion_factor, 1); for (int i = 0; i < expansion_factor; ++i) { // Slices for the data Eigen::array<Eigen::Index, 2> offset1 = { 0, i * data_validation.dimension(1) }; Eigen::array<Eigen::Index, 2> span1 = { data_validation.dimension(0), data_validation.dimension(1) }; data_validation_expanded.slice(offset1, span1) = data_validation; // Slices for the labels for (int j = 0; j < data_validation.dimension(1); ++j) { labels_validation_expanded(i * data_validation.dimension(1) + j, 0) = labels_validation.at(j); } //Eigen::array<Eigen::Index, 2> offset2 = { i*data_validation.dimension(1), 0 }; //Eigen::array<Eigen::Index, 2> span2 = { data_validation.dimension(1), 1 }; //Eigen::TensorMap<Eigen::Tensor<std::string, 2>> labels_2d(labels_validation.data(), data_validation.dimension(1), 1); //labels_validation_expanded.slice(offset2, span2) = labels_2d; } // make the one-hot encodings Eigen::Tensor<TensorT, 2> one_hot_vec = OneHotEncoder<std::string, TensorT>(labels_validation_expanded, this->labels_validation_); //Eigen::Tensor<TensorT, 2> one_hot_vec_smoothed = one_hot_vec.unaryExpr(LabelSmoother<TensorT>(0.01, 0.01)); // optionally shuffle the data and labels if (shuffle_data_and_labels) { MakeShuffleMatrix<TensorT> shuffleMatrix(data_validation.dimension(1) * expansion_factor, true); shuffleMatrix(data_validation_expanded, true); shuffleMatrix.setShuffleMatrix(false); // re-orient for column with the same random indices shuffleMatrix(one_hot_vec, false); } // assign the input tensors auto data_validation_expanded_4d = data_validation_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_validation.dimension(0), data_validation.dimension(1) * expansion_factor - over_expanded }) ).reshape(Eigen::array<Eigen::Index, 4>({ data_validation.dimension(0), batch_size, memory_size, n_epochs }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 1,2,0,3 })); this->input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, input_nodes, n_epochs })) = data_validation_expanded_4d; this->input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = gaussian_samples; this->input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes + this->n_encodings_continuous_, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = categorical_samples; this->input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes + this->n_encodings_continuous_ + this->n_encodings_discrete_, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = categorical_samples.constant(inverse_tau); //// Check that values of the data and input tensors are correctly aligned //Eigen::Tensor<TensorT, 1> data_validation_head = data_validation_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), // Eigen::array<Eigen::Index, 2>({ data_validation.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_validation.dimension(0) })); //Eigen::Tensor<TensorT, 1> data_validation_tail = data_validation_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), // Eigen::array<Eigen::Index, 2>({ data_validation.dimension(0), data_validation.dimension(1)*expansion_factor - over_expanded }) //).slice(Eigen::array<Eigen::Index, 2>({ 0, batch_size * memory_size * n_epochs - 1 }), // Eigen::array<Eigen::Index, 2>({ data_validation.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_validation.dimension(0) })); //Eigen::Tensor<TensorT, 1> input_validation_head = this->input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), // Eigen::array<Eigen::Index, 4>({ 1, 1, data_validation.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_validation.dimension(0) })); //Eigen::Tensor<TensorT, 1> input_validation_tail = this->input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), // Eigen::array<Eigen::Index, 4>({ 1, 1, data_validation.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_validation.dimension(0) })); //std::cout << "data_validation_head\n" << data_validation_head << std::endl; //std::cout << "data_validation_tail\n" << data_validation_tail << std::endl; //for (int i = 0; i < data_validation.dimension(0); ++i) { // assert(data_validation_head(i) == input_validation_head(i)); // assert(data_validation_tail(i) == input_validation_tail(i)); //} // assign the loss tensors auto one_hot_vec_4d = one_hot_vec.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_validation.dimension(1) * expansion_factor - over_expanded, one_hot_vec.dimension(1) }) ).reshape(Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, n_epochs, int(labels_validation_.size()) }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 0,1,3,2 })); this->loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, input_nodes, n_epochs })) = data_validation_expanded_4d; this->loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = KL_losses_continuous; this->loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes + this->n_encodings_continuous_, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = KL_losses_continuous; this->loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes + 2 * this->n_encodings_continuous_, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = KL_losses_discrete; this->loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes + 2 * this->n_encodings_continuous_ + this->n_encodings_discrete_, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, int(labels_validation_.size()), n_epochs })) = one_hot_vec_4d; // assign the metric tensors this->metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, input_nodes, n_epochs })) = data_validation_expanded_4d; this->metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, input_nodes, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, int(labels_validation_.size()), n_epochs })) = one_hot_vec_4d; } template<typename TensorT> inline void MetabolomicsReconstructionDataSimulator<TensorT>::readAndProcessMetabolomicsTrainingAndValidationData(int & n_reaction_ids_training, int & n_labels_training, int & n_component_group_names_training, int & n_reaction_ids_validation, int & n_labels_validation, int & n_component_group_names_validation, const std::string & biochem_rxns_filename, const std::string & metabo_data_filename_train, const std::string & meta_data_filename_train, const std::string & metabo_data_filename_test, const std::string & meta_data_filename_test, const bool & use_concentrations, const bool & use_MARs, const bool & sample_values, const bool & iter_values, const bool & fill_sampling, const bool & fill_mean, const bool & fill_zero, const bool & apply_fold_change, const std::string & fold_change_ref, const TensorT & fold_change_log_base, const bool & offline_linear_scale_input, const bool & offline_log_transform_input, const bool & offline_standardize_input, const bool & online_linear_scale_input, const bool & online_log_transform_input, const bool & online_standardize_input, int & n_reps_per_sample, const bool& randomize_sample_group_names, const bool& shuffle_data_and_labels, const int & n_epochs, const int & batch_size, const int & memory_size) { // Read in the data and make the data matrices std::vector<std::string> labels_training; std::vector<std::string> features_training; Eigen::Tensor<TensorT, 2> data_training; std::vector<std::string> labels_validation; std::vector<std::string> features_validation; Eigen::Tensor<TensorT, 2> data_validation; this->readAndMakeMetabolomicsTrainingAndValidationDataMatrices(n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, features_training, data_training, labels_training, features_validation, data_validation, labels_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, use_concentrations, use_MARs, sample_values, iter_values, fill_sampling, fill_mean, fill_zero, apply_fold_change, fold_change_ref, fold_change_log_base, n_reps_per_sample, randomize_sample_group_names, n_epochs, batch_size, memory_size); // Make the training and validation data caches after an optional transformation step if (use_concentrations) { // Apply offline transformations this->transformTrainingAndValidationDataOffline(data_training, data_validation, offline_linear_scale_input, offline_log_transform_input, offline_standardize_input, false, -1, -1, false, -1, -1); // Apply online transformations this->transformTrainingAndValidationDataOnline(data_training, data_validation, online_linear_scale_input, online_log_transform_input, online_standardize_input); // Make the training data cache this->makeTrainingDataForCache(features_training, data_training, labels_training, n_epochs, batch_size, memory_size, n_component_group_names_training + this->n_encodings_continuous_ + 2*this->n_encodings_discrete_, n_component_group_names_training + 2 * this->n_encodings_continuous_ + this->n_encodings_discrete_ + this->labels_training_.size(), n_component_group_names_training + this->labels_training_.size(), shuffle_data_and_labels); this->makeValidationDataForCache(features_validation, data_validation, labels_validation, n_epochs, batch_size, memory_size, n_component_group_names_training + this->n_encodings_continuous_ + 2*this->n_encodings_discrete_, n_component_group_names_training + 2 * this->n_encodings_continuous_ + this->n_encodings_discrete_ + this->labels_validation_.size(), n_component_group_names_training + this->labels_validation_.size(), shuffle_data_and_labels); } else if (use_MARs) { // Apply offline transformations TensorT min_value = 1e-3; TensorT max_value = 1e3; if (offline_log_transform_input) { min_value = std::log(min_value); max_value = std::log(max_value); } this->transformTrainingAndValidationDataOffline(data_training, data_validation, offline_linear_scale_input, offline_log_transform_input, offline_standardize_input, true, min_value, max_value, false, -1, -1); // Apply online transformations this->transformTrainingAndValidationDataOnline(data_training, data_validation, online_linear_scale_input, online_log_transform_input, online_standardize_input); // Make the training data cache this->makeTrainingDataForCache(features_training, data_training, labels_training, n_epochs, batch_size, memory_size, n_reaction_ids_validation + this->n_encodings_continuous_ + 2 * this->n_encodings_discrete_, n_reaction_ids_validation + 2 * this->n_encodings_continuous_ + this->n_encodings_discrete_ + this->labels_training_.size(), n_reaction_ids_validation + this->labels_training_.size(), shuffle_data_and_labels); this->makeValidationDataForCache(features_validation, data_validation, labels_validation, n_epochs, batch_size, memory_size, n_reaction_ids_validation + this->n_encodings_continuous_ + 2 * this->n_encodings_discrete_, n_reaction_ids_validation + 2*this->n_encodings_continuous_ + this->n_encodings_discrete_ + this->labels_validation_.size(), n_reaction_ids_validation + this->labels_validation_.size(), shuffle_data_and_labels); } } template<typename TensorT> inline void MetabolomicsReconstructionDataSimulator<TensorT>::getNonRandomizedEncoderTrainingInputFromCacheByLabel(const std::string& label, const int& n_features, Eigen::Tensor<TensorT, 4>& input_data) { // Determine the offset of the label auto l = std::find(this->labels_training_.begin(), this->labels_training_.end(), label); // Assign the output data based on the offset if (l != std::end(this->labels_training_)) { int index = std::distance(this->labels_training_.begin(), l); int n_reps_per_sample = this->input_data_training_.dimension(0) * this->input_data_training_.dimension(3) / this->labels_training_.size(); int n_epochs_span = n_reps_per_sample / this->input_data_training_.dimension(0); input_data.resize(this->input_data_training_.dimension(0), this->input_data_training_.dimension(1), n_features, n_epochs_span); input_data = this->input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, n_epochs_span * index }), Eigen::array<Eigen::Index, 4>({ this->input_data_training_.dimension(0), this->input_data_training_.dimension(1), n_features, n_epochs_span })); } } template<typename TensorT> inline void MetabolomicsReconstructionDataSimulator<TensorT>::getNonRandomizedDecoderTrainingOutputFromCacheByLabel(const std::string& label, const int& n_features, Eigen::Tensor<TensorT, 4>& output_data) { // Determine the offset of the label auto l = std::find(this->labels_training_.begin(), this->labels_training_.end(), label); // Assign the output data based on the offset if (l != std::end(this->labels_training_)) { int index = std::distance(this->labels_training_.begin(), l); int n_reps_per_sample = this->loss_output_data_training_.dimension(0) * this->loss_output_data_training_.dimension(3) / this->labels_training_.size(); int n_epochs_span = n_reps_per_sample / this->loss_output_data_training_.dimension(0); output_data.resize(this->loss_output_data_training_.dimension(0), this->loss_output_data_training_.dimension(1), n_features, n_epochs_span); output_data = this->loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, n_epochs_span * index }), Eigen::array<Eigen::Index, 4>({ this->loss_output_data_training_.dimension(0), this->loss_output_data_training_.dimension(1), n_features, n_epochs_span })); } } } #endif //EVONET_METABOLOMICSRECONSTRUCTIONDATASIMULATOR_H<file_sep>#include <EvoNet/core/Helloworld.h> namespace EvoNet { Helloworld::Helloworld() { } Helloworld::~Helloworld() { } double Helloworld::addNumbers(const double& x, const double& y) const { return x + y; } }<file_sep>EvoNet ====== .. toctree:: :caption: First Steps :maxdepth: 1 start/start start/features changelog faq .. toctree:: :caption: Step-by-step guides :maxdepth: 3 guide/install guide/build guide/usage .. toctree:: :caption: About Smartpeak :maxdepth: 1 about/contribute about/developer api/library_root .. toctree:: :caption: Other :maxdepth: 1 genindex .. include:: ../README.rst :start-after: begin_badges :end-before: end_badges Overview -------- .. role:: bash(code) :language: bash .. include:: ../README.rst :start-after: begin_introduction :end-before: end_introduction TODO ==== .. todolist:: Credit ====== .. begin_credit This project could have not existed without the excellent tools available: Boost, Eigen, Doxygen, Sphinx, and many others. .. end_credit License ======= This project uses a MIT license, with the hope that it'll be accessible to most users. If you require a different license, please raise an issue and I will consider a dual license. The full license is `available here <https://github.com/dmccloskey/EvoNet/blob/develop/LICENSE>`_.<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ActivationFunctionTensor test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/ActivationFunctionTensor.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(activationFunction) /** ReLUTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorReluTensorOp) { ReLUTensorOp<double, Eigen::DefaultDevice>* ptrReLU = nullptr; ReLUTensorOp<double, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorReluTensorOp) { ReLUTensorOp<double, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new ReLUTensorOp<double, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(gettersAndSettersReluTensorOp) { // Test defaults ReLUTensorOp<double, Eigen::DefaultDevice> operation_defaults; BOOST_CHECK_CLOSE(operation_defaults.getEps(), 1e-24, 1e-6); BOOST_CHECK_CLOSE(operation_defaults.getMin(), -1e9, 1e-6); BOOST_CHECK_CLOSE(operation_defaults.getMax(), 1e9, 1e-6); // Test setters operation_defaults.setEps(10); operation_defaults.setMin(20); operation_defaults.setMax(30); BOOST_CHECK_CLOSE(operation_defaults.getEps(), 10, 1e-6); BOOST_CHECK_CLOSE(operation_defaults.getMin(), 20, 1e-6); BOOST_CHECK_CLOSE(operation_defaults.getMax(), 30, 1e-6); // Test constructor ReLUTensorOp<double, Eigen::DefaultDevice> operation(10, 20, 30); BOOST_CHECK_CLOSE(operation.getEps(), 10, 1e-6); BOOST_CHECK_CLOSE(operation.getMin(), 20, 1e-6); BOOST_CHECK_CLOSE(operation.getMax(), 30, 1e-6); } BOOST_AUTO_TEST_CASE(operationfunctionReluTensorOp) { ReLUTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{0,0}, {0,0}}, {{0,0}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i,j,k), test(i,j,k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameReLUTensorOp) { ReLUTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "ReLUTensorOp"); } /** ReLUGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorReluGradTensorOp) { ReLUGradTensorOp<double, Eigen::DefaultDevice>* ptrReLUGrad = nullptr; ReLUGradTensorOp<double, Eigen::DefaultDevice>* nullPointerReLUGrad = nullptr; BOOST_CHECK_EQUAL(ptrReLUGrad, nullPointerReLUGrad); } BOOST_AUTO_TEST_CASE(destructorReluGradTensorOp) { ReLUGradTensorOp<double, Eigen::DefaultDevice>* ptrReLUGrad = nullptr; ptrReLUGrad = new ReLUGradTensorOp<double, Eigen::DefaultDevice>(); delete ptrReLUGrad; } BOOST_AUTO_TEST_CASE(operationfunctionReluGradTensorOp) { ReLUGradTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{1,1}, {0,0}}, {{1,1}, {0,0}}, {{1,1}, {0,0}}, {{0,0}, {0,0}}, {{0,0}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i,j,k), test(i,j,k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameReLUGradTensorOp) { ReLUGradTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "ReLUGradTensorOp"); } /** ELUTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorEluTensorOp) { ELUTensorOp<double, Eigen::DefaultDevice>* ptrELU = nullptr; ELUTensorOp<double, Eigen::DefaultDevice>* nullPointerELU = nullptr; BOOST_CHECK_EQUAL(ptrELU, nullPointerELU); } BOOST_AUTO_TEST_CASE(destructorEluTensorOp) { ELUTensorOp<double, Eigen::DefaultDevice>* ptrELU = nullptr; ptrELU = new ELUTensorOp<double, Eigen::DefaultDevice>(); delete ptrELU; } BOOST_AUTO_TEST_CASE(gettersAndSettersEluTensorOp) { ELUTensorOp<double, Eigen::DefaultDevice> operation; operation.setAlpha(1.0); BOOST_CHECK_EQUAL(operation.getAlpha(), 1.0); } BOOST_AUTO_TEST_CASE(operationfunctionEluTensorOp) { ELUTensorOp<double, Eigen::DefaultDevice> operation(1.0); const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-0.63212055882855767,-0.63212055882855767}, {0,0}}, {{-0.99995460007023751,-0.99995460007023751}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i,j,k), test(i,j,k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameELUTensorOp) { ELUTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "ELUTensorOp"); } /** ELUGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorEluGradTensorOp) { ELUGradTensorOp<double, Eigen::DefaultDevice>* ptrELU = nullptr; ELUGradTensorOp<double, Eigen::DefaultDevice>* nullPointerELU = nullptr; BOOST_CHECK_EQUAL(ptrELU, nullPointerELU); } BOOST_AUTO_TEST_CASE(destructorEluGradTensorOp) { ELUGradTensorOp<double, Eigen::DefaultDevice>* ptrELU = nullptr; ptrELU = new ELUGradTensorOp<double, Eigen::DefaultDevice>(); delete ptrELU; } BOOST_AUTO_TEST_CASE(gettersAndSettersEluGradTensorOp) { ELUGradTensorOp<double, Eigen::DefaultDevice> operation; operation.setAlpha(1.0); BOOST_CHECK_EQUAL(operation.getAlpha(), 1.0); } BOOST_AUTO_TEST_CASE(operationfunctionEluGradTensorOp) { ELUGradTensorOp<double, Eigen::DefaultDevice> operation(1.0); const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{1,1}, {0,0}}, {{1,1}, {0,0}}, {{1,1}, {0,0}}, {{0.36787944117144233,0.36787944117144233}, {0,0}}, {{4.5399929762490743e-05,4.5399929762490743e-05}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i,j,k), test(i,j,k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameELUGradTensorOp) { ELUGradTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "ELUGradTensorOp"); } /** SigmoidTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorSigmoidTensorOp) { SigmoidTensorOp<double, Eigen::DefaultDevice>* ptrSigmoid = nullptr; SigmoidTensorOp<double, Eigen::DefaultDevice>* nullPointerSigmoid = nullptr; BOOST_CHECK_EQUAL(ptrSigmoid, nullPointerSigmoid); } BOOST_AUTO_TEST_CASE(destructorSigmoidTensorOp) { SigmoidTensorOp<double, Eigen::DefaultDevice>* ptrSigmoid = nullptr; ptrSigmoid = new SigmoidTensorOp<double, Eigen::DefaultDevice>(); delete ptrSigmoid; } BOOST_AUTO_TEST_CASE(operationfunctionSigmoidTensorOp) { SigmoidTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{0.5,0.5}, {0,0}}, {{0.7310585786300049,0.7310585786300049}, {0,0}}, {{0.99995460213129761,0.99995460213129761}, {0,0}}, {{0.2689414213699951,0.2689414213699951}, {0,0}}, {{4.5397868702434395e-05,4.5397868702434395e-05}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i,j,k), test(i,j,k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameSigmoidTensorOp) { SigmoidTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "SigmoidTensorOp"); } /** SigmoidGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorSigmoidGradTensorOp) { SigmoidGradTensorOp<double, Eigen::DefaultDevice>* ptrSigmoidGrad = nullptr; SigmoidGradTensorOp<double, Eigen::DefaultDevice>* nullPointerSigmoidGrad = nullptr; BOOST_CHECK_EQUAL(ptrSigmoidGrad, nullPointerSigmoidGrad); } BOOST_AUTO_TEST_CASE(destructorSigmoidGradTensorOp) { SigmoidGradTensorOp<double, Eigen::DefaultDevice>* ptrSigmoidGrad = nullptr; ptrSigmoidGrad = new SigmoidGradTensorOp<double, Eigen::DefaultDevice>(); delete ptrSigmoidGrad; } BOOST_AUTO_TEST_CASE(operationfunctionSigmoidGradTensorOp) { SigmoidGradTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{0.25,0.25}, {0,0}}, {{0.19661193324148185,0.19661193324148185}, {0,0}}, {{4.5395807735907655e-05,4.5395807735907655e-05}, {0,0}}, {{0.19661193324148185,0.19661193324148185}, {0,0}}, {{4.53958091e-05,4.53958091e-05}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i,j,k), test(i,j,k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameSigmoidGradTensorOp) { SigmoidGradTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "SigmoidGradTensorOp"); } /** TanHTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorTanHTensorOp) { TanHTensorOp<double, Eigen::DefaultDevice>* ptrTanH = nullptr; TanHTensorOp<double, Eigen::DefaultDevice>* nullPointerTanH = nullptr; BOOST_CHECK_EQUAL(ptrTanH, nullPointerTanH); } BOOST_AUTO_TEST_CASE(destructorTanHTensorOp) { TanHTensorOp<double, Eigen::DefaultDevice>* ptrTanH = nullptr; ptrTanH = new TanHTensorOp<double, Eigen::DefaultDevice>(); delete ptrTanH; } BOOST_AUTO_TEST_CASE(operationfunctionTanHTensorOp) { TanHTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{0.0,0.0}, {0,0}}, {{0.76159415595576485,0.76159415595576485}, {0,0}}, {{0.99999999587769262,0.99999999587769262}, {0,0}}, {{-0.76159415595576485,-0.76159415595576485}, {0,0}}, {{-0.99999999587769262,-0.99999999587769262}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i,j,k), test(i,j,k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameTanHTensorOp) { TanHTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "TanHTensorOp"); } /** TanHGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorTanHGradTensorOp) { TanHGradTensorOp<double, Eigen::DefaultDevice>* ptrTanHGrad = nullptr; TanHGradTensorOp<double, Eigen::DefaultDevice>* nullPointerTanHGrad = nullptr; BOOST_CHECK_EQUAL(ptrTanHGrad, nullPointerTanHGrad); } BOOST_AUTO_TEST_CASE(destructorTanHGradTensorOp) { TanHGradTensorOp<double, Eigen::DefaultDevice>* ptrTanHGrad = nullptr; ptrTanHGrad = new TanHGradTensorOp<double, Eigen::DefaultDevice>(); delete ptrTanHGrad; } BOOST_AUTO_TEST_CASE(operationfunctionTanHGradTensorOp) { TanHGradTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{1,1}, {0,0}}, {{0.41997434161402614,0.41997434161402614}, {0,0}}, {{8.2446145466263943e-09,8.2446145466263943e-09}, {0,0}}, {{0.41997434161402614,0.41997434161402614}, {0,0}}, {{8.2446145466263943e-09,8.2446145466263943e-09}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i,j,k), test(i,j,k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameTanHGradTensorOp) { TanHGradTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "TanHGradTensorOp"); } /** ReTanHTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorReTanHTensorOp) { ReTanHTensorOp<double, Eigen::DefaultDevice>* ptrReTanH = nullptr; ReTanHTensorOp<double, Eigen::DefaultDevice>* nullPointerReTanH = nullptr; BOOST_CHECK_EQUAL(ptrReTanH, nullPointerReTanH); } BOOST_AUTO_TEST_CASE(destructorReTanHTensorOp) { ReTanHTensorOp<double, Eigen::DefaultDevice>* ptrReTanH = nullptr; ptrReTanH = new ReTanHTensorOp<double, Eigen::DefaultDevice>(); delete ptrReTanH; } // [TODO: need to re-implement] //BOOST_AUTO_TEST_CASE(operationfunctionReTanHTensorOp) //{ // ReTanHTensorOp<double, Eigen::DefaultDevice> operation; // const int batch_size = 5; // const int memory_size = 2; // const int layer_size = 2; // Eigen::DefaultDevice device; // Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); // input.setValues({ // {{0,0}, {0,0}}, // {{1,1}, {0,0}}, // {{10,10}, {0,0}}, // {{-1,-1}, {0,0}}, // {{-10,-10}, {0,0}} }); // Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); // output.setZero(); // Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); // test.setValues({ // {{0,0}, {0,0}}, // {{0.76159415595576485,0.76159415595576485}, {0,0}}, // {{0.99999999587769262,0.99999999587769262}, {0,0}}, // {{0,0}, {0,0}}, // {{0,0}, {0,0}} }); // // operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // // // Test // for (int i = 0; i < batch_size; ++i) { // for (int j = 0; j < memory_size; ++j) { // for (int k = 0; k < layer_size; ++k) { // BOOST_CHECK_CLOSE(output(i,j,k), test(i,j,k), 1e-4); // } // } // } //} BOOST_AUTO_TEST_CASE(getNameReTanHTensorOp) { ReTanHTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "ReTanHTensorOp"); } /** ReTanHGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorReTanHGradTensorOp) { ReTanHGradTensorOp<double, Eigen::DefaultDevice>* ptrReTanHGrad = nullptr; ReTanHGradTensorOp<double, Eigen::DefaultDevice>* nullPointerReTanHGrad = nullptr; BOOST_CHECK_EQUAL(ptrReTanHGrad, nullPointerReTanHGrad); } BOOST_AUTO_TEST_CASE(destructorReTanHGradTensorOp) { ReTanHGradTensorOp<double, Eigen::DefaultDevice>* ptrReTanHGrad = nullptr; ptrReTanHGrad = new ReTanHGradTensorOp<double, Eigen::DefaultDevice>(); delete ptrReTanHGrad; } // TODO: need to re-implement //BOOST_AUTO_TEST_CASE(operationfunctionReTanHGradTensorOp) //{ // ReTanHGradTensorOp<double, Eigen::DefaultDevice> operation; // const int batch_size = 5; // const int memory_size = 2; // const int layer_size = 2; // Eigen::DefaultDevice device; // Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); // input.setValues({ // {{0,0}, {0,0}}, // {{1,1}, {0,0}}, // {{10,10}, {0,0}}, // {{-1,-1}, {0,0}}, // {{-10,-10}, {0,0}} }); // Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); // output.setZero(); // Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); // test.setValues({ // {{0,0}, {0,0}}, // {{0.41997434161402614,0.41997434161402614}, {0,0}}, // {{8.2446147686709992e-09,8.2446147686709992e-09}, {0,0}}, // {{0,0}, {0,0}}, // {{0,0}, {0,0}} }); // // operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // // // Test // for (int i = 0; i < batch_size; ++i) { // for (int j = 0; j < memory_size; ++j) { // for (int k = 0; k < layer_size; ++k) { // BOOST_CHECK_CLOSE(output(i,j,k), test(i,j,k), 1e-4); // } // } // } //} BOOST_AUTO_TEST_CASE(getNameReTanHGradTensorOp) { ReTanHGradTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "ReTanHGradTensorOp"); } /** LinearTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorLinearTensorOp) { LinearTensorOp<double, Eigen::DefaultDevice>* ptrLinear = nullptr; LinearTensorOp<double, Eigen::DefaultDevice>* nullPointerLinear = nullptr; BOOST_CHECK_EQUAL(ptrLinear, nullPointerLinear); } BOOST_AUTO_TEST_CASE(destructorLinearTensorOp) { LinearTensorOp<double, Eigen::DefaultDevice>* ptrLinear = nullptr; ptrLinear = new LinearTensorOp<double, Eigen::DefaultDevice>(); delete ptrLinear; } BOOST_AUTO_TEST_CASE(operationfunctionLinearTensorOp) { LinearTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i,j,k), test(i,j,k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameLinearTensorOp) { LinearTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "LinearTensorOp"); } /** LinearGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorLinearGradTensorOp) { LinearGradTensorOp<double, Eigen::DefaultDevice>* ptrLinearGrad = nullptr; LinearGradTensorOp<double, Eigen::DefaultDevice>* nullPointerLinearGrad = nullptr; BOOST_CHECK_EQUAL(ptrLinearGrad, nullPointerLinearGrad); } BOOST_AUTO_TEST_CASE(destructorLinearGradTensorOp) { LinearGradTensorOp<double, Eigen::DefaultDevice>* ptrLinearGrad = nullptr; ptrLinearGrad = new LinearGradTensorOp<double, Eigen::DefaultDevice>(); delete ptrLinearGrad; } BOOST_AUTO_TEST_CASE(operationfunctionLinearGradTensorOp) { LinearGradTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{1,1}, {0,0}}, {{1,1}, {0,0}}, {{1,1}, {0,0}}, {{1,1}, {0,0}}, {{1,1}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i,j,k), test(i,j,k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameLinearGradTensorOp) { LinearGradTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "LinearGradTensorOp"); } /** InverseTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorInverseTensorOp) { InverseTensorOp<double, Eigen::DefaultDevice>* ptrInverse = nullptr; InverseTensorOp<double, Eigen::DefaultDevice>* nullPointerInverse = nullptr; BOOST_CHECK_EQUAL(ptrInverse, nullPointerInverse); } BOOST_AUTO_TEST_CASE(destructorInverseTensorOp) { InverseTensorOp<double, Eigen::DefaultDevice>* ptrInverse = nullptr; ptrInverse = new InverseTensorOp<double, Eigen::DefaultDevice>(); delete ptrInverse; } BOOST_AUTO_TEST_CASE(operationfunctionInverseTensorOp) { InverseTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{0.1,0.1}, {0,0}}, {{-1,-1}, {0,0}}, {{-0.1,-0.1}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i,j,k), test(i,j,k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameInverseTensorOp) { InverseTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "InverseTensorOp"); } /** InverseGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorInverseGradTensorOp) { InverseGradTensorOp<double, Eigen::DefaultDevice>* ptrInverseGrad = nullptr; InverseGradTensorOp<double, Eigen::DefaultDevice>* nullPointerInverseGrad = nullptr; BOOST_CHECK_EQUAL(ptrInverseGrad, nullPointerInverseGrad); } BOOST_AUTO_TEST_CASE(destructorInverseGradTensorOp) { InverseGradTensorOp<double, Eigen::DefaultDevice>* ptrInverseGrad = nullptr; ptrInverseGrad = new InverseGradTensorOp<double, Eigen::DefaultDevice>(); delete ptrInverseGrad; } BOOST_AUTO_TEST_CASE(operationfunctionInverseGradTensorOp) { InverseGradTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{0,0}, {0,0}}, {{-1,-1}, {0,0}}, {{-0.01,-0.01}, {0,0}}, {{-1,-1}, {0,0}}, {{-0.01,-0.01}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameInverseGradTensorOp) { InverseGradTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "InverseGradTensorOp"); } /** ExponentialTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorExponentialTensorOp) { ExponentialTensorOp<double, Eigen::DefaultDevice>* ptrExponential = nullptr; ExponentialTensorOp<double, Eigen::DefaultDevice>* nullPointerExponential = nullptr; BOOST_CHECK_EQUAL(ptrExponential, nullPointerExponential); } BOOST_AUTO_TEST_CASE(destructorExponentialTensorOp) { ExponentialTensorOp<double, Eigen::DefaultDevice>* ptrExponential = nullptr; ptrExponential = new ExponentialTensorOp<double, Eigen::DefaultDevice>(); delete ptrExponential; } BOOST_AUTO_TEST_CASE(operationfunctionExponentialTensorOp) { ExponentialTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{1,1}, {0,0}}, {{2.718281828,2.718281828}, {0,0}}, {{22026.46579,22026.46579}, {0,0}}, {{0.367879441,0.367879441}, {0,0}}, {{4.53999E-05,4.53999E-05}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameExponentialTensorOp) { ExponentialTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "ExponentialTensorOp"); } /** ExponentialGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorExponentialGradTensorOp) { ExponentialGradTensorOp<double, Eigen::DefaultDevice>* ptrExponentialGrad = nullptr; ExponentialGradTensorOp<double, Eigen::DefaultDevice>* nullPointerExponentialGrad = nullptr; BOOST_CHECK_EQUAL(ptrExponentialGrad, nullPointerExponentialGrad); } BOOST_AUTO_TEST_CASE(destructorExponentialGradTensorOp) { ExponentialGradTensorOp<double, Eigen::DefaultDevice>* ptrExponentialGrad = nullptr; ptrExponentialGrad = new ExponentialGradTensorOp<double, Eigen::DefaultDevice>(); delete ptrExponentialGrad; } BOOST_AUTO_TEST_CASE(operationfunctionExponentialGradTensorOp) { ExponentialGradTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{1,1}, {0,0}}, {{2.718281828,2.718281828}, {0,0}}, {{22026.46579,22026.46579}, {0,0}}, {{0.367879441,0.367879441}, {0,0}}, {{4.53999E-05,4.53999E-05}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameExponentialGradTensorOp) { ExponentialGradTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "ExponentialGradTensorOp"); } /** LogTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorLogTensorOp) { LogTensorOp<double, Eigen::DefaultDevice>* ptrLog = nullptr; LogTensorOp<double, Eigen::DefaultDevice>* nullPointerLog = nullptr; BOOST_CHECK_EQUAL(ptrLog, nullPointerLog); } BOOST_AUTO_TEST_CASE(destructorLogTensorOp) { LogTensorOp<double, Eigen::DefaultDevice>* ptrLog = nullptr; ptrLog = new LogTensorOp<double, Eigen::DefaultDevice>(); delete ptrLog; } BOOST_AUTO_TEST_CASE(operationfunctionLogTensorOp) { LogTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{-55.262042231857095,-55.262042231857095}, {0,0}}, {{0,0}, {0,0}}, {{2.3025850929940459,2.3025850929940459}, {0,0}}, {{-55.262042231857095,-55.262042231857095}, {0,0}}, {{-55.262042231857095,-55.262042231857095}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameLogTensorOp) { LogTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "LogTensorOp"); } /** LogGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorLogGradTensorOp) { LogGradTensorOp<double, Eigen::DefaultDevice>* ptrLogGrad = nullptr; LogGradTensorOp<double, Eigen::DefaultDevice>* nullPointerLogGrad = nullptr; BOOST_CHECK_EQUAL(ptrLogGrad, nullPointerLogGrad); } BOOST_AUTO_TEST_CASE(destructorLogGradTensorOp) { LogGradTensorOp<double, Eigen::DefaultDevice>* ptrLogGrad = nullptr; ptrLogGrad = new LogGradTensorOp<double, Eigen::DefaultDevice>(); delete ptrLogGrad; } BOOST_AUTO_TEST_CASE(operationfunctionLogGradTensorOp) { LogGradTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{0,0}, {0,0}}, // was 1e9 prior to selection {{1,1}, {0,0}}, {{0.1,0.1}, {0,0}}, {{-1,-1}, {0,0}}, {{-0.1,-0.1}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameLogGradTensorOp) { LogGradTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "LogGradTensorOp"); } /** PowTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorPowTensorOp) { PowTensorOp<double, Eigen::DefaultDevice>* ptrPow = nullptr; PowTensorOp<double, Eigen::DefaultDevice>* nullPointerPow = nullptr; BOOST_CHECK_EQUAL(ptrPow, nullPointerPow); } BOOST_AUTO_TEST_CASE(destructorPowTensorOp) { PowTensorOp<double, Eigen::DefaultDevice>* ptrPow = nullptr; ptrPow = new PowTensorOp<double, Eigen::DefaultDevice>(2); delete ptrPow; } BOOST_AUTO_TEST_CASE(operationfunctionPowTensorOp) { PowTensorOp<double, Eigen::DefaultDevice> operation(0.5); const int batch_size = 7; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{1e12,1e12}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}}, {{-1e12,-1e12}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{3.1622776601683795,3.1622776601683795}, {0,0}}, {{31622.776601683792,31622.776601683792}, {0,0}}, {{0,0}, {0,0}}, {{0,0}, {0,0}}, {{0,0}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); } } } PowTensorOp<double, Eigen::DefaultDevice> operation2(2.0); output.setZero(); test.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{100,100}, {0,0}}, {{1e9,1e9}, {0,0}}, {{1,1}, {0,0}}, // TODO: Clip does not fix -nan(ind) {{100,100}, {0,0}}, // TODO: Clip does not fix -nan(ind) {{1e9,1e9}, {0,0}} }); operation2(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNamePowTensorOp) { PowTensorOp<double, Eigen::DefaultDevice> operation(0.5); BOOST_CHECK_EQUAL(operation.getName(), "PowTensorOp"); } /** PowGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorPowGradTensorOp) { PowGradTensorOp<double, Eigen::DefaultDevice>* ptrPowGrad = nullptr; PowGradTensorOp<double, Eigen::DefaultDevice>* nullPointerPowGrad = nullptr; BOOST_CHECK_EQUAL(ptrPowGrad, nullPointerPowGrad); } BOOST_AUTO_TEST_CASE(destructorPowGradTensorOp) { PowGradTensorOp<double, Eigen::DefaultDevice>* ptrPowGrad = nullptr; ptrPowGrad = new PowGradTensorOp<double, Eigen::DefaultDevice>(0.5); delete ptrPowGrad; } BOOST_AUTO_TEST_CASE(operationfunctionPowGradTensorOp) { PowGradTensorOp<double, Eigen::DefaultDevice> operation(0.5); const int batch_size = 7; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{1e9,1e9}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}}, {{-1e9,-1e9}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{1.0e9,1.0e9}, {0,0}}, {{0.5,0.5}, {0,0}}, {{0.15811388300841897,0.15811388300841897}, {0,0}}, {{1.5811388300841898e-05,1.5811388300841898e-05}, {0,0}}, {{1.0e9,1.0e9}, {0,0}}, // TODO: why is this not zero? {{1.0e9,1.0e9}, {0,0}}, {{1.0e9,1.0e9}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); } } } PowGradTensorOp<double, Eigen::DefaultDevice> operation2(2); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{1e9,1e9}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}}, {{-1e9,-1e9}, {0,0}} }); output.setZero(); test.setValues({ {{0,0}, {0,0}}, {{2.0,2.0}, {0,0}}, {{20,20}, {0,0}}, {{1e9,1e9}, {0,0}}, {{-2,-2}, {0,0}}, {{-20,-20}, {0,0}}, {{-1e9,-1e9}, {0,0}} }); operation2(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNamePowGradTensorOp) { PowGradTensorOp<double, Eigen::DefaultDevice> operation(0.5); BOOST_CHECK_EQUAL(operation.getName(), "PowGradTensorOp"); } /** LeakyReLUTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorLeakyReLUTensorOp) { LeakyReLUTensorOp<double, Eigen::DefaultDevice>* ptrLeakyReLU = nullptr; LeakyReLUTensorOp<double, Eigen::DefaultDevice>* nullPointerLeakyReLU = nullptr; BOOST_CHECK_EQUAL(ptrLeakyReLU, nullPointerLeakyReLU); } BOOST_AUTO_TEST_CASE(destructorLeakyReLUTensorOp) { LeakyReLUTensorOp<double, Eigen::DefaultDevice>* ptrLeakyReLU = nullptr; ptrLeakyReLU = new LeakyReLUTensorOp<double, Eigen::DefaultDevice>(); delete ptrLeakyReLU; } BOOST_AUTO_TEST_CASE(gettersAndSettersLeakyReLUTensorOp) { LeakyReLUTensorOp<double, Eigen::DefaultDevice> operation; operation.setAlpha(1.0); BOOST_CHECK_EQUAL(operation.getAlpha(), 1.0); } BOOST_AUTO_TEST_CASE(operationfunctionLeakyReLUTensorOp) { LeakyReLUTensorOp<double, Eigen::DefaultDevice> operation(0.1); const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-0.1,-0.1}, {0,0}}, {{-1,-1}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameLeakyReLUTensorOp) { LeakyReLUTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "LeakyReLUTensorOp"); } /** LeakyReLUGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorLeakyReLUGradTensorOp) { LeakyReLUGradTensorOp<double, Eigen::DefaultDevice>* ptrLeakyReLU = nullptr; LeakyReLUGradTensorOp<double, Eigen::DefaultDevice>* nullPointerLeakyReLU = nullptr; BOOST_CHECK_EQUAL(ptrLeakyReLU, nullPointerLeakyReLU); } BOOST_AUTO_TEST_CASE(destructorLeakyReLUGradTensorOp) { LeakyReLUGradTensorOp<double, Eigen::DefaultDevice>* ptrLeakyReLU = nullptr; ptrLeakyReLU = new LeakyReLUGradTensorOp<double, Eigen::DefaultDevice>(); delete ptrLeakyReLU; } BOOST_AUTO_TEST_CASE(gettersAndSettersLeakyReLUGradTensorOp) { LeakyReLUGradTensorOp<double, Eigen::DefaultDevice> operation; operation.setAlpha(1.0); BOOST_CHECK_EQUAL(operation.getAlpha(), 1.0); } BOOST_AUTO_TEST_CASE(operationfunctionLeakyReLUGradTensorOp) { LeakyReLUGradTensorOp<double, Eigen::DefaultDevice> operation(0.1); const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{1,1}, {0,0}}, {{1,1}, {0,0}}, {{1,1}, {0,0}}, {{0.1,0.1}, {0,0}}, {{0.1,0.1}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameLeakyReLUGradTensorOp) { LeakyReLUGradTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "LeakyReLUGradTensorOp"); } /** SinTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorSinTensorOp) { SinTensorOp<double, Eigen::DefaultDevice>* ptrSin = nullptr; SinTensorOp<double, Eigen::DefaultDevice>* nullPointerSin = nullptr; BOOST_CHECK_EQUAL(ptrSin, nullPointerSin); } BOOST_AUTO_TEST_CASE(destructorSinTensorOp) { SinTensorOp<double, Eigen::DefaultDevice>* ptrSin = nullptr; ptrSin = new SinTensorOp<double, Eigen::DefaultDevice>(); delete ptrSin; } BOOST_AUTO_TEST_CASE(operationfunctionSinTensorOp) { SinTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{1,1}, {0,0}}, {{2.718281828,2.718281828}, {0,0}}, {{22026.46579,22026.46579}, {0,0}}, {{0.367879441,0.367879441}, {0,0}}, {{4.53999E-05,4.53999E-05}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { //BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); //TODO: fixme } } } } BOOST_AUTO_TEST_CASE(getNameSinTensorOp) { SinTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "SinTensorOp"); } /** SinGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorSinGradTensorOp) { SinGradTensorOp<double, Eigen::DefaultDevice>* ptrSinGrad = nullptr; SinGradTensorOp<double, Eigen::DefaultDevice>* nullPointerSinGrad = nullptr; BOOST_CHECK_EQUAL(ptrSinGrad, nullPointerSinGrad); } BOOST_AUTO_TEST_CASE(destructorSinGradTensorOp) { SinGradTensorOp<double, Eigen::DefaultDevice>* ptrSinGrad = nullptr; ptrSinGrad = new SinGradTensorOp<double, Eigen::DefaultDevice>(); delete ptrSinGrad; } BOOST_AUTO_TEST_CASE(operationfunctionSinGradTensorOp) { SinGradTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{1,1}, {0,0}}, {{2.718281828,2.718281828}, {0,0}}, {{22026.46579,22026.46579}, {0,0}}, {{0.367879441,0.367879441}, {0,0}}, {{4.53999E-05,4.53999E-05}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { //BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); //TODO: fixme } } } } BOOST_AUTO_TEST_CASE(getNameSinGradTensorOp) { SinGradTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "SinGradTensorOp"); } /** CosTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorCosTensorOp) { CosTensorOp<double, Eigen::DefaultDevice>* ptrCos = nullptr; CosTensorOp<double, Eigen::DefaultDevice>* nullPointerCos = nullptr; BOOST_CHECK_EQUAL(ptrCos, nullPointerCos); } BOOST_AUTO_TEST_CASE(destructorCosTensorOp) { CosTensorOp<double, Eigen::DefaultDevice>* ptrCos = nullptr; ptrCos = new CosTensorOp<double, Eigen::DefaultDevice>(); delete ptrCos; } BOOST_AUTO_TEST_CASE(operationfunctionCosTensorOp) { CosTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{1,1}, {0,0}}, {{2.718281828,2.718281828}, {0,0}}, {{22026.46579,22026.46579}, {0,0}}, {{0.367879441,0.367879441}, {0,0}}, {{4.53999E-05,4.53999E-05}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { //BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); //TODO: fixme } } } } BOOST_AUTO_TEST_CASE(getNameCosTensorOp) { CosTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "CosTensorOp"); } /** CosGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorCosGradTensorOp) { CosGradTensorOp<double, Eigen::DefaultDevice>* ptrCosGrad = nullptr; CosGradTensorOp<double, Eigen::DefaultDevice>* nullPointerCosGrad = nullptr; BOOST_CHECK_EQUAL(ptrCosGrad, nullPointerCosGrad); } BOOST_AUTO_TEST_CASE(destructorCosGradTensorOp) { CosGradTensorOp<double, Eigen::DefaultDevice>* ptrCosGrad = nullptr; ptrCosGrad = new CosGradTensorOp<double, Eigen::DefaultDevice>(); delete ptrCosGrad; } BOOST_AUTO_TEST_CASE(operationfunctionCosGradTensorOp) { CosGradTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{1,1}, {0,0}}, {{2.718281828,2.718281828}, {0,0}}, {{22026.46579,22026.46579}, {0,0}}, {{0.367879441,0.367879441}, {0,0}}, {{4.53999E-05,4.53999E-05}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { //BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); //TODO: fixme } } } } BOOST_AUTO_TEST_CASE(getNameCosGradTensorOp) { CosGradTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "CosGradTensorOp"); } BOOST_AUTO_TEST_CASE(operationfunctionBatchNormTensorOp) { BatchNormTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0.1}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{0,2.2360679774997898}, {0,0}}, {{2.2360679774997898,2.2360679774997898}, {0,0}}, {{2.2360679774997898,2.2360679774997898}, {0,0}}, {{-2.2360679774997898,-2.2360679774997898}, {0,0}}, {{-2.2360679774997898,-2.2360679774997898}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); //TODO: fixme } } } } BOOST_AUTO_TEST_CASE(getNameBatchNormTensorOp) { BatchNormTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "BatchNormTensorOp"); } /** BatchNormGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorBatchNormGradTensorOp) { BatchNormGradTensorOp<double, Eigen::DefaultDevice>* ptrBatchNormGrad = nullptr; BatchNormGradTensorOp<double, Eigen::DefaultDevice>* nullPointerBatchNormGrad = nullptr; BOOST_CHECK_EQUAL(ptrBatchNormGrad, nullPointerBatchNormGrad); } BOOST_AUTO_TEST_CASE(destructorBatchNormGradTensorOp) { BatchNormGradTensorOp<double, Eigen::DefaultDevice>* ptrBatchNormGrad = nullptr; ptrBatchNormGrad = new BatchNormGradTensorOp<double, Eigen::DefaultDevice>(); delete ptrBatchNormGrad; } BOOST_AUTO_TEST_CASE(operationfunctionBatchNormGradTensorOp) { BatchNormGradTensorOp<double, Eigen::DefaultDevice> operation; const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0.1}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size); output.setZero(); Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size); test.setValues({ {{0,4.5519144009631268e-15}, {0,0}}, {{0,1.7948528498063337e-15}, {0,0}}, {{0,1.776535187764253e-15}, {0,0}}, {{0,1.7934306267839513e-15}, {0,0}}, {{0,1.7765337666674127e-15}, {0,0}} }); operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device); // Test for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < memory_size; ++j) { for (int k = 0; k < layer_size; ++k) { BOOST_CHECK_CLOSE(output(i, j, k), test(i, j, k), 1e-4); //TODO: fixme } } } } BOOST_AUTO_TEST_CASE(getNameBatchNormGradTensorOp) { BatchNormGradTensorOp<double, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "BatchNormGradTensorOp"); } BOOST_AUTO_TEST_CASE(operatorGradientCheckTensorOp) { // Define the gradient checker GradientCheckTensorOp<double, Eigen::DefaultDevice> operation; operation.eps_ = 1e-7; // Setup the input const int batch_size = 5; const int memory_size = 2; const int layer_size = 2; Eigen::DefaultDevice device; Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size); input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); Eigen::Tensor<double, 3> input_f_plus(batch_size, memory_size, layer_size); input_f_plus.setZero(); Eigen::Tensor<double, 3> input_f_min(batch_size, memory_size, layer_size); input_f_min.setZero(); Eigen::Tensor<double, 3> input_b(batch_size, memory_size, layer_size); input_b.setZero(); Eigen::Tensor<double, 0> output; output.setZero(); // Check Sigmoid operation.forward_ = std::make_shared<SigmoidTensorOp<double, Eigen::DefaultDevice>>(SigmoidTensorOp<double, Eigen::DefaultDevice>()); operation.reverse_ = std::make_shared<SigmoidGradTensorOp<double, Eigen::DefaultDevice>>(SigmoidGradTensorOp<double, Eigen::DefaultDevice>()); operation(input.data(), input_f_plus.data(), input_f_min.data(), input_b.data(), output.data(), batch_size, memory_size, layer_size, 0, device); BOOST_CHECK(output(0) < operation.eps_); // Check ReLU input.setValues({ {{0.1,0.1}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); input_f_plus.setZero(); input_f_min.setZero(); input_b.setZero(); output.setZero(); operation.forward_ = std::make_shared<ReLUTensorOp<double, Eigen::DefaultDevice>>(ReLUTensorOp<double, Eigen::DefaultDevice>()); operation.reverse_ = std::make_shared<ReLUGradTensorOp<double, Eigen::DefaultDevice>>(ReLUGradTensorOp<double, Eigen::DefaultDevice>()); operation(input.data(), input_f_plus.data(), input_f_min.data(), input_b.data(), output.data(), batch_size, memory_size, layer_size, 0, device); BOOST_CHECK(output(0) < operation.eps_); // Check ELU input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); input_f_plus.setZero(); input_f_min.setZero(); input_b.setZero(); output.setZero(); operation.forward_ = std::make_shared<ELUTensorOp<double, Eigen::DefaultDevice>>(ELUTensorOp<double, Eigen::DefaultDevice>()); operation.reverse_ = std::make_shared<ELUGradTensorOp<double, Eigen::DefaultDevice>>(ELUGradTensorOp<double, Eigen::DefaultDevice>()); operation(input.data(), input_f_plus.data(), input_f_min.data(), input_b.data(), output.data(), batch_size, memory_size, layer_size, 0, device); BOOST_CHECK(output(0) < operation.eps_); // Check LeakyReLU input.setValues({ {{0.1,0.1}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); input_f_plus.setZero(); input_f_min.setZero(); input_b.setZero(); output.setZero(); operation.forward_ = std::make_shared<LeakyReLUTensorOp<double, Eigen::DefaultDevice>>(LeakyReLUTensorOp<double, Eigen::DefaultDevice>()); operation.reverse_ = std::make_shared<LeakyReLUGradTensorOp<double, Eigen::DefaultDevice>>(LeakyReLUGradTensorOp<double, Eigen::DefaultDevice>()); operation(input.data(), input_f_plus.data(), input_f_min.data(), input_b.data(), output.data(), batch_size, memory_size, layer_size, 0, device); BOOST_CHECK(output(0) < operation.eps_); // Check TanH input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); input_f_plus.setZero(); input_f_min.setZero(); input_b.setZero(); output.setZero(); operation.forward_ = std::make_shared<TanHTensorOp<double, Eigen::DefaultDevice>>(TanHTensorOp<double, Eigen::DefaultDevice>()); operation.reverse_ = std::make_shared<TanHGradTensorOp<double, Eigen::DefaultDevice>>(TanHGradTensorOp<double, Eigen::DefaultDevice>()); operation(input.data(), input_f_plus.data(), input_f_min.data(), input_b.data(), output.data(), batch_size, memory_size, layer_size, 0, device); BOOST_CHECK(output(0) < operation.eps_); // Check Linear input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); input_f_plus.setZero(); input_f_min.setZero(); input_b.setZero(); output.setZero(); operation.forward_ = std::make_shared<LinearTensorOp<double, Eigen::DefaultDevice>>(LinearTensorOp<double, Eigen::DefaultDevice>()); operation.reverse_ = std::make_shared<LinearGradTensorOp<double, Eigen::DefaultDevice>>(LinearGradTensorOp<double, Eigen::DefaultDevice>()); operation(input.data(), input_f_plus.data(), input_f_min.data(), input_b.data(), output.data(), batch_size, memory_size, layer_size, 0, device); BOOST_CHECK(output(0) < operation.eps_); // Check Pow input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); input_f_plus.setZero(); input_f_min.setZero(); input_b.setZero(); output.setZero(); operation.forward_ = std::make_shared<PowTensorOp<double, Eigen::DefaultDevice>>(PowTensorOp<double, Eigen::DefaultDevice>()); operation.reverse_ = std::make_shared<PowGradTensorOp<double, Eigen::DefaultDevice>>(PowGradTensorOp<double, Eigen::DefaultDevice>()); operation(input.data(), input_f_plus.data(), input_f_min.data(), input_b.data(), output.data(), batch_size, memory_size, layer_size, 0, device); BOOST_CHECK(output(0) < operation.eps_); // Check Inverse input.setValues({ {{0.1,0.1}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); input_f_plus.setZero(); input_f_min.setZero(); input_b.setZero(); output.setZero(); operation.forward_ = std::make_shared<InverseTensorOp<double, Eigen::DefaultDevice>>(InverseTensorOp<double, Eigen::DefaultDevice>()); operation.reverse_ = std::make_shared<InverseGradTensorOp<double, Eigen::DefaultDevice>>(InverseGradTensorOp<double, Eigen::DefaultDevice>()); operation(input.data(), input_f_plus.data(), input_f_min.data(), input_b.data(), output.data(), batch_size, memory_size, layer_size, 0, device); BOOST_CHECK(output(0) < operation.eps_); // Check Log input.setValues({ {{0.1,0.1}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{100,100}, {0,0}}, {{1000,1000}, {0,0}} }); input_f_plus.setZero(); input_f_min.setZero(); input_b.setZero(); output.setZero(); operation.forward_ = std::make_shared<LogTensorOp<double, Eigen::DefaultDevice>>(LogTensorOp<double, Eigen::DefaultDevice>()); operation.reverse_ = std::make_shared<LogGradTensorOp<double, Eigen::DefaultDevice>>(LogGradTensorOp<double, Eigen::DefaultDevice>()); operation(input.data(), input_f_plus.data(), input_f_min.data(), input_b.data(), output.data(), batch_size, memory_size, layer_size, 0, device); BOOST_CHECK(output(0) < operation.eps_); // Check Exponential input.setValues({ {{0,0}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); input_f_plus.setZero(); input_f_min.setZero(); input_b.setZero(); output.setZero(); operation.forward_ = std::make_shared<ExponentialTensorOp<double, Eigen::DefaultDevice>>(ExponentialTensorOp<double, Eigen::DefaultDevice>()); operation.reverse_ = std::make_shared<ExponentialGradTensorOp<double, Eigen::DefaultDevice>>(ExponentialGradTensorOp<double, Eigen::DefaultDevice>()); operation(input.data(), input_f_plus.data(), input_f_min.data(), input_b.data(), output.data(), batch_size, memory_size, layer_size, 0, device); BOOST_CHECK(output(0) < operation.eps_); // Check BatchNorm input.setValues({ {{10,0.1}, {0,0}}, {{1,1}, {0,0}}, {{10,10}, {0,0}}, {{-1,-1}, {0,0}}, {{-10,-10}, {0,0}} }); input_f_plus.setZero(); input_f_min.setZero(); input_b.setZero(); output.setZero(); operation.forward_ = std::make_shared<BatchNormTensorOp<double, Eigen::DefaultDevice>>(BatchNormTensorOp<double, Eigen::DefaultDevice>()); operation.reverse_ = std::make_shared<BatchNormGradTensorOp<double, Eigen::DefaultDevice>>(BatchNormGradTensorOp<double, Eigen::DefaultDevice>()); operation(input.data(), input_f_plus.data(), input_f_min.data(), input_b.data(), output.data(), batch_size, memory_size, layer_size, 0, device); BOOST_CHECK(output(0) < operation.eps_); std::cout << "Gradient Check BatchNorm: " << output(0) << std::endl; } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELINTERPRETERDEFAULTDEVICE_H #define EVONET_MODELINTERPRETERDEFAULTDEVICE_H #define EIGEN_USE_THREADS // .h #include <EvoNet/ml/ModelInterpreter.h> #include <unsupported/Eigen/CXX11/Tensor> #include <cereal/access.hpp> // serialiation of private members #undef min // clashes with std::limit on windows in polymorphic.hpp #undef max // clashes with std::limit on windows in polymorphic.hpp #include <cereal/types/polymorphic.hpp> // .cpp #include <EvoNet/ml/ModelErrorData.h> #include <EvoNet/ml/ModelKernal.h> namespace EvoNet { template<typename TensorT> class ModelInterpreterDefaultDevice : public ModelInterpreter<TensorT, Eigen::DefaultDevice> { public: using ModelInterpreter<TensorT, Eigen::DefaultDevice>::ModelInterpreter; void allocateForwardPropogationLayerTensors(const std::vector<OperationList<TensorT>>& FP_operations, const std::map<std::string, std::vector<int>>& operations_map, const std::vector<int>& source_layer_sizes, const std::vector<int>& sink_layer_sizes, const std::vector<std::vector<std::pair<int, int>>> weight_indices, std::vector<std::map<std::string, std::vector<std::pair<int, int>>>>& shared_weight_indices, const std::vector<std::vector<TensorT>>& weight_values, const std::vector<bool>& make_source_tensors, const std::vector<bool>& make_sink_tensors, const std::vector<bool>& make_weight_tensors, const int& batch_size, const int& memory_size, const bool& train) override; void executeForwardPropogationOperations(const int& time_step) override; void executeBackwardPropogationOperations(const int& time_step) override; void executeModelErrorOperations(Eigen::Tensor<TensorT, 2>& expected, const int& layer_id, std::shared_ptr<LossFunctionTensorOp<TensorT,Eigen::DefaultDevice>>& loss_function, std::shared_ptr<LossFunctionGradTensorOp<TensorT,Eigen::DefaultDevice>>& loss_function_grad, const int& time_step) override; void executeModelMetricOperations(Eigen::Tensor<TensorT, 2>& expected, const int& layer_id, std::shared_ptr<MetricFunctionTensorOp<TensorT,Eigen::DefaultDevice>> metric_function, const int& time_step, const int& metric_index) override; void executeWeightErrorOperations() override; void executeWeightUpdateOperations(const int& iter) override; void allocateModelErrorTensor(const int& batch_size, const int& memory_size, const int& n_metrics) override; void getModelResults(Model<TensorT>& model, const bool& output_nodes, const bool& weights, const bool& model_error, const bool& input_nodes) override; void checkMemory(const Model<TensorT>& model, const int& batch_size, const int& memory_size) override; void updateSolverParams(const int& param_index, const TensorT& param_factor) override; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ModelInterpreter<TensorT, Eigen::DefaultDevice>>(this)); } }; template<typename TensorT> inline void ModelInterpreterDefaultDevice<TensorT>::allocateForwardPropogationLayerTensors(const std::vector<OperationList<TensorT>>& FP_operations, const std::map<std::string, std::vector<int>>& operations_map, const std::vector<int>& source_layer_sizes, const std::vector<int>& sink_layer_sizes, const std::vector<std::vector<std::pair<int, int>>> weight_indices, std::vector<std::map<std::string, std::vector<std::pair<int, int>>>>& shared_weight_indices, const std::vector<std::vector<TensorT>>& weight_values, const std::vector<bool>& make_source_tensors, const std::vector<bool>& make_sink_tensors, const std::vector<bool>& make_weight_tensors, const int& batch_size, const int& memory_size, const bool& train) { std::vector<OperationTensorStep<TensorT, Eigen::DefaultDevice>> operation_step_list; ActivationOpToActivationTensorOp<TensorT, Eigen::DefaultDevice> activation_conv; SolverOpToSolverTensorOp<TensorT, Eigen::DefaultDevice> solver_conv; IntegrationOpToIntegrationTensorOp<TensorT, Eigen::DefaultDevice> integration_conv; IntegrationErrorOpToIntegrationErrorTensorOp<TensorT, Eigen::DefaultDevice> integration_error_conv; IntegrationWeightGradOpToIntegrationWeightGradTensorOp<TensorT, Eigen::DefaultDevice> integration_weight_grad_conv; int iter = 0; for (const auto& operations : operations_map) { // make the tensors OperationTensorStep<TensorT, Eigen::DefaultDevice> operation_step; // [NOTE: order matters! sink layer should come before the source layer to keep with // the ordering generated in getForwardPropogationTensorDimensions.] std::shared_ptr<NodeTensorData<TensorT, Eigen::DefaultDevice>> sink_node_data(new NodeTensorDataCpu<TensorT>()); { // make the sink layer tensor and add it to the cache and operation step std::shared_ptr<ActivationTensorOp<TensorT, Eigen::DefaultDevice>> activation = nullptr; std::shared_ptr<ActivationTensorOp<TensorT, Eigen::DefaultDevice>> activation_grad = nullptr; std::shared_ptr<IntegrationTensorOp<TensorT, Eigen::DefaultDevice>> integration = nullptr; std::shared_ptr<IntegrationErrorTensorOp<TensorT, Eigen::DefaultDevice>> integration_error = nullptr; std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, Eigen::DefaultDevice>> integration_weight_grad = nullptr; if (make_sink_tensors[iter]) { sink_node_data->initNodeTensorData(batch_size, memory_size, sink_layer_sizes[iter], FP_operations[operations.second[0]].result.sink_node->getType(), FP_operations[operations.second[0]].result.sink_node->getIntegrationShared()->getName(), train); this->layer_tensors_.push_back(sink_node_data); operation_step.sink_layer.time_step = FP_operations[operations.second[0]].result.time_step; activation_conv(FP_operations[operations.second[0]].result.sink_node->getActivationShared(), activation, std::vector<TensorT>() = {}); operation_step.sink_layer.activation = activation; activation_conv(FP_operations[operations.second[0]].result.sink_node->getActivationGradShared(), activation_grad, std::vector<TensorT>() = {}); operation_step.sink_layer.activation_grad = activation_grad; integration_conv(FP_operations[operations.second[0]].result.sink_node->getIntegrationShared(), integration, std::vector<TensorT>() = {}); operation_step.sink_layer.integration = integration; integration_error_conv(FP_operations[operations.second[0]].result.sink_node->getIntegrationErrorShared(), integration_error, std::vector<TensorT>() = {}); operation_step.sink_layer.integration_error= integration_error; integration_weight_grad_conv(FP_operations[operations.second[0]].result.sink_node->getIntegrationWeightGradShared(), integration_weight_grad, std::vector<TensorT>() = {}); operation_step.sink_layer.integration_weight_grad = integration_weight_grad; operation_step.sink_layer.tensor_index = FP_operations[operations.second[0]].result.sink_node->getTensorIndex().first; } else { operation_step.sink_layer.tensor_index = FP_operations[operations.second[0]].result.sink_node->getTensorIndex().first; operation_step.sink_layer.time_step = FP_operations[operations.second[0]].result.time_step; activation_conv(FP_operations[operations.second[0]].result.sink_node->getActivationShared(), activation, std::vector<TensorT>() = {}); operation_step.sink_layer.activation = activation; activation_conv(FP_operations[operations.second[0]].result.sink_node->getActivationGradShared(), activation_grad, std::vector<TensorT>() = {}); operation_step.sink_layer.activation_grad = activation_grad; integration_conv(FP_operations[operations.second[0]].result.sink_node->getIntegrationShared(), integration, std::vector<TensorT>() = {}); operation_step.sink_layer.integration = integration; integration_error_conv(FP_operations[operations.second[0]].result.sink_node->getIntegrationErrorShared(), integration_error, std::vector<TensorT>() = {}); operation_step.sink_layer.integration_error= integration_error; integration_weight_grad_conv(FP_operations[operations.second[0]].result.sink_node->getIntegrationWeightGradShared(), integration_weight_grad, std::vector<TensorT>() = {}); operation_step.sink_layer.integration_weight_grad = integration_weight_grad; operation_step.sink_layer.time_step = FP_operations[operations.second[0]].result.time_step; } } std::shared_ptr<NodeTensorData<TensorT, Eigen::DefaultDevice>> source_node_data(new NodeTensorDataCpu<TensorT>()); { // make the source layer tensor and add it to the cache and operation step std::shared_ptr<ActivationTensorOp<TensorT, Eigen::DefaultDevice>> activation = nullptr; std::shared_ptr<ActivationTensorOp<TensorT, Eigen::DefaultDevice>> activation_grad = nullptr; std::shared_ptr<IntegrationTensorOp<TensorT, Eigen::DefaultDevice>> integration = nullptr; std::shared_ptr<IntegrationErrorTensorOp<TensorT, Eigen::DefaultDevice>> integration_error = nullptr; std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, Eigen::DefaultDevice>> integration_weight_grad = nullptr; if (make_source_tensors[iter]) { source_node_data->initNodeTensorData(batch_size, memory_size, source_layer_sizes[iter], FP_operations[operations.second[0]].arguments[0].source_node->getType(), FP_operations[operations.second[0]].arguments[0].source_node->getIntegrationShared()->getName(), train); operation_step.source_layer.time_step = FP_operations[operations.second[0]].arguments[0].time_step; this->layer_tensors_.push_back(source_node_data); activation_conv(FP_operations[operations.second[0]].arguments[0].source_node->getActivationShared(), activation, std::vector<TensorT>() = {}); operation_step.source_layer.activation = activation; activation_conv(FP_operations[operations.second[0]].arguments[0].source_node->getActivationGradShared(), activation_grad, std::vector<TensorT>() = {}); operation_step.source_layer.activation_grad = activation_grad; integration_conv(FP_operations[operations.second[0]].arguments[0].source_node->getIntegrationShared(), integration, std::vector<TensorT>() = {}); operation_step.source_layer.integration = integration; integration_error_conv(FP_operations[operations.second[0]].arguments[0].source_node->getIntegrationErrorShared(), integration_error, std::vector<TensorT>() = {}); operation_step.source_layer.integration_error = integration_error; integration_weight_grad_conv(FP_operations[operations.second[0]].arguments[0].source_node->getIntegrationWeightGradShared(), integration_weight_grad, std::vector<TensorT>() = {}); operation_step.source_layer.integration_weight_grad = integration_weight_grad; operation_step.source_layer.tensor_index = FP_operations[operations.second[0]].arguments[0].source_node->getTensorIndex().first; } else { operation_step.source_layer.tensor_index = FP_operations[operations.second[0]].arguments[0].source_node->getTensorIndex().first; operation_step.source_layer.time_step = FP_operations[operations.second[0]].arguments[0].time_step; activation_conv(FP_operations[operations.second[0]].arguments[0].source_node->getActivationShared(), activation, std::vector<TensorT>() = {}); operation_step.source_layer.activation = activation; activation_conv(FP_operations[operations.second[0]].arguments[0].source_node->getActivationGradShared(), activation_grad, std::vector<TensorT>() = {}); operation_step.source_layer.activation_grad = activation_grad; integration_conv(FP_operations[operations.second[0]].arguments[0].source_node->getIntegrationShared(), integration, std::vector<TensorT>() = {}); operation_step.source_layer.integration = integration; integration_error_conv(FP_operations[operations.second[0]].arguments[0].source_node->getIntegrationErrorShared(), integration_error, std::vector<TensorT>() = {}); operation_step.source_layer.integration_error = integration_error; integration_weight_grad_conv(FP_operations[operations.second[0]].arguments[0].source_node->getIntegrationWeightGradShared(), integration_weight_grad, std::vector<TensorT>() = {}); operation_step.source_layer.integration_weight_grad = integration_weight_grad; } } // make the weight tensor and add it to the cache and operation step std::shared_ptr<WeightTensorData<TensorT, Eigen::DefaultDevice>> weight_data = std::make_shared<WeightTensorDataCpu<TensorT>>(WeightTensorDataCpu<TensorT>()); if (make_weight_tensors[iter]) { std::shared_ptr<SolverTensorOp<TensorT, Eigen::DefaultDevice>> solver = nullptr; std::vector<TensorT> solver_params; solver_conv(FP_operations[operations.second[0]].arguments[0].weight->getSolverOpShared(), solver, solver_params); weight_data->initWeightTensorData(source_layer_sizes[iter], sink_layer_sizes[iter], weight_indices[iter], shared_weight_indices[iter], weight_values[iter], train, solver_params, FP_operations[operations.second[0]].result.sink_node->getIntegrationShared()->getName()); this->weight_tensors_.push_back(weight_data); operation_step.weight.tensor_index = std::get<0>(FP_operations[operations.second[0]].arguments[0].weight->getTensorIndex()[0]); operation_step.weight.solver = solver; } else { std::cout << "Weight tensor is not being created...Check!" << std::endl; } //this->operation_steps_[FP_operations[operations.second[0]].result.sink_node->getOperationIndex()].push_back(operation_step); operation_step_list.push_back(operation_step); ++iter; } // add the operations to the cache this->operation_steps_.push_back(operation_step_list); } template<typename TensorT> inline void ModelInterpreterDefaultDevice<TensorT>::executeForwardPropogationOperations(const int& time_step) { for (std::vector<OperationTensorStep<TensorT, Eigen::DefaultDevice>>& operations_list : this->operation_steps_) { ModelKernalDefaultDevice<TensorT> model_kernal; Eigen::DefaultDevice device; // execute the forward propogation steps for (OperationTensorStep<TensorT, Eigen::DefaultDevice>& operation : operations_list) { model_kernal.executeForwardPropogation( this->layer_tensors_.at(operation.source_layer.tensor_index)->getHOutputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDOutputPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getHWeightPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDWeightPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getHInputPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDInputPointer().get(), operation.sink_layer.integration, this->layer_tensors_.at(operation.source_layer.tensor_index)->getBatchSize(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getMemorySize(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getLayerSize(), operation.source_layer.time_step + time_step, operation.sink_layer.time_step + time_step, device); // Not over-written model_kernal.executeNodeActivation( this->layer_tensors_.at(operation.sink_layer.tensor_index)->getHInputPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDInputPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getHOutputPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDOutputPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getHDtPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDDtPointer().get(), operation.sink_layer.activation, this->layer_tensors_.at(operation.sink_layer.tensor_index)->getBatchSize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getMemorySize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getLayerSize(), operation.sink_layer.time_step + time_step, device); // Over-written } } } template<typename TensorT> inline void ModelInterpreterDefaultDevice<TensorT>::executeBackwardPropogationOperations(const int & time_step) { for (int i = this->operation_steps_.size() - 1; i >= 0; --i) { //iterate backwards ModelKernalDefaultDevice<TensorT> model_kernal; Eigen::DefaultDevice device; // execute the forward propogation steps for (OperationTensorStep<TensorT, Eigen::DefaultDevice>& operation : this->operation_steps_[i]) { //reverse source/sink model_kernal.executeNodeDerivative( this->layer_tensors_.at(operation.source_layer.tensor_index)->getHOutputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDOutputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getHDerivativePointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDDerivativePointer().get(), operation.source_layer.activation_grad, this->layer_tensors_.at(operation.source_layer.tensor_index)->getBatchSize(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getMemorySize(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), operation.source_layer.time_step + time_step, device); model_kernal.executeBackwardPropogation( this->layer_tensors_.at(operation.sink_layer.tensor_index)->getHErrorPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDErrorPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getHInputPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDInputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getHOutputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDOutputPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getHWeightPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDWeightPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getHErrorPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDErrorPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getHDerivativePointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDDerivativePointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), // [TODO: replace with N] operation.sink_layer.integration_error, // Was source_layer this->layer_tensors_.at(operation.sink_layer.tensor_index)->getBatchSize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getMemorySize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getLayerSize(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), operation.sink_layer.time_step + time_step, operation.source_layer.time_step + time_step, device); } } } template<typename TensorT> inline void ModelInterpreterDefaultDevice<TensorT>::executeModelErrorOperations(Eigen::Tensor<TensorT, 2>& expected, const int& layer_id, std::shared_ptr<LossFunctionTensorOp<TensorT,Eigen::DefaultDevice>>& loss_function, std::shared_ptr<LossFunctionGradTensorOp<TensorT,Eigen::DefaultDevice>>& loss_function_grad, const int& time_step) { ModelKernalDefaultDevice<TensorT> model_kernal; Eigen::DefaultDevice device; auto layer_tensor_data = this->getLayerTensor(layer_id); model_kernal.executeModelErrors( expected, layer_tensor_data->getHOutputPointer().get(), layer_tensor_data->getDOutputPointer().get(), this->model_error_->getHErrorPointer().get(), this->model_error_->getDErrorPointer().get(), layer_tensor_data->getHErrorPointer().get(), layer_tensor_data->getDErrorPointer().get(), loss_function, loss_function_grad, layer_tensor_data->getBatchSize(), layer_tensor_data->getMemorySize(), layer_tensor_data->getLayerSize(), time_step, device); } template<typename TensorT> inline void ModelInterpreterDefaultDevice<TensorT>::executeModelMetricOperations(Eigen::Tensor<TensorT, 2>& expected, const int & layer_id, std::shared_ptr<MetricFunctionTensorOp<TensorT,Eigen::DefaultDevice>> metric_function, const int & time_step, const int & metric_index) { ModelKernalDefaultDevice<TensorT> model_kernal; Eigen::DefaultDevice device; auto layer_tensor_data = this->getLayerTensor(layer_id); model_kernal.executeModelMetric( expected, layer_tensor_data->getHOutputPointer().get(), layer_tensor_data->getDOutputPointer().get(), this->model_error_->getHMetricPointer().get(), this->model_error_->getDMetricPointer().get(), metric_function, layer_tensor_data->getBatchSize(), layer_tensor_data->getMemorySize(), layer_tensor_data->getLayerSize(), this->model_error_->getNMetrics(), time_step, metric_index, device); } template<typename TensorT> inline void ModelInterpreterDefaultDevice<TensorT>::executeWeightErrorOperations() { for (std::vector<OperationTensorStep<TensorT, Eigen::DefaultDevice>>& operations_list : this->operation_steps_) { ModelKernalDefaultDevice<TensorT> model_kernal; Eigen::DefaultDevice device; // execute the forward propogation steps for (OperationTensorStep<TensorT, Eigen::DefaultDevice>& operation : operations_list) { model_kernal.executeWeightErrors( this->layer_tensors_.at(operation.sink_layer.tensor_index)->getHErrorPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDErrorPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getHOutputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDOutputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getHInputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDInputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), // [TODO: change to N] operation.sink_layer.integration_weight_grad, this->weight_tensors_.at(operation.weight.tensor_index)->getHWeightPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDWeightPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getHErrorPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDErrorPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getBatchSize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getMemorySize(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getLayerSize(), device); model_kernal.executeSharedWeightErrors( this->weight_tensors_.at(operation.weight.tensor_index)->getHErrorPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDErrorPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getHSharedWeightsPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDSharedWeightsPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getLayerSize(), this->weight_tensors_.at(operation.weight.tensor_index)->getNSharedWeights(), device); } } } template<typename TensorT> inline void ModelInterpreterDefaultDevice<TensorT>::executeWeightUpdateOperations(const int& iter) { for (std::vector<OperationTensorStep<TensorT, Eigen::DefaultDevice>>& operations_list : this->operation_steps_) { ModelKernalDefaultDevice<TensorT> model_kernal; Eigen::DefaultDevice device; // execute the forward propogation steps for (OperationTensorStep<TensorT, Eigen::DefaultDevice>& operation : operations_list) { model_kernal.executeWeightUpdate( this->weight_tensors_.at(operation.weight.tensor_index)->getHWeightPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDWeightPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getHSolverParamsPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDSolverParamsPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getHErrorPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDErrorPointer().get(), operation.weight.solver, this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getLayerSize(), iter, device); } } } template<typename TensorT> inline void ModelInterpreterDefaultDevice<TensorT>::allocateModelErrorTensor(const int& batch_size, const int& memory_size, const int& n_metrics) { std::shared_ptr<ModelErrorData<TensorT, Eigen::DefaultDevice>> model_error_data(new ModelErrorDataCpu<TensorT>()); model_error_data->initModelErrorData(batch_size, memory_size, n_metrics); this->model_error_ = model_error_data; } template<typename TensorT> inline void ModelInterpreterDefaultDevice<TensorT>::getModelResults(Model<TensorT>& model, const bool& output_nodes, const bool& weights, const bool& model_error, const bool& input_nodes) { // copy out the weight values if (weights) { for (auto& weight_map : model.getWeightsMap()) { // NOTE: there is a strange bug where the tensor indices of the weight pointer are not updated if (weight_map.second->getTensorIndex().size() > 0) { const int tensor_index = std::get<0>(weight_map.second->getTensorIndex()[0]); const int layer1_index = std::get<1>(weight_map.second->getTensorIndex()[0]); const int layer2_index = std::get<2>(weight_map.second->getTensorIndex()[0]); //const int tensor_index = std::get<0>(model.getWeightsMap().at(weight_map.second->getName())->getTensorIndex()[0]); //const int layer1_index = std::get<1>(model.getWeightsMap().at(weight_map.second->getName())->getTensorIndex()[0]); //const int layer2_index = std::get<2>(model.getWeightsMap().at(weight_map.second->getName())->getTensorIndex()[0]); weight_map.second->setWeight(this->getWeightTensor(tensor_index)->getWeight()(layer1_index, layer2_index)); } } } // copy out the model error if (model_error) { model.setError(this->model_error_->getError()); model.setMetric(this->model_error_->getMetric()); } // copy out the output node values if (output_nodes) { for (auto& output_node : model.getOutputNodes()) { // NOTE: there is a strange bug where the tensor indices of the output nodes pointer are not updated //const int tensor_index = output_node->getTensorIndex().first; //const int layer_index = output_node->getTensorIndex().second; const int tensor_index = model.getNodesMap().at(output_node->getName())->getTensorIndex().first; const int layer_index = model.getNodesMap().at(output_node->getName())->getTensorIndex().second; model.getNodesMap().at(output_node->getName())->setOutput(this->getLayerTensor(tensor_index)->getOutput().chip(layer_index, 2)); } } // copy out the output node values if (input_nodes) { for (auto& input_node : model.getInputNodes()) { const int tensor_index = model.getNodesMap().at(input_node->getName())->getTensorIndex().first; const int layer_index = model.getNodesMap().at(input_node->getName())->getTensorIndex().second; model.getNodesMap().at(input_node->getName())->setInput(this->getLayerTensor(tensor_index)->getInput().chip(layer_index, 2)); } } } template<typename TensorT> inline void ModelInterpreterDefaultDevice<TensorT>::checkMemory(const Model<TensorT>& model, const int& batch_size, const int& memory_size) { // TODO } template<typename TensorT> inline void ModelInterpreterDefaultDevice<TensorT>::updateSolverParams(const int & param_index, const TensorT & param_factor) { for (auto& weight_tensor_data : this->weight_tensors_) { if (weight_tensor_data->getNSolverParams() > 0) { weight_tensor_data->getSolverParams().chip(param_index, 2) = weight_tensor_data->getSolverParams().chip(param_index, 2) * weight_tensor_data->getSolverParams().chip(param_index, 2).constant(param_factor); } } } } CEREAL_REGISTER_TYPE(EvoNet::ModelInterpreterDefaultDevice<float>); // TODO: add double, int, etc. #endif //EVONET_MODELINTERPRETERDEFAULTDEVICE_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE PeakSimulator test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/simulator/PeakSimulator.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(peaksimulator) BOOST_AUTO_TEST_CASE(constructor) { PeakSimulator<double>* ptr = nullptr; PeakSimulator<double>* nullPointer = nullptr; ptr = new PeakSimulator<double>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(constructor2) { PeakSimulator<double> psim(500.0, 1.0, 0.0, 10.0, 2.0, 1.0, 5.0, 10.0, 1e6); BOOST_CHECK_EQUAL(psim.getStepSizeMu(), 500.0); BOOST_CHECK_EQUAL(psim.getStepSizeSigma(), 1.0); BOOST_CHECK_EQUAL(psim.getWindowStart(), 0.0); BOOST_CHECK_EQUAL(psim.getWindowEnd(), 10.0); BOOST_CHECK_EQUAL(psim.getNoiseMu(), 2.0); BOOST_CHECK_EQUAL(psim.getNoiseSigma(), 1.0); BOOST_CHECK_EQUAL(psim.getBaselineLeft(), 5.0); BOOST_CHECK_EQUAL(psim.getBaselineRight(), 10.0); BOOST_CHECK_EQUAL(psim.getSaturationLimit(), 1e6); } BOOST_AUTO_TEST_CASE(destructor) { PeakSimulator<double>* ptr = nullptr; ptr = new PeakSimulator<double>(); delete ptr; } BOOST_AUTO_TEST_CASE(gettersAndSetters) { PeakSimulator<double> psim; psim.setStepSizeMu(500.0); psim.setStepSizeSigma(1.0); psim.setWindowStart(0.0); psim.setWindowEnd(10.0); psim.setNoiseMu(2.0); psim.setNoiseSimga(1.0); psim.setBaselineLeft(5.0); psim.setBaselineRight(10.0); psim.setSaturationLimit(1e6); BOOST_CHECK_EQUAL(psim.getStepSizeMu(), 500.0); BOOST_CHECK_EQUAL(psim.getStepSizeSigma(), 1.0); BOOST_CHECK_EQUAL(psim.getWindowStart(), 0.0); BOOST_CHECK_EQUAL(psim.getWindowEnd(), 10.0); BOOST_CHECK_EQUAL(psim.getNoiseMu(), 2.0); BOOST_CHECK_EQUAL(psim.getNoiseSigma(), 1.0); BOOST_CHECK_EQUAL(psim.getBaselineLeft(), 5.0); BOOST_CHECK_EQUAL(psim.getBaselineRight(), 10.0); BOOST_CHECK_EQUAL(psim.getSaturationLimit(), 1e6); } BOOST_AUTO_TEST_CASE(generateRangeWithNoise) { PeakSimulator<double> psim; // no noise std::vector<double> range = psim.generateRangeWithNoise(0.0, 1.0, 0.0, 10.0); BOOST_CHECK_EQUAL(range.size(), 11); BOOST_CHECK_EQUAL(range[0], 0.0); BOOST_CHECK_EQUAL(range.back(), 10.0); // with noise range = psim.generateRangeWithNoise(0.0, 1.0, 0.1, 10.0); BOOST_CHECK_EQUAL(range[0], 0.0); BOOST_CHECK_NE(range.back(), 10.0); // negative step size range = psim.generateRangeWithNoise(0.0, -1.0, 0.0, 10.0); BOOST_CHECK_EQUAL(range.size(), 11); BOOST_CHECK_EQUAL(range[0], 0.0); BOOST_CHECK_EQUAL(range.back(), 10.0); // high standard deviation range = psim.generateRangeWithNoise(0.0, 1.0, 10.0, 10.0); BOOST_CHECK_EQUAL(range.size(), 11); BOOST_CHECK_EQUAL(range[0], 0.0); BOOST_CHECK_EQUAL(range.back(), 10.0); } BOOST_AUTO_TEST_CASE(addNoise) { PeakSimulator<double> psim; // no noise const std::vector<double> range = {0, 1, 2, 3, 4, 5}; std::vector<double> noise_range = range; psim.addNoise(noise_range, 0.0, 0.0); for (int i=0; i<range.size(); ++i) { BOOST_CHECK_EQUAL(range[i], noise_range[i]); } // with noise noise_range = range; psim.addNoise(noise_range, 0.0, 1.0); for (int i=0; i<range.size(); ++i) { BOOST_CHECK_NE(range[i], noise_range[i]); } // with noise noise_range = range; psim.addNoise(noise_range, 1.0, 0.0); for (int i=0; i<range.size(); ++i) { BOOST_CHECK_EQUAL(range[i] + 1.0, noise_range[i]); } } BOOST_AUTO_TEST_CASE(addBaseline) { PeakSimulator<double> psim; // toy peak const std::vector<double> x = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; const std::vector<double> y = {0, 0, 1, 3, 7, 10, 7, 3, 1, 0, 0}; // no baseline std::vector<double> y_baseline = y; psim.addBaseline(x, y_baseline, 0.0, 0.0, 5); std::vector<double> y_test = {0, 0, 1, 3, 7, 10, 7, 3, 1, 0, 0}; for (int i=0; i<y_test.size(); ++i) { BOOST_CHECK_EQUAL(y_baseline[i], y_test[i]); } // no noise y_baseline = y; psim.addBaseline(x, y_baseline, 3, 1, 5); y_test = {3, 3, 3, 3, 7, 10, 7, 3, 1, 1, 1}; for (int i=0; i<y_test.size(); ++i) { BOOST_CHECK_EQUAL(y_baseline[i], y_test[i]); } } BOOST_AUTO_TEST_CASE(flattenPeak) { PeakSimulator<double> psim; // toy peak const std::vector<double> y = {0, 0, 1, 3, 7, 10, 7, 3, 1, 0, 0}; // no saturation std::vector<double> y_saturation = y; psim.flattenPeak(y_saturation, 10); for (int i=0; i<y.size(); ++i) { BOOST_CHECK_EQUAL(y_saturation[i], y[i]); } // saturation y_saturation = y; psim.flattenPeak(y_saturation, 5); std::vector<double> y_test = {0, 0, 1, 3, 5, 5, 5, 3, 1, 0, 0}; for (int i=0; i<y_test.size(); ++i) { BOOST_CHECK_EQUAL(y_saturation[i], y_test[i]); } } BOOST_AUTO_TEST_CASE(simulatePeak) { // time and intensity arrays std::vector<double> x, y; // Gaussian peak, evenly spaced points, no detector noise or saturation PeakSimulator<double> psim(1.0, 0.0, 0.0, 10.0, 0.0, 0.0, 0.0, 0.0, 15); EMGModel<double> emg(10.0, 0.0, 5.0, 1.0); psim.simulatePeak(x, y, emg); std::vector<double> x_test = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; std::vector<double> y_test = { 3.7266531720786709e-05, 0.0033546262790251184, 0.11108996538242306, 1.353352832366127, 6.0653065971263338, 10, 6.0653065971263338, 1.353352832366127, 0.11108996538242306, 0.0033546262790251184, 3.7266531720786709e-05 }; for (int i = 0; i < x.size(); ++i) { BOOST_CHECK_CLOSE(x[i], x_test[i], 1e-6); BOOST_CHECK_CLOSE(y[i], y_test[i], 1e-6); } // time and intensity arrays x.clear(); y.clear(); // Tailing peak, evenly spaced points, no detector noise or saturation psim = PeakSimulator<double>(1.0, 0.0, 0.0, 10.0, 0.0, 0.0, 0.0, 0.0, 15); emg = EMGModel<double>(10.0, 0.5, 5.0, 1.0); psim.simulatePeak(x, y, emg); x_test = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; y_test = { 1.04424e-05, 0.0010894327367425214, 0.0428381, 0.640548, 3.69487, 8.42738, 7.95379, 3.39235, 0.772531, 0.1214391, 0.0167949 }; for (int i = 0; i < x.size(); ++i) { BOOST_CHECK_CLOSE(x[i], x_test[i], 1e-4); BOOST_CHECK_CLOSE(y[i], y_test[i], 1e-4); } // time and intensity arrays x.clear(); y.clear(); // Tailing peak, non-evenly spaced points, detector noise and saturation psim = PeakSimulator<double>(1.0, 0.1, 0.0, 10.0, 0.0, 0.5, 1.0, 3.0, 8); emg = EMGModel<double>(10.0, 0.5, 5.0, 1.0); psim.simulatePeak(x, y, emg); x_test = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; y_test = { 1.04424e-05, 0.0010894327367425214, 0.0428381, 0.640548, 3.69487, 8.42738, 7.95379, 3.39235, 0.772531, 0.1214391, 0.0167949 }; BOOST_CHECK_EQUAL(x.size(), y.size()); BOOST_CHECK_EQUAL(x[0], 0); for (int i = 0; i < y.size(); ++i) { BOOST_TEST(y[i] <= 8.0, boost::test_tools::tolerance(0.001)); } // time and intensity arrays x.clear(); y.clear(); // Negative step size psim = PeakSimulator<double>(-0.2, 0.0, 0.0, 10.0, 0.0, 0.0, 0.0, 0.0, 15); emg = EMGModel<double>(10.0, 0.0, 5.0, 1.0); psim.simulatePeak(x, y, emg); x_test = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; y_test = { 3.7266531720786709e-05, 0.0033546262790251184, 0.11108996538242306, 1.353352832366127, 6.0653065971263338, 10, 6.0653065971263338, 1.353352832366127, 0.11108996538242306, 0.0033546262790251184, 3.7266531720786709e-05 }; for (int i = 0; i < x.size(); ++i) { BOOST_CHECK_CLOSE(x[i], x_test[i], 1e-6); BOOST_CHECK_CLOSE(y[i], y_test[i], 1e-6); } // // UNCOMMENT to print out new test values // for (int i=0; i<x.size(); ++i) // { // std::cout<< x[i] << ", "; // } // std::cout<< ";" <<std::endl; // for (int i=0; i<y.size(); ++i) // { // std::cout<< y[i] << ", "; // } // std::cout<< ";" <<std::endl; } BOOST_AUTO_TEST_CASE(getBestLeftAndRight) { // time and intensity arrays std::vector<double> x, y; std::pair<double, double> best_lr; // Gaussian peak, evenly spaced points, no detector noise or saturation PeakSimulator<double> psim(1.0, 0.0, 0.0, 10.0, 0.0, 0.0, 0.0, 0.0, 15); EMGModel<double> emg(10.0, 0.0, 5.0, 1.0); psim.simulatePeak(x, y, emg); best_lr = psim.getBestLeftAndRight(x, y, emg.getMu()); BOOST_CHECK_CLOSE(best_lr.first, 1.0, 1e-4); BOOST_CHECK_CLOSE(best_lr.second, 9.0, 1e-4); // time and intensity arrays x.clear(); y.clear(); // Tailing peak, evenly spaced points, no detector noise or saturation psim = PeakSimulator<double>(1.0, 0.0, 0.0, 10.0, 0.0, 0.0, 0.0, 0.0, 15); emg = EMGModel<double>(10.0, 0.5, 5.0, 1.0); psim.simulatePeak(x, y, emg); best_lr = psim.getBestLeftAndRight(x, y, emg.getMu()); BOOST_CHECK_CLOSE(best_lr.first, 1.0, 1e-4); BOOST_CHECK_CLOSE(best_lr.second, 10.0, 1e-4); // time and intensity arrays x.clear(); y.clear(); // Tailing peak, non-evenly spaced points, detector noise and saturation psim = PeakSimulator<double>(1.0, 0.1, 0.0, 10.0, 0.0, 0.5, 1.0, 3.0, 8); emg = EMGModel<double>(10.0, 0.5, 5.0, 1.0); psim.simulatePeak(x, y, emg); best_lr = psim.getBestLeftAndRight(x, y, emg.getMu()); BOOST_CHECK_LE(best_lr.first, 3.0); BOOST_CHECK_GE(best_lr.first, 0.0); BOOST_CHECK_LE(best_lr.second, 10.0); BOOST_CHECK_GE(best_lr.second, 7.0); // // UNCOMMENT to print out new test values // for (int i=0; i<x.size(); ++i) // { // std::cout<< x[i] << ", "; // } // std::cout<< ";" <<std::endl; // for (int i=0; i<y.size(); ++i) // { // std::cout<< y[i] << ", "; // } // std::cout<< ";" <<std::endl; } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE IntegrationFunctionTensor test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/IntegrationFunctionTensor.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(integrationFunctionTensor) /** SumTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorSumTensorOp) { SumTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; SumTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorSumTensorOp) { SumTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new SumTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionSumTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_input(batch_size, memory_size, sink_layer_size); sink_input.setConstant(0); Eigen::DefaultDevice device; SumTensorOp<float, Eigen::DefaultDevice> operation; operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {2}}, {{0}, {4}}, {{0}, {6}}, {{0}, {8}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(operationfunctionSumTensorOpDouble) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size); sink_input.setConstant(0); Eigen::DefaultDevice device; SumTensorOp<double, Eigen::DefaultDevice> operation; operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {2}}, {{0}, {4}}, {{0}, {6}}, {{0}, {8}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameSumTensorOp) { SumTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "SumTensorOp"); } /** ProdTensorOp Tests */ BOOST_AUTO_TEST_CASE(destructorProdTensorOp) { ProdTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new ProdTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionProdTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_input(batch_size, memory_size, sink_layer_size); //sink_input.setZero(); // Pre initNode update sink_input.setConstant(1); Eigen::DefaultDevice device; ProdTensorOp<float, Eigen::DefaultDevice> operation; operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); //expected.setValues({ {{0}, {1}}, {{0}, {4}}, {{0}, {9}}, {{0}, {16}} }); // Pre initNode update expected.setValues({ {{1}, {1}}, {{1}, {4}}, {{1}, {9}}, {{1}, {16}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(operationfunctionProdTensorOp2) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 2; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setZero(); weights(0, 0) = 2; weights(1, 1) = 2; Eigen::Tensor<float, 3> sink_input(batch_size, memory_size, sink_layer_size); //sink_input.setZero(); // Pre initNode update sink_input.setConstant(1); Eigen::DefaultDevice device; ProdTensorOp<float, Eigen::DefaultDevice> operation; operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{1, 1}, {2, 2}}, {{1, 1}, {4, 4}}, {{1, 1}, {6, 6}}, {{1, 1}, {8,8}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameProdTensorOp) { ProdTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "ProdTensorOp"); } /** ProdSCTensorOp Tests */ BOOST_AUTO_TEST_CASE(destructorProdSCTensorOp) { ProdSCTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new ProdSCTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionProdSCTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 2; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setZero(); weights(0, 0) = 2; weights(1, 1) = 2; Eigen::Tensor<float, 3> sink_input(batch_size, memory_size, sink_layer_size); //sink_input.setZero(); // Pre initNode update sink_input.setConstant(1); Eigen::DefaultDevice device; ProdSCTensorOp<float, Eigen::DefaultDevice> operation; operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{1, 1}, {2, 2}}, {{1, 1}, {4, 4}}, {{1, 1}, {6, 6}}, {{1, 1}, {8,8}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameProdSCTensorOp) { ProdSCTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "ProdSCTensorOp"); } /** MaxTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorMaxTensorOp) { MaxTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; MaxTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMaxTensorOp) { MaxTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new MaxTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionMaxTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 2}, {0, 0}}, {{2, 3}, {0, 0}}, {{3, 4}, {0, 0}}, {{4, 5}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_input(batch_size, memory_size, sink_layer_size); sink_input.setConstant(0); Eigen::DefaultDevice device; MaxTensorOp<float, Eigen::DefaultDevice> operation; operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {2}}, {{0}, {3}}, {{0}, {4}}, {{0}, {5}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameMaxTensorOp) { MaxTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "MaxTensorOp"); } /** MinTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorMinTensorOp) { MinTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; MinTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMinTensorOp) { MinTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new MinTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionMinTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 2}, {0, 0}}, {{2, 3}, {0, 0}}, {{3, 4}, {0, 0}}, {{4, 5}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_input(batch_size, memory_size, sink_layer_size); sink_input.setConstant(2); Eigen::DefaultDevice device; MinTensorOp<float, Eigen::DefaultDevice> operation; operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{2}, {1}}, {{2}, {2}}, {{2}, {2}}, {{2}, {2}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameMinTensorOp) { MinTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "MinTensorOp"); } /** MeanTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorMeanTensorOp) { MeanTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; MeanTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMeanTensorOp) { MeanTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new MeanTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionMeanTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 2}, {0, 0}}, {{2, 3}, {0, 0}}, {{3, 4}, {0, 0}}, {{4, 5}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_input(batch_size, memory_size, sink_layer_size); sink_input.setConstant(0); Eigen::DefaultDevice device; MeanTensorOp<float, Eigen::DefaultDevice> operation; operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {1.5}}, {{0}, {2.5}}, {{0}, {3.5}}, {{0}, {4.5}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(operationfunctionMeanTensorOpDouble) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 2}, {0, 0}}, {{2, 3}, {0, 0}}, {{3, 4}, {0, 0}}, {{4, 5}, {0, 0}} }); Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size); sink_input.setConstant(0); Eigen::DefaultDevice device; MeanTensorOp<double, Eigen::DefaultDevice> operation; operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {1.5}}, {{0}, {2.5}}, {{0}, {3.5}}, {{0}, {4.5}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameMeanTensorOp) { MeanTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "MeanTensorOp"); } /** VarModTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorVarModTensorOp) { VarModTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; VarModTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorVarModTensorOp) { VarModTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new VarModTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionVarModTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 2}, {0, 0}}, {{2, 3}, {0, 0}}, {{3, 4}, {0, 0}}, {{4, 5}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_input(batch_size, memory_size, sink_layer_size); sink_input.setConstant(0); Eigen::DefaultDevice device; VarModTensorOp<float, Eigen::DefaultDevice> operation; operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {2.5}}, {{0}, {6.5}}, {{0}, {12.5}}, {{0}, {20.5}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(operationfunctionVarModTensorOpDouble) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 2}, {0, 0}}, {{2, 3}, {0, 0}}, {{3, 4}, {0, 0}}, {{4, 5}, {0, 0}} }); Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size); sink_input.setConstant(0); Eigen::DefaultDevice device; VarModTensorOp<double, Eigen::DefaultDevice> operation; operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {2.5}}, {{0}, {6.5}}, {{0}, {12.5}}, {{0}, {20.5}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameVarModTensorOp) { VarModTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "VarModTensorOp"); } /** VarTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorVarTensorOp) { VarTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; VarTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorVarTensorOp) { VarTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new VarTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionVarTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 2}, {0, 0}}, {{2, 3}, {0, 0}}, {{3, 4}, {0, 0}}, {{4, 5}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_input(batch_size, memory_size, sink_layer_size); sink_input.setConstant(0); Eigen::DefaultDevice device; VarTensorOp<float, Eigen::DefaultDevice> operation; operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {0.25}}, {{0}, {0.25}}, {{0}, {0.25}}, {{0}, {0.25}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameVarTensorOp) { VarTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "VarTensorOp"); } /** CountTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorCountTensorOp) { CountTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; CountTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorCountTensorOp) { CountTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new CountTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionCountTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_input(batch_size, memory_size, sink_layer_size); sink_input.setConstant(0); Eigen::DefaultDevice device; CountTensorOp<float, Eigen::DefaultDevice> operation; operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {2}}, {{0}, {2}}, {{0}, {2}}, {{0}, {2}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameCountTensorOp) { CountTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "CountTensorOp"); } /** SumErrorTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorSumErrorTensorOp) { SumErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; SumErrorTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorSumErrorTensorOp) { SumErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new SumErrorTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionSumErrorTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_error(batch_size, memory_size, source_layer_size); source_error.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_derivative(batch_size, memory_size, sink_layer_size); sink_derivative.setConstant(2); Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setConstant(0); Eigen::Tensor<float, 3> sink_output(batch_size, memory_size, sink_layer_size); sink_output.setConstant(1); Eigen::DefaultDevice device; SumErrorTensorOp<float, Eigen::DefaultDevice> operation; operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {4}}, {{0}, {8}}, {{0}, {12}}, {{0}, {16}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameSumErrorTensorOp) { SumErrorTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "SumErrorTensorOp"); } /** ProdErrorTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorProdErrorTensorOp) { ProdErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ProdErrorTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorProdErrorTensorOp) { ProdErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new ProdErrorTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionProdErrorTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_error(batch_size, memory_size, source_layer_size); source_error.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_derivative(batch_size, memory_size, sink_layer_size); sink_derivative.setConstant(2); Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setConstant(0); Eigen::Tensor<float, 3> sink_output(batch_size, memory_size, sink_layer_size); sink_output.setConstant(1); Eigen::DefaultDevice device; ProdErrorTensorOp<float, Eigen::DefaultDevice> operation; operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {4}}, {{0}, {16}}, {{0}, {36}}, {{0}, {64}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(operationfunctionProdErrorTensorOpDouble) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size); source_error.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size); sink_derivative.setConstant(2); Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setConstant(0); Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size); sink_output.setConstant(1); Eigen::DefaultDevice device; ProdErrorTensorOp<double, Eigen::DefaultDevice> operation; operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {4}}, {{0}, {16}}, {{0}, {36}}, {{0}, {64}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameProdErrorTensorOp) { ProdErrorTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "ProdErrorTensorOp"); } /** MaxErrorTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorMaxErrorTensorOp) { MaxErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; MaxErrorTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMaxErrorTensorOp) { MaxErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new MaxErrorTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionMaxErrorTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_error(batch_size, memory_size, source_layer_size); source_error.setValues({ {{1, 2}, {0, 0}}, {{2, 3}, {0, 0}}, {{3, 4}, {0, 0}}, {{4, 5}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{1, 2}, {0, 0}}, {{2, 3}, {0, 0}}, {{3, 4}, {0, 0}}, {{4, 5}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_derivative(batch_size, memory_size, sink_layer_size); sink_derivative.setConstant(2); Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setConstant(0); Eigen::Tensor<float, 3> sink_output(batch_size, memory_size, sink_layer_size); sink_output.setValues({ {{0}, {2}}, {{0}, {3}}, {{0}, {4}}, {{0}, {5}} }); Eigen::DefaultDevice device; MaxErrorTensorOp<float, Eigen::DefaultDevice> operation; operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {4}}, {{0}, {6}}, {{0}, {8}}, {{0}, {10}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameMaxErrorTensorOp) { MaxErrorTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "MaxErrorTensorOp"); } /** MinErrorTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorMinErrorTensorOp) { MinErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; MinErrorTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMinErrorTensorOp) { MinErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new MinErrorTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionMinErrorTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_error(batch_size, memory_size, source_layer_size); source_error.setValues({ {{1, 2}, {0, 0}}, {{2, 3}, {0, 0}}, {{3, 4}, {0, 0}}, {{4, 5}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{1, 2}, {0, 0}}, {{2, 3}, {0, 0}}, {{3, 4}, {0, 0}}, {{4, 5}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_derivative(batch_size, memory_size, sink_layer_size); sink_derivative.setConstant(2); Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setConstant(0); Eigen::Tensor<float, 3> sink_output(batch_size, memory_size, sink_layer_size); sink_output.setValues({ {{0}, {1}}, {{0}, {2}}, {{0}, {3}}, {{0}, {4}} }); Eigen::DefaultDevice device; MinErrorTensorOp<float, Eigen::DefaultDevice> operation; operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {2}}, {{0}, {4}}, {{0}, {6}}, {{0}, {8}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameMinErrorTensorOp) { MinErrorTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "MinErrorTensorOp"); } /** MeanErrorTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorMeanErrorTensorOp) { MeanErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; MeanErrorTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMeanErrorTensorOp) { MeanErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new MeanErrorTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionMeanErrorTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_error(batch_size, memory_size, source_layer_size); source_error.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_derivative(batch_size, memory_size, sink_layer_size); sink_derivative.setConstant(2); Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setConstant(0); Eigen::Tensor<float, 3> sink_output(batch_size, memory_size, sink_layer_size); sink_output.setConstant(1); Eigen::DefaultDevice device; MeanErrorTensorOp<float, Eigen::DefaultDevice> operation; operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), 4, //NOTE: used only for testing purposes! batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {1}}, {{0}, {2}}, {{0}, {3}}, {{0}, {4}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(operationfunctionMeanErrorTensorOpDouble) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size); source_error.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size); sink_derivative.setConstant(2); Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setConstant(0); Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size); sink_output.setConstant(1); Eigen::DefaultDevice device; MeanErrorTensorOp<double, Eigen::DefaultDevice> operation; operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), 4, //NOTE: used only for testing purposes! batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {1}}, {{0}, {2}}, {{0}, {3}}, {{0}, {4}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } /** VarModErrorTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorVarModErrorTensorOp) { VarModErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; VarModErrorTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorVarModErrorTensorOp) { VarModErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new VarModErrorTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionVarModErrorTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_error(batch_size, memory_size, source_layer_size); source_error.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_derivative(batch_size, memory_size, sink_layer_size); sink_derivative.setConstant(2); Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setConstant(0); Eigen::Tensor<float, 3> sink_output(batch_size, memory_size, sink_layer_size); sink_output.setConstant(1); Eigen::DefaultDevice device; VarModErrorTensorOp<float, Eigen::DefaultDevice> operation; operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), 4, //NOTE: used only for testing purposes! batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {2}}, {{0}, {4}}, {{0}, {6}}, {{0}, {8}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(operationfunctionVarModErrorTensorOpDouble) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size); source_error.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size); sink_derivative.setConstant(2); Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setConstant(0); Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size); sink_output.setConstant(1); Eigen::DefaultDevice device; VarModErrorTensorOp<double, Eigen::DefaultDevice> operation; operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), 4, //NOTE: used only for testing purposes! batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {2}}, {{0}, {4}}, {{0}, {6}}, {{0}, {8}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameMeanErrorTensorOp) { MeanErrorTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "MeanErrorTensorOp"); } /** VarErrorTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorVarErrorTensorOp) { VarErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; VarErrorTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorVarErrorTensorOp) { VarErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new VarErrorTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionVarErrorTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_error(batch_size, memory_size, source_layer_size); source_error.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_derivative(batch_size, memory_size, sink_layer_size); sink_derivative.setConstant(2); Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setConstant(0); Eigen::Tensor<float, 3> sink_output(batch_size, memory_size, sink_layer_size); sink_output.setConstant(1); Eigen::DefaultDevice device; VarErrorTensorOp<float, Eigen::DefaultDevice> operation; operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {4}}, {{0}, {8}}, {{0}, {12}}, {{0}, {16}} }); // TODO: update //for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { // for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { // for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { // //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl; // BOOST_CHECK_CLOSE(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); // } // } //} } BOOST_AUTO_TEST_CASE(getNameVarErrorTensorOp) { VarErrorTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "VarErrorTensorOp"); } /** CountErrorTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorCountErrorTensorOp) { CountErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; CountErrorTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorCountErrorTensorOp) { CountErrorTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new CountErrorTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionCountErrorTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> source_error(batch_size, memory_size, source_layer_size); source_error.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}}, {{3, 3}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 3> sink_derivative(batch_size, memory_size, sink_layer_size); sink_derivative.setConstant(2); Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setConstant(0); Eigen::Tensor<float, 3> sink_output(batch_size, memory_size, sink_layer_size); sink_output.setConstant(1); Eigen::DefaultDevice device; CountErrorTensorOp<float, Eigen::DefaultDevice> operation; operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); Eigen::Tensor<float, 3> expected(batch_size, memory_size, sink_layer_size); expected.setValues({ {{0}, {0}}, {{0}, {0}}, {{0}, {0}}, {{0}, {0}} }); for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) { //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl; BOOST_CHECK_CLOSE(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter), 1e-4); } } } } BOOST_AUTO_TEST_CASE(getNameCountErrorTensorOp) { CountErrorTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "CountErrorTensorOp"); } /** SumWeightGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorSumWeightGradTensorOp) { SumWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; SumWeightGradTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorSumWeightGradTensorOp) { SumWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new SumWeightGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionSumWeightGradTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setValues({ {{1}, {1}}, {{2}, {1}}, {{3}, {1}}, //{{3}, {0}}, {{4}, {0}} }); Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {1, 1}}, {{2, 2}, {2, 2}}, {{1, 0}, {1, 0}}, //{{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}}, {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 2> weight_error(source_layer_size, sink_layer_size); weight_error.setConstant(0); Eigen::DefaultDevice device; SumWeightGradTensorOp<float, Eigen::DefaultDevice> operation; operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, device); Eigen::Tensor<float, 2> expected(source_layer_size, sink_layer_size); expected.setValues({ {-5}, {-4} }); //expected.setValues({ {-4.75}, {-4.75} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter), 1e-4); } } } BOOST_AUTO_TEST_CASE(operationfunctionSumWeightGradTensorOpDouble) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setValues({ {{1}, {1}}, {{2}, {1}}, {{3}, {1}}, //{{3}, {0}}, {{4}, {0}} }); Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {1, 1}}, {{2, 2}, {2, 2}}, {{1, 0}, {1, 0}}, //{{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}}, {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size); weight_error.setConstant(0); Eigen::DefaultDevice device; SumWeightGradTensorOp<double, Eigen::DefaultDevice> operation; operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, device); Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size); expected.setValues({ {-5}, {-4} }); //expected.setValues({ {-4.75}, {-4.75} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter), 1e-4); } } } BOOST_AUTO_TEST_CASE(getNameSumWeightGradTensorOp) { SumWeightGradTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "SumWeightGradTensorOp"); } /** ProdWeightGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorProdWeightGradTensorOp) { ProdWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ProdWeightGradTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorProdWeightGradTensorOp) { ProdWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new ProdWeightGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionProdWeightGradTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setValues({ {{1}, {1}}, {{2}, {1}}, {{3}, {0}}, {{4}, {0}} }); Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {1, 1}}, {{2, 2}, {2, 2}}, {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}}, {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 2> weight_error(source_layer_size, sink_layer_size); weight_error.setConstant(0); Eigen::DefaultDevice device; ProdWeightGradTensorOp<float, Eigen::DefaultDevice> operation; operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, device); Eigen::Tensor<float, 2> expected(source_layer_size, sink_layer_size); expected.setValues({ {-8}, {-8} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter), 1e-4); } } } BOOST_AUTO_TEST_CASE(getNameProdWeightGradTensorOp) { ProdWeightGradTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "ProdWeightGradTensorOp"); } /** MaxWeightGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorMaxWeightGradTensorOp) { MaxWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; MaxWeightGradTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMaxWeightGradTensorOp) { MaxWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new MaxWeightGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionMaxWeightGradTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setValues({ {{1}, {1}}, {{2}, {1}}, {{3}, {0}}, {{4}, {0}} }); Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {1, 1}}, {{2, 2}, {2, 2}}, {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}}, {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 2> weight_error(source_layer_size, sink_layer_size); weight_error.setConstant(0); Eigen::DefaultDevice device; MaxWeightGradTensorOp<float, Eigen::DefaultDevice> operation; operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, device); Eigen::Tensor<float, 2> expected(source_layer_size, sink_layer_size); expected.setValues({ {-4.75}, {-4.75} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter), 1e-4); } } } BOOST_AUTO_TEST_CASE(operationfunctionMaxWeightGradTensorOpDouble) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setValues({ {{1}, {1}}, {{2}, {1}}, {{3}, {0}}, {{4}, {0}} }); Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {1, 1}}, {{2, 2}, {2, 2}}, {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}}, {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size); weight_error.setConstant(0); Eigen::DefaultDevice device; MaxWeightGradTensorOp<double, Eigen::DefaultDevice> operation; operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, device); Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size); expected.setValues({ {-4.75}, {-4.75} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter), 1e-4); } } } BOOST_AUTO_TEST_CASE(getNameMaxWeightGradTensorOp) { MaxWeightGradTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "MaxWeightGradTensorOp"); } /** MinWeightGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorMinWeightGradTensorOp) { MinWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; MinWeightGradTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMinWeightGradTensorOp) { MinWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new MinWeightGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionMinWeightGradTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setValues({ {{1}, {1}}, {{2}, {1}}, {{3}, {0}}, {{4}, {0}} }); Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {1, 1}}, {{2, 2}, {2, 2}}, {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}}, {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 2> weight_error(source_layer_size, sink_layer_size); weight_error.setConstant(0); Eigen::DefaultDevice device; MinWeightGradTensorOp<float, Eigen::DefaultDevice> operation; operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, device); Eigen::Tensor<float, 2> expected(source_layer_size, sink_layer_size); expected.setValues({ {-4.75}, {-4.75} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter), 1e-4); } } } BOOST_AUTO_TEST_CASE(operationfunctionMinWeightGradTensorOpDouble) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setValues({ {{1}, {1}}, {{2}, {1}}, {{3}, {0}}, {{4}, {0}} }); Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {1, 1}}, {{2, 2}, {2, 2}}, {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}}, {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size); weight_error.setConstant(0); Eigen::DefaultDevice device; MinWeightGradTensorOp<double, Eigen::DefaultDevice> operation; operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, device); Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size); expected.setValues({ {-4.75}, {-4.75} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter), 1e-4); } } } BOOST_AUTO_TEST_CASE(getNameMinWeightGradTensorOp) { MinWeightGradTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "MinWeightGradTensorOp"); } /** MeanWeightGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorMeanWeightGradTensorOp) { MeanWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; MeanWeightGradTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorMeanWeightGradTensorOp) { MeanWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new MeanWeightGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionMeanWeightGradTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setValues({ {{1}, {1}}, {{2}, {1}}, {{3}, {0}}, {{4}, {0}} }); Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {1, 1}}, {{2, 2}, {2, 2}}, {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}}, {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 2> weight_error(source_layer_size, sink_layer_size); weight_error.setConstant(0); Eigen::DefaultDevice device; MeanWeightGradTensorOp<float, Eigen::DefaultDevice> operation; operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, device); Eigen::Tensor<float, 2> expected(source_layer_size, sink_layer_size); expected.setValues({ {-2.375}, {-2.375} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter), 1e-4); } } } BOOST_AUTO_TEST_CASE(operationfunctionMeanWeightGradTensorOpDouble) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setValues({ {{1}, {1}}, {{2}, {1}}, {{3}, {0}}, {{4}, {0}} }); Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {1, 1}}, {{2, 2}, {2, 2}}, {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}}, {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size); weight_error.setConstant(0); Eigen::DefaultDevice device; MeanWeightGradTensorOp<double, Eigen::DefaultDevice> operation; operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, device); Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size); expected.setValues({ {-2.375}, {-2.375} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter), 1e-4); } } } BOOST_AUTO_TEST_CASE(getNameMeanWeightGradTensorOp) { MeanWeightGradTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "MeanWeightGradTensorOp"); } /** VarModWeightGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorVarModWeightGradTensorOp) { VarModWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; VarModWeightGradTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorVarModWeightGradTensorOp) { VarModWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new VarModWeightGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionVarModWeightGradTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setValues({ {{1}, {1}}, {{2}, {1}}, {{3}, {0}}, {{4}, {0}} }); Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {1, 1}}, {{2, 2}, {2, 2}}, {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}}, {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 2> weight_error(source_layer_size, sink_layer_size); weight_error.setConstant(0); Eigen::DefaultDevice device; VarModWeightGradTensorOp<float, Eigen::DefaultDevice> operation; operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, device); Eigen::Tensor<float, 2> expected(source_layer_size, sink_layer_size); expected.setValues({ {-4.75}, {-4.75} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter), 1e-4); } } } BOOST_AUTO_TEST_CASE(operationfunctionVarModWeightGradTensorOpDouble) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setValues({ {{1}, {1}}, {{2}, {1}}, {{3}, {0}}, {{4}, {0}} }); Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {1, 1}}, {{2, 2}, {2, 2}}, {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}}, {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size); weight_error.setConstant(0); Eigen::DefaultDevice device; VarModWeightGradTensorOp<double, Eigen::DefaultDevice> operation; operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, device); Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size); expected.setValues({ {-4.75}, {-4.75} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter), 1e-4); } } } BOOST_AUTO_TEST_CASE(getNameVarModWeightGradTensorOp) { VarModWeightGradTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "VarModWeightGradTensorOp"); } /** VarWeightGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorVarWeightGradTensorOp) { VarWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; VarWeightGradTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorVarWeightGradTensorOp) { VarWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new VarWeightGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionVarWeightGradTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setValues({ {{1}, {1}}, {{2}, {1}}, {{3}, {0}}, {{4}, {0}} }); Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {1, 1}}, {{2, 2}, {2, 2}}, {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}}, {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 2> weight_error(source_layer_size, sink_layer_size); weight_error.setConstant(0); Eigen::DefaultDevice device; VarWeightGradTensorOp<float, Eigen::DefaultDevice> operation; operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, device); Eigen::Tensor<float, 2> expected(source_layer_size, sink_layer_size); expected.setValues({ {-4.75}, {-4.75} }); // TODO: update //for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { // for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { // //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; // BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter), 1e-4); // } //} } BOOST_AUTO_TEST_CASE(getNameVarWeightGradTensorOp) { VarWeightGradTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "VarWeightGradTensorOp"); } /** CountWeightGradTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorCountWeightGradTensorOp) { CountWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; CountWeightGradTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorCountWeightGradTensorOp) { CountWeightGradTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new CountWeightGradTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionCountWeightGradTensorOp) { const int batch_size = 4; const int memory_size = 2; const int source_layer_size = 2; const int sink_layer_size = 1; const int source_time_step = 0; const int sink_time_step = 1; Eigen::Tensor<float, 3> sink_error(batch_size, memory_size, sink_layer_size); sink_error.setValues({ {{1}, {1}}, {{2}, {1}}, {{3}, {0}}, {{4}, {0}} }); Eigen::Tensor<float, 3> source_output(batch_size, memory_size, source_layer_size); source_output.setValues({ {{1, 1}, {1, 1}}, {{2, 2}, {2, 2}}, {{1, 1}, {0, 0}}, {{2, 2}, {0, 0}} }); Eigen::Tensor<float, 3> source_input(batch_size, memory_size, source_layer_size); source_input.setValues({ {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}}, {{2, 2}, {0, 0}}, {{4, 4}, {0, 0}} }); Eigen::Tensor<float, 2> weights(source_layer_size, sink_layer_size); weights.setConstant(1); Eigen::Tensor<float, 2> weight_error(source_layer_size, sink_layer_size); weight_error.setConstant(0); Eigen::DefaultDevice device; CountWeightGradTensorOp<float, Eigen::DefaultDevice> operation; operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size, batch_size, memory_size, source_layer_size, sink_layer_size, device); Eigen::Tensor<float, 2> expected(source_layer_size, sink_layer_size); expected.setValues({ {0}, {0} }); for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) { for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) { //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl; BOOST_CHECK_CLOSE(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter), 1e-4); } } } BOOST_AUTO_TEST_CASE(getNameCountWeightGradTensorOp) { CountWeightGradTensorOp<float, Eigen::DefaultDevice> operation; BOOST_CHECK_EQUAL(operation.getName(), "CountWeightGradTensorOp"); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/ModelTrainerExperimentalDefaultDevice.h> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/io/Parameters.h> #include <EvoNet/simulator/ChromatogramSimulator.h> #include <unsupported/Eigen/CXX11/Tensor> using namespace EvoNet; /** Application designed to train a network to accurately integrate and identify peaks Features: - de-noises the chromatogram for more accurate peak area calculation - determines the best left, right, and inner points for each peak as probabilities Input: - vector of time/mz and intensity pairs Data pre-processing: - each time/mz and intensity pair is binned into equally spaced time steps - intensities are normalized to the range 0 to 1 Output: - vector of intensity bins - vector of logits of peak probabilities (peak threshold > 0.75) Post-processing: - integration of peaks based on binned intensity, average distance between time-steps, and logit peak probability pairs */ // Extended template<typename TensorT> class ModelTrainerExt : public ModelTrainerExperimentalDefaultDevice<TensorT> { public: /* @brief Denoising Auto Encoder that takes a segment of a raw chromatogram and returns a smoothed and denoised version of the same chromatogram */ void makeDenoisingAE(Model<TensorT>& model, int n_inputs = 512, int n_encodings = 32, int n_hidden_0 = 512, int n_hidden_1 = 256, int n_hidden_2 = 64, bool specify_layers = true) { model.setId(0); model.setName("DenoisingAE"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", n_inputs, true); // Define the activation auto activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); auto activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-4, 0.9, 0.999, 1e-8, 10)); // Add the Encoder FC layers if (n_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "EN_Intensity_0", "EN_Intensity_0", node_names, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); } if (n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "EN_Intensity_1", "EN_Intensity_1", node_names, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); } if (n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "EN_Intensity_2", "EN_Intensity_2", node_names, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); } // Add the encoding layers for Intensity std::vector<std::string> node_names_encoding = model_builder.addFullyConnected(model, "Encoding_Intensity", "Encoding_Intensity", node_names, n_encodings, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((int)(node_names.size() + n_encodings) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); // Add the Decoder FC layers node_names = node_names_encoding; if (n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "DE_Intensity_2", "DE_Intensity_2", node_names, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); } if (n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "DE_Intensity_1", "DE_Intensity_1", node_names, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); } if (n_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "DE_Intensity_0", "DE_Intensity_0", node_names, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); } // Add the output nodes node_names = model_builder.addFullyConnected(model, "DE_Intensity_Out", "DE_Intensity_Out", node_names, n_inputs, //std::make_shared<SigmoidOp<TensorT>>(SigmoidOp<TensorT>()), //std::make_shared<SigmoidGradOp<TensorT>>(SigmoidGradOp<TensorT>()), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 1)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names = model_builder.addSinglyConnected(model, "Output", "Output", node_names, n_inputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names) { model.nodes_.at(node_name)->setType(NodeType::output); } model.setInputAndOutputNodes(); //if (!model.checkCompleteInputToOutput()) // std::cout << "Model input and output are not fully connected!" << std::endl; } }; template<typename TensorT> class DataSimulatorExt : public ChromatogramSimulator<TensorT> { public: void simulateChromData_(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = loss_output_data.dimension(2); const int n_metric_nodes = metric_output_data.dimension(2); input_data.setZero(); loss_output_data.setZero(); metric_output_data.setZero(); if (this->output_data_type_ == "EMG") { assert(n_output_nodes == 4); assert(n_metric_nodes == 4); } else { assert(n_output_nodes == n_input_nodes); assert(n_metric_nodes == n_input_nodes); } // Reformat the Chromatogram for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { std::vector<TensorT> chrom_time, chrom_intensity, chrom_time_test, chrom_intensity_test; std::vector<std::pair<TensorT, TensorT>> best_lr; std::vector<TensorT> peak_apices; std::vector<EMGModel<TensorT>> emgs; // make the chrom and noisy chrom this->simulateChromatogram(chrom_time_test, chrom_intensity_test, chrom_time, chrom_intensity, best_lr, peak_apices, emgs, step_size_mu_, step_size_sigma_, chrom_window_size_, noise_mu_, noise_sigma_, baseline_height_, n_peaks_, emg_h_, emg_tau_, emg_mu_offset_, emg_sigma_); for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = chrom_intensity.at(nodes_iter); //intensity if (this->output_data_type_ == "Points") { loss_output_data(batch_iter, memory_iter, nodes_iter) = chrom_intensity_test.at(nodes_iter); //intensity metric_output_data(batch_iter, memory_iter, nodes_iter) = chrom_intensity_test.at(nodes_iter); //intensity } else if (this->output_data_type_ == "IsApex") { TensorT isPeakApex = 0.0; for (const TensorT& peak_apex : peak_apices) { if (abs(chrom_time_test.at(nodes_iter) - peak_apex) < 1e-6) { isPeakApex = 1.0; } } loss_output_data(batch_iter, memory_iter, nodes_iter) = isPeakApex; //IsPeakApex metric_output_data(batch_iter, memory_iter, nodes_iter) = isPeakApex; //IsPeakApex } else if (this->output_data_type_ == "IsPeak") { TensorT isPeak = 0.0; for (const std::pair<TensorT, TensorT>& lr : best_lr) { if (chrom_time_test.at(nodes_iter) >= lr.first && chrom_time_test.at(nodes_iter) <= lr.second) { isPeak = 1.0; } } loss_output_data(batch_iter, memory_iter, nodes_iter) = isPeak; //IsPeak metric_output_data(batch_iter, memory_iter, nodes_iter) = isPeak; //IsPeak } } if (this->output_data_type_ == "EMG") { for (int i = 0; i < emgs.size(); ++i) { loss_output_data(batch_iter, memory_iter, i * 4) = emgs.at(i).getH(); metric_output_data(batch_iter, memory_iter, i * 4) = emgs.at(i).getH(); loss_output_data(batch_iter, memory_iter, i * 4 + 1) = emgs.at(i).getTau(); metric_output_data(batch_iter, memory_iter, i * 4 + 1) = emgs.at(i).getTau(); loss_output_data(batch_iter, memory_iter, i * 4 + 2) = emgs.at(i).getMu() / chrom_window_size_.first; metric_output_data(batch_iter, memory_iter, i * 4 + 2) = emgs.at(i).getMu() / chrom_window_size_.first; loss_output_data(batch_iter, memory_iter, i * 4 + 3) = emgs.at(i).getSigma() / chrom_window_size_.first; metric_output_data(batch_iter, memory_iter, i * 4 + 3) = emgs.at(i).getSigma() / chrom_window_size_.first; } } } } time_steps.setConstant(1.0f); } void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { simulateChromData_(input_data, loss_output_data, metric_output_data, time_steps); } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { simulateChromData_(input_data, loss_output_data, metric_output_data, time_steps); } /// public members that are passed to simulate methods std::pair<TensorT, TensorT> step_size_mu_ = std::make_pair(1, 1); std::pair<TensorT, TensorT> step_size_sigma_ = std::make_pair(0, 0); std::pair<TensorT, TensorT> chrom_window_size_ = std::make_pair(500, 500); std::pair<TensorT, TensorT> noise_mu_ = std::make_pair(0, 0); std::pair<TensorT, TensorT> noise_sigma_ = std::make_pair(0, 0.05); std::pair<TensorT, TensorT> baseline_height_ = std::make_pair(0, 0); std::pair<TensorT, TensorT> n_peaks_ = std::make_pair(10, 20); std::pair<TensorT, TensorT> emg_h_ = std::make_pair(0.1, 1.0); std::pair<TensorT, TensorT> emg_tau_ = std::make_pair(0, 1); std::pair<TensorT, TensorT> emg_mu_offset_ = std::make_pair(-10, 10); std::pair<TensorT, TensorT> emg_sigma_ = std::make_pair(0.1, 0.3); std::string output_data_type_ = "Points"; // "IsApex", "isPeak", "EMG" int encoding_size_ = 64; }; template<class ...ParameterTypes> void main_(const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); // define the model logger ModelLogger<float> model_logger(true, true, true, false, false, false, false); // define the data simulator std::size_t input_size; if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get().find("EMG") != std::string::npos) { input_size = 64; } else { input_size = 512; } const std::size_t encoding_size = input_size / 8; DataSimulatorExt<float> data_simulator; if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get().find("Points") != std::string::npos) data_simulator.output_data_type_ = "Points"; else if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get().find("IsPeak") != std::string::npos) data_simulator.output_data_type_ = "IsPeak"; else if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get().find("IsApex") != std::string::npos) data_simulator.output_data_type_ = "IsApex"; else if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get().find("EMG") != std::string::npos) data_simulator.output_data_type_ = "EMG"; data_simulator.encoding_size_ = encoding_size; if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get().find("Hard") != std::string::npos) { data_simulator.step_size_mu_ = std::make_pair(1, 1); data_simulator.step_size_sigma_ = std::make_pair(0, 0); data_simulator.chrom_window_size_ = std::make_pair(input_size, input_size); data_simulator.noise_mu_ = std::make_pair(0, 0); data_simulator.noise_sigma_ = std::make_pair(0, 0.2); data_simulator.baseline_height_ = std::make_pair(0, 0); data_simulator.n_peaks_ = std::make_pair(0, 10); data_simulator.emg_h_ = std::make_pair(0.1, 1); data_simulator.emg_tau_ = std::make_pair(0, 1); data_simulator.emg_mu_offset_ = std::make_pair(-10, 10); data_simulator.emg_sigma_ = std::make_pair(10, 30); } else if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get().find("Medium") != std::string::npos) { // Some issues with the peak start/stop not touching the baseline data_simulator.step_size_mu_ = std::make_pair(1, 1); data_simulator.step_size_sigma_ = std::make_pair(0, 0); data_simulator.chrom_window_size_ = std::make_pair(input_size, input_size); data_simulator.noise_mu_ = std::make_pair(0, 0); data_simulator.noise_sigma_ = std::make_pair(0, 0.2); data_simulator.baseline_height_ = std::make_pair(0, 0); data_simulator.n_peaks_ = std::make_pair(1, 5); data_simulator.emg_h_ = std::make_pair(0.1, 1.0); data_simulator.emg_tau_ = std::make_pair(0, 0); data_simulator.emg_mu_offset_ = std::make_pair(0, 0); data_simulator.emg_sigma_ = std::make_pair(10, 30); } else if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get().find("Easy") != std::string::npos) { data_simulator.step_size_mu_ = std::make_pair(1, 1); data_simulator.step_size_sigma_ = std::make_pair(0, 0); data_simulator.chrom_window_size_ = std::make_pair(input_size, input_size); data_simulator.noise_mu_ = std::make_pair(0, 0); //data_simulator.noise_sigma_ = std::make_pair(0, 0.2); data_simulator.noise_sigma_ = std::make_pair(0, 0); data_simulator.baseline_height_ = std::make_pair(0, 0); data_simulator.n_peaks_ = std::make_pair(1, 2); data_simulator.emg_h_ = std::make_pair(1, 1); data_simulator.emg_tau_ = std::make_pair(0, 0); data_simulator.emg_mu_offset_ = std::make_pair(0, 0); data_simulator.emg_sigma_ = std::make_pair(10, 10); } // Make the input nodes std::vector<std::string> input_nodes; for (int i = 0; i < input_size; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } // Make the output nodes std::vector<std::string> output_nodes; if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get().find("EMG") != std::string::npos) { for (int i = 0; i < 4; ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes.push_back(name); } } else { for (int i = 0; i < input_size; ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes.push_back(name); } } // define the model interpreters std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; setModelInterpreterParameters(model_interpreters, args...); // define the model trainer ModelTrainerExt<float> model_trainer; setModelTrainerParameters(model_trainer, args...); std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper1; if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get().find("EMG") != std::string::npos) { loss_function_helper1.output_nodes_ = output_nodes; loss_function_helper1.loss_functions_ = { std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-6, std::get<EvoNetParameters::ModelTrainer::LossFncWeight0>(parameters).get() / float(input_size))) }; loss_function_helper1.loss_function_grads_ = { std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-6, std::get<EvoNetParameters::ModelTrainer::LossFncWeight0>(parameters).get() / float(input_size))) }; loss_function_helpers.push_back(loss_function_helper1); } else { loss_function_helper1.output_nodes_ = output_nodes; loss_function_helper1.loss_functions_ = { std::make_shared<BCEWithLogitsLossOp<float>>(BCEWithLogitsLossOp<float>(1e-6, std::get<EvoNetParameters::ModelTrainer::LossFncWeight0>(parameters).get() / float(input_size))) }; loss_function_helper1.loss_function_grads_ = { std::make_shared<BCEWithLogitsLossGradOp<float>>(BCEWithLogitsLossGradOp<float>(1e-6, std::get<EvoNetParameters::ModelTrainer::LossFncWeight0>(parameters).get() / float(input_size))) }; loss_function_helpers.push_back(loss_function_helper1); } model_trainer.setLossFunctionHelpers(loss_function_helpers); std::vector<MetricFunctionHelper<float>> metric_function_helpers; MetricFunctionHelper<float> metric_function_helper1; if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get().find("Points") != std::string::npos) { metric_function_helper1.output_nodes_ = output_nodes; metric_function_helper1.metric_functions_ = { std::make_shared<MAEOp<float>>(MAEOp<float>()) }; metric_function_helper1.metric_names_ = { "Reconstruction-MAE" }; metric_function_helpers.push_back(metric_function_helper1); } else if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get().find("IsApex") != std::string::npos) { metric_function_helper1.output_nodes_ = output_nodes; metric_function_helper1.metric_functions_ = { std::make_shared<PrecisionBCOp<float>>(PrecisionBCOp<float>()) }; metric_function_helper1.metric_names_ = { "IsPeakApex-PrecisionBC" }; metric_function_helpers.push_back(metric_function_helper1); } else if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get().find("IsPeak") != std::string::npos) { metric_function_helper1.output_nodes_ = output_nodes; metric_function_helper1.metric_functions_ = { std::make_shared<PrecisionBCOp<float>>(PrecisionBCOp<float>()) }; metric_function_helper1.metric_names_ = { "IsPeak-PrecisionBC" }; metric_function_helpers.push_back(metric_function_helper1); } else if (std::get<EvoNetParameters::Examples::SimulationType>(parameters).get().find("EMG") != std::string::npos) { metric_function_helper1.output_nodes_ = output_nodes; metric_function_helper1.metric_functions_ = { std::make_shared<MAEOp<float>>(MAEOp<float>()) }; metric_function_helper1.metric_names_ = { "EMGParam-MAE" }; metric_function_helpers.push_back(metric_function_helper1); } model_trainer.setMetricFunctionHelpers(metric_function_helpers); // define the initial population Model<float> model; if (std::get<EvoNetParameters::Main::MakeModel>(parameters).get()) { std::cout << "Making the model..." << std::endl; if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "DenoisingAE") { model_trainer.makeDenoisingAE(model, input_size, encoding_size, std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(), std::get<EvoNetParameters::ModelTrainer::NHidden2>(parameters).get(), true); } if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "VAE") { // TODO //model_trainer.makeVAE(model, input_size, encoding_size, // std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(), // std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(), // std::get<EvoNetParameters::ModelTrainer::NHidden2>(parameters).get(), // true); } else if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "EncoderEMG") { // TODO //model_trainer.makeDenoisingAE(model, input_size, n_emgs, // std::get<EvoNetParameters::ModelTrainer::NHidden0>(parameters).get(), // std::get<EvoNetParameters::ModelTrainer::NHidden1>(parameters).get(), // std::get<EvoNetParameters::ModelTrainer::NHidden2>(parameters).get(), // true); } else if (std::get<EvoNetParameters::Examples::ModelType>(parameters).get() == "MPNN") { // TODO } model.setId(0); } else { ModelFile<float> model_file; ModelInterpreterFileDefaultDevice<float> model_interpreter_file; loadModelFromParameters(model, model_interpreters.at(0), model_file, model_interpreter_file, args...); } model.setName(std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get()); //So that all output will be written to a specific directory // Train the model std::pair<std::vector<float>, std::vector<float>> model_errors = model_trainer.trainModel(model, data_simulator, input_nodes, model_logger, model_interpreters.front()); } int main(int argc, char** argv) { // Parse the user commands int id_int = -1; std::string parameters_filename = ""; parseCommandLineArguments(argc, argv, id_int, parameters_filename); // Set the parameter names and defaults EvoNetParameters::General::ID id("id", -1); EvoNetParameters::General::DataDir data_dir("data_dir", std::string("")); EvoNetParameters::Main::DeviceId device_id("device_id", 0); EvoNetParameters::Main::ModelName model_name("model_name", ""); EvoNetParameters::Main::MakeModel make_model("make_model", true); EvoNetParameters::Main::LoadModelCsv load_model_csv("load_model_csv", false); EvoNetParameters::Main::LoadModelBinary load_model_binary("load_model_binary", false); EvoNetParameters::Main::TrainModel train_model("train_model", true); EvoNetParameters::Main::EvolveModel evolve_model("evolve_model", false); EvoNetParameters::Main::EvaluateModel evaluate_model("evaluate_model", false); EvoNetParameters::Main::EvaluateModels evaluate_models("evaluate_models", false); EvoNetParameters::Examples::ModelType model_type("model_type", "Solution"); EvoNetParameters::Examples::SimulationType simulation_type("simulation_type", ""); EvoNetParameters::PopulationTrainer::PopulationName population_name("population_name", ""); EvoNetParameters::PopulationTrainer::NGenerations n_generations("n_generations", 1); EvoNetParameters::PopulationTrainer::NInterpreters n_interpreters("n_interpreters", 1); EvoNetParameters::PopulationTrainer::PruneModelNum prune_model_num("prune_model_num", 10); EvoNetParameters::PopulationTrainer::RemoveIsolatedNodes remove_isolated_nodes("remove_isolated_nodes", true); EvoNetParameters::PopulationTrainer::CheckCompleteModelInputToOutput check_complete_model_input_to_output("check_complete_model_input_to_output", true); EvoNetParameters::PopulationTrainer::PopulationSize population_size("population_size", 128); EvoNetParameters::PopulationTrainer::NTop n_top("n_top", 8); EvoNetParameters::PopulationTrainer::NRandom n_random("n_random", 8); EvoNetParameters::PopulationTrainer::NReplicatesPerModel n_replicates_per_model("n_replicates_per_model", 1); EvoNetParameters::PopulationTrainer::ResetModelCopyWeights reset_model_copy_weights("reset_model_copy_weights", true); EvoNetParameters::PopulationTrainer::ResetModelTemplateWeights reset_model_template_weights("reset_model_template_weights", true); EvoNetParameters::PopulationTrainer::Logging population_logging("population_logging", true); EvoNetParameters::PopulationTrainer::SetPopulationSizeFixed set_population_size_fixed("set_population_size_fixed", false); EvoNetParameters::PopulationTrainer::SetPopulationSizeDoubling set_population_size_doubling("set_population_size_doubling", true); EvoNetParameters::PopulationTrainer::SetTrainingStepsByModelSize set_training_steps_by_model_size("set_training_steps_by_model_size", false); EvoNetParameters::ModelTrainer::BatchSize batch_size("batch_size", 32); EvoNetParameters::ModelTrainer::MemorySize memory_size("memory_size", 64); EvoNetParameters::ModelTrainer::NEpochsTraining n_epochs_training("n_epochs_training", 1000); EvoNetParameters::ModelTrainer::NEpochsValidation n_epochs_validation("n_epochs_validation", 25); EvoNetParameters::ModelTrainer::NEpochsEvaluation n_epochs_evaluation("n_epochs_evaluation", 10); EvoNetParameters::ModelTrainer::NTBTTSteps n_tbtt_steps("n_tbtt_steps", 64); EvoNetParameters::ModelTrainer::NTETTSteps n_tett_steps("n_tett_steps", 64); EvoNetParameters::ModelTrainer::Verbosity verbosity("verbosity", 1); EvoNetParameters::ModelTrainer::LoggingTraining logging_training("logging_training", true); EvoNetParameters::ModelTrainer::LoggingValidation logging_validation("logging_validation", false); EvoNetParameters::ModelTrainer::LoggingEvaluation logging_evaluation("logging_evaluation", true); EvoNetParameters::ModelTrainer::FindCycles find_cycles("find_cycles", true); EvoNetParameters::ModelTrainer::FastInterpreter fast_interpreter("fast_interpreter", true); EvoNetParameters::ModelTrainer::PreserveOoO preserve_ooo("preserve_ooo", true); EvoNetParameters::ModelTrainer::InterpretModel interpret_model("interpret_model", true); EvoNetParameters::ModelTrainer::ResetModel reset_model("reset_model", false); EvoNetParameters::ModelTrainer::NHidden0 n_hidden_0("n_hidden_0", 512); EvoNetParameters::ModelTrainer::NHidden1 n_hidden_1("n_hidden_1", 256); EvoNetParameters::ModelTrainer::NHidden2 n_hidden_2("n_hidden_2", 128); EvoNetParameters::ModelTrainer::LossFncWeight0 loss_fnc_weight_0("loss_fnc_weight_0", 1); EvoNetParameters::ModelTrainer::LossFncWeight1 loss_fnc_weight_1("loss_fnc_weight_1", 1e-6); EvoNetParameters::ModelTrainer::LossFncWeight2 loss_fnc_weight_2("loss_fnc_weight_2", 1e-6); EvoNetParameters::ModelTrainer::ResetInterpreter reset_interpreter("reset_interpreter", true); EvoNetParameters::ModelReplicator::NNodeDownAdditionsLB n_node_down_additions_lb("n_node_down_additions_lb", 0); EvoNetParameters::ModelReplicator::NNodeRightAdditionsLB n_node_right_additions_lb("n_node_right_additions_lb", 0); EvoNetParameters::ModelReplicator::NNodeDownCopiesLB n_node_down_copies_lb("n_node_down_copies_lb", 0); EvoNetParameters::ModelReplicator::NNodeRightCopiesLB n_node_right_copies_lb("n_node_right_copies_lb", 0); EvoNetParameters::ModelReplicator::NLinkAdditionsLB n_link_additons_lb("n_link_additons_lb", 0); EvoNetParameters::ModelReplicator::NLinkCopiesLB n_link_copies_lb("n_link_copies_lb", 0); EvoNetParameters::ModelReplicator::NNodeDeletionsLB n_node_deletions_lb("n_node_deletions_lb", 0); EvoNetParameters::ModelReplicator::NLinkDeletionsLB n_link_deletions_lb("n_link_deletions_lb", 0); EvoNetParameters::ModelReplicator::NNodeActivationChangesLB n_node_activation_changes_lb("n_node_activation_changes_lb", 0); EvoNetParameters::ModelReplicator::NNodeIntegrationChangesLB n_node_integration_changes_lb("n_node_integration_changes_lb", 0); EvoNetParameters::ModelReplicator::NModuleAdditionsLB n_module_additions_lb("n_module_additions_lb", 0); EvoNetParameters::ModelReplicator::NModuleCopiesLB n_module_copies_lb("n_module_copies_lb", 0); EvoNetParameters::ModelReplicator::NModuleDeletionsLB n_module_deletions_lb("n_module_deletions_lb", 0); EvoNetParameters::ModelReplicator::NNodeDownAdditionsUB n_node_down_additions_ub("n_node_down_additions_ub", 0); EvoNetParameters::ModelReplicator::NNodeRightAdditionsUB n_node_right_additions_ub("n_node_right_additions_ub", 0); EvoNetParameters::ModelReplicator::NNodeDownCopiesUB n_node_down_copies_ub("n_node_down_copies_ub", 0); EvoNetParameters::ModelReplicator::NNodeRightCopiesUB n_node_right_copies_ub("n_node_right_copies_ub", 0); EvoNetParameters::ModelReplicator::NLinkAdditionsUB n_link_additons_ub("n_link_additons_ub", 0); EvoNetParameters::ModelReplicator::NLinkCopiesUB n_link_copies_ub("n_link_copies_ub", 0); EvoNetParameters::ModelReplicator::NNodeDeletionsUB n_node_deletions_ub("n_node_deletions_ub", 0); EvoNetParameters::ModelReplicator::NLinkDeletionsUB n_link_deletions_ub("n_link_deletions_ub", 0); EvoNetParameters::ModelReplicator::NNodeActivationChangesUB n_node_activation_changes_ub("n_node_activation_changes_ub", 0); EvoNetParameters::ModelReplicator::NNodeIntegrationChangesUB n_node_integration_changes_ub("n_node_integration_changes_ub", 0); EvoNetParameters::ModelReplicator::NModuleAdditionsUB n_module_additions_ub("n_module_additions_ub", 0); EvoNetParameters::ModelReplicator::NModuleCopiesUB n_module_copies_ub("n_module_copies_ub", 0); EvoNetParameters::ModelReplicator::NModuleDeletionsUB n_module_deletions_ub("n_module_deletions_ub", 0); EvoNetParameters::ModelReplicator::SetModificationRateFixed set_modification_rate_fixed("set_modification_rate_fixed", false); EvoNetParameters::ModelReplicator::SetModificationRateByPrevError set_modification_rate_by_prev_error("set_modification_rate_by_prev_error", false); auto parameters = std::make_tuple(id, data_dir, device_id, model_name, make_model, load_model_csv, load_model_binary, train_model, evolve_model, evaluate_model, evaluate_models, model_type, simulation_type, population_name, n_generations, n_interpreters, prune_model_num, remove_isolated_nodes, check_complete_model_input_to_output, population_size, n_top, n_random, n_replicates_per_model, reset_model_copy_weights, reset_model_template_weights, population_logging, set_population_size_fixed, set_population_size_doubling, set_training_steps_by_model_size, batch_size, memory_size, n_epochs_training, n_epochs_validation, n_epochs_evaluation, n_tbtt_steps, n_tett_steps, verbosity, logging_training, logging_validation, logging_evaluation, find_cycles, fast_interpreter, preserve_ooo, interpret_model, reset_model, n_hidden_0, n_hidden_1, n_hidden_2, loss_fnc_weight_0, loss_fnc_weight_1, loss_fnc_weight_2, reset_interpreter, n_node_down_additions_lb, n_node_right_additions_lb, n_node_down_copies_lb, n_node_right_copies_lb, n_link_additons_lb, n_link_copies_lb, n_node_deletions_lb, n_link_deletions_lb, n_node_activation_changes_lb, n_node_integration_changes_lb, n_module_additions_lb, n_module_copies_lb, n_module_deletions_lb, n_node_down_additions_ub, n_node_right_additions_ub, n_node_down_copies_ub, n_node_right_copies_ub, n_link_additons_ub, n_link_copies_ub, n_node_deletions_ub, n_link_deletions_ub, n_node_activation_changes_ub, n_node_integration_changes_ub, n_module_additions_ub, n_module_copies_ub, n_module_deletions_ub, set_modification_rate_fixed, set_modification_rate_by_prev_error); // Read in the parameters LoadParametersFromCsv loadParametersFromCsv(id_int, parameters_filename); parameters = EvoNet::apply([&loadParametersFromCsv](auto&& ...args) { return loadParametersFromCsv(args...); }, parameters); // Run the application EvoNet::apply([](auto&& ...args) { main_(args ...); }, parameters); return 0; }<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_POPULATIONTRAINERFILE_H #define EVONET_POPULATIONTRAINERFILE_H // .h #include <EvoNet/ml/PopulationTrainer.h> #include <iostream> #include <fstream> #include <vector> // .cpp #include <EvoNet/io/CSVWriter.h> #include <EvoNet/ml/Model.h> #include <EvoNet/io/WeightFile.h> #include <EvoNet/io/LinkFile.h> #include <EvoNet/io/NodeFile.h> #include <EvoNet/io/ModelFile.h> namespace EvoNet { /** @brief PopulationTrainerFile */ template<typename TensorT> class PopulationTrainerFile { public: PopulationTrainerFile() = default; ///< Default constructor ~PopulationTrainerFile() = default; ///< Default destructor /** @brief remove characters that cannot be included in a filename @param[in, out] model_name The name of the model */ static void sanitizeModelName(std::string& model_name); /** @brief write all models to file Files written include: - links - nodes - weights - graph representation of the model in DOT format for vis using GraphVIZ @param[in] models The vector (i.e., population) of models @param[in] filename The base string to use when writing out the data files @returns True if successful, false otherwise */ bool storeModels(std::vector<Model<TensorT>>& models, const std::string& filename); /** @brief write Model average validation error to file @param[in] models_validation_errors Vector of model_name/average error pairs @returns True if successful, false otherwise */ bool storeModelValidations( const std::string& filename, const std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_validation_errors); }; template<typename TensorT> void PopulationTrainerFile<TensorT>::sanitizeModelName(std::string& model_name) { // sanitize the model name std::string illegalChars = "\\/:?\"<>|"; for (std::string::iterator it = model_name.begin(); it < model_name.end(); ++it) { bool found = illegalChars.find(*it) != std::string::npos; if (found) { *it = ' '; } } } template<typename TensorT> bool PopulationTrainerFile<TensorT>::storeModels(std::vector<Model<TensorT>>& models, const std::string& filename) { std::fstream file; // Open the file in truncate mode file.open(filename + ".sh", std::ios::out | std::ios::trunc); for (Model<TensorT>& model : models) { // write the model to file //std::string model_name = model.getName(); //sanitizeModelName(model_name); //std::string model_name_score = model_name + "_"; int model_id = model.getId(); std::string model_name_score = std::to_string(model_id) + "_"; WeightFile<TensorT> weightfile; weightfile.storeWeightsCsv(model_name_score + filename + "_Weights.csv", model.weights_); LinkFile linkfile; linkfile.storeLinksCsv(model_name_score + filename + "_Links.csv", model.links_); NodeFile<TensorT> nodefile; nodefile.storeNodesCsv(model_name_score + filename + "_Nodes.csv", model.nodes_); ModelFile<TensorT> modelfile; std::string dot_filename = model_name_score + filename + "_Graph.gv"; modelfile.storeModelDot(dot_filename, model); char sh_cmd_char[512]; sprintf(sh_cmd_char, "dot -Tpng -o %s.png %s\n", dot_filename.data(), dot_filename.data()); std::string sh_cmd(sh_cmd_char); file << sh_cmd; } file.close(); return true; } template<typename TensorT> bool PopulationTrainerFile<TensorT>::storeModelValidations( const std::string& filename, const std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_validation_errors) { CSVWriter csvwriter(filename); // write the headers to the first line const std::vector<std::string> headers = { "model_id", "model_name", "ave_validation_error", "generation" }; csvwriter.writeDataInRow(headers.begin(), headers.end()); int generation_iter = 0; for (const std::vector<std::tuple<int, std::string, TensorT>>& generation_errors : models_validation_errors) { for (const std::tuple<int, std::string, TensorT>& model_validation_error : generation_errors) { std::vector<std::string> row; row.push_back(std::to_string(std::get<0>(model_validation_error))); row.push_back(std::get<1>(model_validation_error)); char error[512]; sprintf(error, "%0.6f", std::get<2>(model_validation_error)); std::string error_str(error); row.push_back(error_str); row.push_back(std::to_string(generation_iter)); // write to file csvwriter.writeDataInRow(row.begin(), row.end()); } ++generation_iter; } return true; } } #endif //EVONET_POPULATIONTRAINERFILE_H<file_sep> cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) project("SmartPeak_coding_tests") # -------------------------------------------------------------------------- # cppcheck tests find_package(cppcheck) if( CPPCHECK_FOUND ) include( ${PROJECT_SOURCE_DIR}/cppcheck.cmake ) endif(CPPCHECK_FOUND) # -------------------------------------------------------------------------- # cpplint testing find_program(PYTHON_EXECUTABLE python DOC "python executable used to perform coding convention test.") if("${PYTHON_EXECUTABLE}" STREQUAL "PYTHON_EXECUTABLE-NOTFOUND") message(STATUS "Info: The program python could not be found. Coding convention check will not be available! Add the location of python(.exe) to your PATH environment variable.") else() include( ${PROJECT_SOURCE_DIR}/createcpplinttests.cmake ) endif("${PYTHON_EXECUTABLE}" STREQUAL "PYTHON_EXECUTABLE-NOTFOUND") <file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE LossFunction test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/LossFunction.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(lossFunction) /** ManhattanDistanceLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorEuclideanDistanceOp) { ManhattanDistanceLossOp<double>* ptrReLU = nullptr; ManhattanDistanceLossOp<double>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorEuclideanDistanceOp) { ManhattanDistanceLossOp<double>* ptrReLU = nullptr; ptrReLU = new ManhattanDistanceLossOp<double>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(gettersAndSettersEuclideanDistanceOp) { ManhattanDistanceLossOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "ManhattanDistanceLossOp"); } /** ManhattanDistanceLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorEuclideanDistanceGradOp) { ManhattanDistanceLossGradOp<double>* ptrReLU = nullptr; ManhattanDistanceLossGradOp<double>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorEuclideanDistanceGradOp) { ManhattanDistanceLossGradOp<double>* ptrReLU = nullptr; ptrReLU = new ManhattanDistanceLossGradOp<double>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(gettersAndSettersEuclideanDistanceGradOp) { ManhattanDistanceLossGradOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "ManhattanDistanceLossGradOp"); } /** L2NormLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorL2NormOp) { L2NormLossOp<double>* ptrL2Norm = nullptr; L2NormLossOp<double>* nullPointerL2Norm = nullptr; BOOST_CHECK_EQUAL(ptrL2Norm, nullPointerL2Norm); } BOOST_AUTO_TEST_CASE(destructorL2NormOp) { L2NormLossOp<double>* ptrL2Norm = nullptr; ptrL2Norm = new L2NormLossOp<double>(); delete ptrL2Norm; } BOOST_AUTO_TEST_CASE(gettersAndSettersL2NormOp) { L2NormLossOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "L2NormLossOp"); } /** L2NormLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorL2NormGradOp) { L2NormLossGradOp<double>* ptrL2Norm = nullptr; L2NormLossGradOp<double>* nullPointerL2Norm = nullptr; BOOST_CHECK_EQUAL(ptrL2Norm, nullPointerL2Norm); } BOOST_AUTO_TEST_CASE(destructorL2NormGradOp) { L2NormLossGradOp<double>* ptrL2Norm = nullptr; ptrL2Norm = new L2NormLossGradOp<double>(); delete ptrL2Norm; } BOOST_AUTO_TEST_CASE(gettersAndSettersL2NormGradOp) { L2NormLossGradOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "L2NormLossGradOp"); } /** CrossEntropyOp Tests */ BOOST_AUTO_TEST_CASE(constructorCrossEntropyOp) { BCELossOp<double>* ptrCrossEntropy = nullptr; BCELossOp<double>* nullPointerCrossEntropy = nullptr; BOOST_CHECK_EQUAL(ptrCrossEntropy, nullPointerCrossEntropy); } BOOST_AUTO_TEST_CASE(destructorCrossEntropyOp) { BCELossOp<double>* ptrCrossEntropy = nullptr; ptrCrossEntropy = new BCELossOp<double>(); delete ptrCrossEntropy; } BOOST_AUTO_TEST_CASE(gettersAndSettersBCEOp) { BCELossOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "BCELossOp"); } /** CrossEntropyGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorCrossEntropyGradOp) { BCELossGradOp<double>* ptrCrossEntropy = nullptr; BCELossGradOp<double>* nullPointerCrossEntropy = nullptr; BOOST_CHECK_EQUAL(ptrCrossEntropy, nullPointerCrossEntropy); } BOOST_AUTO_TEST_CASE(destructorCrossEntropyGradOp) { BCELossGradOp<double>* ptrCrossEntropy = nullptr; ptrCrossEntropy = new BCELossGradOp<double>(); delete ptrCrossEntropy; } BOOST_AUTO_TEST_CASE(gettersAndSettersBCEGradOp) { BCELossGradOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "BCELossGradOp"); } /** NegativeLogLikelihoodLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorNegativeLogLikelihoodOp) { NegativeLogLikelihoodLossOp<double>* ptrNegativeLogLikelihood = nullptr; NegativeLogLikelihoodLossOp<double>* nullPointerNegativeLogLikelihood = nullptr; BOOST_CHECK_EQUAL(ptrNegativeLogLikelihood, nullPointerNegativeLogLikelihood); } BOOST_AUTO_TEST_CASE(destructorNegativeLogLikelihoodOp) { NegativeLogLikelihoodLossOp<double>* ptrNegativeLogLikelihood = nullptr; ptrNegativeLogLikelihood = new NegativeLogLikelihoodLossOp<double>(); delete ptrNegativeLogLikelihood; } /** NegativeLogLikelihoodLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorNegativeLogLikelihoodGradOp) { NegativeLogLikelihoodLossGradOp<double>* ptrNegativeLogLikelihood = nullptr; NegativeLogLikelihoodLossGradOp<double>* nullPointerNegativeLogLikelihood = nullptr; BOOST_CHECK_EQUAL(ptrNegativeLogLikelihood, nullPointerNegativeLogLikelihood); } BOOST_AUTO_TEST_CASE(destructorNegativeLogLikelihoodGradOp) { NegativeLogLikelihoodLossGradOp<double>* ptrNegativeLogLikelihood = nullptr; ptrNegativeLogLikelihood = new NegativeLogLikelihoodLossGradOp<double>(); delete ptrNegativeLogLikelihood; } /** MSELossOp Tests */ BOOST_AUTO_TEST_CASE(constructorMSEOp) { MSELossOp<double>* ptrMSE = nullptr; MSELossOp<double>* nullPointerMSE = nullptr; BOOST_CHECK_EQUAL(ptrMSE, nullPointerMSE); } BOOST_AUTO_TEST_CASE(destructorMSEOp) { MSELossOp<double>* ptrMSE = nullptr; ptrMSE = new MSELossOp<double>(); delete ptrMSE; } /** MSELossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMSEGradOp) { MSELossGradOp<double>* ptrMSE = nullptr; MSELossGradOp<double>* nullPointerMSE = nullptr; BOOST_CHECK_EQUAL(ptrMSE, nullPointerMSE); } BOOST_AUTO_TEST_CASE(destructorMSEGradOp) { MSELossGradOp<double>* ptrMSE = nullptr; ptrMSE = new MSELossGradOp<double>(); delete ptrMSE; } /** MAELossOp Tests */ BOOST_AUTO_TEST_CASE(constructorMAEOp) { MAELossOp<double>* ptrMAE = nullptr; MAELossOp<double>* nullPointerMAE = nullptr; BOOST_CHECK_EQUAL(ptrMAE, nullPointerMAE); } BOOST_AUTO_TEST_CASE(destructorMAEOp) { MAELossOp<double>* ptrMAE = nullptr; ptrMAE = new MAELossOp<double>(); delete ptrMAE; } /** MAELossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMAEGradOp) { MAELossGradOp<double>* ptrMAE = nullptr; MAELossGradOp<double>* nullPointerMAE = nullptr; BOOST_CHECK_EQUAL(ptrMAE, nullPointerMAE); } BOOST_AUTO_TEST_CASE(destructorMAEGradOp) { MAELossGradOp<double>* ptrMAE = nullptr; ptrMAE = new MAELossGradOp<double>(); delete ptrMAE; } /** MRSELossOp Tests */ BOOST_AUTO_TEST_CASE(constructorMRSEOp) { MRSELossOp<double>* ptrMRSE = nullptr; MRSELossOp<double>* nullPointerMRSE = nullptr; BOOST_CHECK_EQUAL(ptrMRSE, nullPointerMRSE); } BOOST_AUTO_TEST_CASE(destructorMRSEOp) { MRSELossOp<double>* ptrMRSE = nullptr; ptrMRSE = new MRSELossOp<double>(); delete ptrMRSE; } /** MRSELossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMRSEGradOp) { MRSELossGradOp<double>* ptrMRSE = nullptr; MRSELossGradOp<double>* nullPointerMRSE = nullptr; BOOST_CHECK_EQUAL(ptrMRSE, nullPointerMRSE); } BOOST_AUTO_TEST_CASE(destructorMRSEGradOp) { MRSELossGradOp<double>* ptrMRSE = nullptr; ptrMRSE = new MRSELossGradOp<double>(); delete ptrMRSE; } /** MLELossOp Tests */ BOOST_AUTO_TEST_CASE(constructorMLEOp) { MLELossOp<double>* ptrMLE = nullptr; MLELossOp<double>* nullPointerMLE = nullptr; BOOST_CHECK_EQUAL(ptrMLE, nullPointerMLE); } BOOST_AUTO_TEST_CASE(destructorMLEOp) { MLELossOp<double>* ptrMLE = nullptr; ptrMLE = new MLELossOp<double>(); delete ptrMLE; } /** MLELossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMLEGradOp) { MLELossGradOp<double>* ptrMLE = nullptr; MLELossGradOp<double>* nullPointerMLE = nullptr; BOOST_CHECK_EQUAL(ptrMLE, nullPointerMLE); } BOOST_AUTO_TEST_CASE(destructorMLEGradOp) { MLELossGradOp<double>* ptrMLE = nullptr; ptrMLE = new MLELossGradOp<double>(); delete ptrMLE; } /** KLDivergenceMuLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorKLDivergenceMuOp) { KLDivergenceMuLossOp<double>* ptrKLDivergenceMu = nullptr; KLDivergenceMuLossOp<double>* nullPointerKLDivergenceMu = nullptr; BOOST_CHECK_EQUAL(ptrKLDivergenceMu, nullPointerKLDivergenceMu); } BOOST_AUTO_TEST_CASE(destructorKLDivergenceMuOp) { KLDivergenceMuLossOp<double>* ptrKLDivergenceMu = nullptr; ptrKLDivergenceMu = new KLDivergenceMuLossOp<double>(); delete ptrKLDivergenceMu; } BOOST_AUTO_TEST_CASE(gettersAndSettersKLDivergenceMuOp) { KLDivergenceMuLossOp<float> operation(1e-3, 1, 5); BOOST_CHECK_EQUAL(operation.getName(), "KLDivergenceMuLossOp"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 3); BOOST_CHECK_CLOSE(operation.getParameters().at(0), 1e-3, 1e-4); BOOST_CHECK_EQUAL(operation.getParameters().at(1), 1); BOOST_CHECK_EQUAL(operation.getParameters().at(2), 5); } /** KLDivergenceMuLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorKLDivergenceMuGradOp) { KLDivergenceMuLossGradOp<double>* ptrKLDivergenceMu = nullptr; KLDivergenceMuLossGradOp<double>* nullPointerKLDivergenceMu = nullptr; BOOST_CHECK_EQUAL(ptrKLDivergenceMu, nullPointerKLDivergenceMu); } BOOST_AUTO_TEST_CASE(destructorKLDivergenceMuGradOp) { KLDivergenceMuLossGradOp<double>* ptrKLDivergenceMu = nullptr; ptrKLDivergenceMu = new KLDivergenceMuLossGradOp<double>(); delete ptrKLDivergenceMu; } BOOST_AUTO_TEST_CASE(gettersAndSettersKLDivergenceGradMuOp) { KLDivergenceMuLossGradOp<float> operation(1e-3, 1, 5); BOOST_CHECK_EQUAL(operation.getName(), "KLDivergenceMuLossGradOp"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 3); BOOST_CHECK_CLOSE(operation.getParameters().at(0), 1e-3, 1e-4); BOOST_CHECK_EQUAL(operation.getParameters().at(1), 1); BOOST_CHECK_EQUAL(operation.getParameters().at(2), 5); } /** KLDivergenceLogVarLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorKLDivergenceLogVarOp) { KLDivergenceLogVarLossOp<double>* ptrKLDivergenceLogVar = nullptr; KLDivergenceLogVarLossOp<double>* nullPointerKLDivergenceLogVar = nullptr; BOOST_CHECK_EQUAL(ptrKLDivergenceLogVar, nullPointerKLDivergenceLogVar); } BOOST_AUTO_TEST_CASE(destructorKLDivergenceLogVarOp) { KLDivergenceLogVarLossOp<double>* ptrKLDivergenceLogVar = nullptr; ptrKLDivergenceLogVar = new KLDivergenceLogVarLossOp<double>(); delete ptrKLDivergenceLogVar; } BOOST_AUTO_TEST_CASE(gettersAndSettersKLDivergenceLogVarOp) { KLDivergenceLogVarLossOp<float> operation(1e-3, 1, 5); BOOST_CHECK_EQUAL(operation.getName(), "KLDivergenceLogVarLossOp"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 3); BOOST_CHECK_CLOSE(operation.getParameters().at(0), 1e-3, 1e-4); BOOST_CHECK_EQUAL(operation.getParameters().at(1), 1); BOOST_CHECK_EQUAL(operation.getParameters().at(2), 5); } /** KLDivergenceLogVarLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorKLDivergenceLogVarGradOp) { KLDivergenceLogVarLossGradOp<double>* ptrKLDivergenceLogVar = nullptr; KLDivergenceLogVarLossGradOp<double>* nullPointerKLDivergenceLogVar = nullptr; BOOST_CHECK_EQUAL(ptrKLDivergenceLogVar, nullPointerKLDivergenceLogVar); } BOOST_AUTO_TEST_CASE(destructorKLDivergenceLogVarGradOp) { KLDivergenceLogVarLossGradOp<double>* ptrKLDivergenceLogVar = nullptr; ptrKLDivergenceLogVar = new KLDivergenceLogVarLossGradOp<double>(); delete ptrKLDivergenceLogVar; } BOOST_AUTO_TEST_CASE(gettersAndSettersKLDivergenceLogVarLossGradOp) { KLDivergenceLogVarLossGradOp<float> operation(1e-3, 1, 5); BOOST_CHECK_EQUAL(operation.getName(), "KLDivergenceLogVarLossGradOp"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 3); BOOST_CHECK_CLOSE(operation.getParameters().at(0), 1e-3, 1e-4); BOOST_CHECK_EQUAL(operation.getParameters().at(1), 1); BOOST_CHECK_EQUAL(operation.getParameters().at(2), 5); } /** BCEWithLogitsLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorBCEWithLogitsOp) { BCEWithLogitsLossOp<double>* ptrBCEWithLogits = nullptr; BCEWithLogitsLossOp<double>* nullPointerBCEWithLogits = nullptr; BOOST_CHECK_EQUAL(ptrBCEWithLogits, nullPointerBCEWithLogits); } BOOST_AUTO_TEST_CASE(destructorBCEWithLogitsOp) { BCEWithLogitsLossOp<double>* ptrBCEWithLogits = nullptr; ptrBCEWithLogits = new BCEWithLogitsLossOp<double>(); delete ptrBCEWithLogits; } /** BCEWithLogitsLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorBCEWithLogitsGradOp) { BCEWithLogitsLossGradOp<double>* ptrBCEWithLogits = nullptr; BCEWithLogitsLossGradOp<double>* nullPointerBCEWithLogits = nullptr; BOOST_CHECK_EQUAL(ptrBCEWithLogits, nullPointerBCEWithLogits); } BOOST_AUTO_TEST_CASE(destructorBCEWithLogitsGradOp) { BCEWithLogitsLossGradOp<double>* ptrBCEWithLogits = nullptr; ptrBCEWithLogits = new BCEWithLogitsLossGradOp<double>(); delete ptrBCEWithLogits; } /** MSERangeUBLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorMSERangeUBOp) { MSERangeUBLossOp<double>* ptrMSERangeUB = nullptr; MSERangeUBLossOp<double>* nullPointerMSERangeUB = nullptr; BOOST_CHECK_EQUAL(ptrMSERangeUB, nullPointerMSERangeUB); } BOOST_AUTO_TEST_CASE(destructorMSERangeUBOp) { MSERangeUBLossOp<double>* ptrMSERangeUB = nullptr; ptrMSERangeUB = new MSERangeUBLossOp<double>(); delete ptrMSERangeUB; } BOOST_AUTO_TEST_CASE(gettersAndSettersMSERangeUBOp) { MSERangeUBLossOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MSERangeUBLossOp"); } /** MSERangeUBLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMSERangeUBGradOp) { MSERangeUBLossGradOp<double>* ptrMSERangeUB = nullptr; MSERangeUBLossGradOp<double>* nullPointerMSERangeUB = nullptr; BOOST_CHECK_EQUAL(ptrMSERangeUB, nullPointerMSERangeUB); } BOOST_AUTO_TEST_CASE(destructorMSERangeUBGradOp) { MSERangeUBLossGradOp<double>* ptrMSERangeUB = nullptr; ptrMSERangeUB = new MSERangeUBLossGradOp<double>(); delete ptrMSERangeUB; } BOOST_AUTO_TEST_CASE(gettersAndSettersMSERangeUBGradOp) { MSERangeUBLossGradOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MSERangeUBLossGradOp"); } /** MSERangeLBLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorMSERangeLBOp) { MSERangeLBLossOp<double>* ptrMSERangeLB = nullptr; MSERangeLBLossOp<double>* nullPointerMSERangeLB = nullptr; BOOST_CHECK_EQUAL(ptrMSERangeLB, nullPointerMSERangeLB); } BOOST_AUTO_TEST_CASE(destructorMSERangeLBOp) { MSERangeLBLossOp<double>* ptrMSERangeLB = nullptr; ptrMSERangeLB = new MSERangeLBLossOp<double>(); delete ptrMSERangeLB; } BOOST_AUTO_TEST_CASE(gettersAndSettersMSERangeLBOp) { MSERangeLBLossOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MSERangeLBLossOp"); } /** MSERangeLBLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMSERangeLBGradOp) { MSERangeLBLossGradOp<double>* ptrMSERangeLB = nullptr; MSERangeLBLossGradOp<double>* nullPointerMSERangeLB = nullptr; BOOST_CHECK_EQUAL(ptrMSERangeLB, nullPointerMSERangeLB); } BOOST_AUTO_TEST_CASE(destructorMSERangeLBGradOp) { MSERangeLBLossGradOp<double>* ptrMSERangeLB = nullptr; ptrMSERangeLB = new MSERangeLBLossGradOp<double>(); delete ptrMSERangeLB; } BOOST_AUTO_TEST_CASE(gettersAndSettersMSERangeLBGradOp) { MSERangeLBLossGradOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MSERangeLBLossGradOp"); } /** KLDivergenceCatLossOp Tests */ BOOST_AUTO_TEST_CASE(constructorKLDivergenceCatOp) { KLDivergenceCatLossOp<double>* ptrKLDivergenceCat = nullptr; KLDivergenceCatLossOp<double>* nullPointerKLDivergenceCat = nullptr; BOOST_CHECK_EQUAL(ptrKLDivergenceCat, nullPointerKLDivergenceCat); } BOOST_AUTO_TEST_CASE(destructorKLDivergenceCatOp) { KLDivergenceCatLossOp<double>* ptrKLDivergenceCat = nullptr; ptrKLDivergenceCat = new KLDivergenceCatLossOp<double>(); delete ptrKLDivergenceCat; } BOOST_AUTO_TEST_CASE(gettersAndSettersKLDivergenceCatOp) { KLDivergenceCatLossOp<float> operation(1e-3, 1, 5); BOOST_CHECK_EQUAL(operation.getName(), "KLDivergenceCatLossOp"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 3); BOOST_CHECK_CLOSE(operation.getParameters().at(0), 1e-3, 1e-4); BOOST_CHECK_EQUAL(operation.getParameters().at(1), 1); BOOST_CHECK_EQUAL(operation.getParameters().at(2), 5); } /** KLDivergenceCatLossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorKLDivergenceCatGradOp) { KLDivergenceCatLossGradOp<double>* ptrKLDivergenceCat = nullptr; KLDivergenceCatLossGradOp<double>* nullPointerKLDivergenceCat = nullptr; BOOST_CHECK_EQUAL(ptrKLDivergenceCat, nullPointerKLDivergenceCat); } BOOST_AUTO_TEST_CASE(destructorKLDivergenceCatGradOp) { KLDivergenceCatLossGradOp<double>* ptrKLDivergenceCat = nullptr; ptrKLDivergenceCat = new KLDivergenceCatLossGradOp<double>(); delete ptrKLDivergenceCat; } BOOST_AUTO_TEST_CASE(gettersAndSettersKLDivergenceCatGradOp) { KLDivergenceCatLossGradOp<float> operation(1e-3, 1, 5); BOOST_CHECK_EQUAL(operation.getName(), "KLDivergenceCatLossGradOp"); BOOST_CHECK_EQUAL(operation.getParameters().size(), 3); BOOST_CHECK_CLOSE(operation.getParameters().at(0), 1e-3, 1e-4); BOOST_CHECK_EQUAL(operation.getParameters().at(1), 1); BOOST_CHECK_EQUAL(operation.getParameters().at(2), 5); } /** MAPELossOp Tests */ BOOST_AUTO_TEST_CASE(constructorMAPELossOp) { MAPELossOp<double>* ptrMAPELoss = nullptr; MAPELossOp<double>* nullPointerMAPELoss = nullptr; BOOST_CHECK_EQUAL(ptrMAPELoss, nullPointerMAPELoss); } BOOST_AUTO_TEST_CASE(destructorMAPELossOp) { MAPELossOp<double>* ptrMAPELoss = nullptr; ptrMAPELoss = new MAPELossOp<double>(); delete ptrMAPELoss; } BOOST_AUTO_TEST_CASE(gettersAndSettersMAPELossOp) { MAPELossOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MAPELossOp"); } /** MAPELossGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorMAPELossGradOp) { MAPELossGradOp<double>* ptrMAPELoss = nullptr; MAPELossGradOp<double>* nullPointerMAPELoss = nullptr; BOOST_CHECK_EQUAL(ptrMAPELoss, nullPointerMAPELoss); } BOOST_AUTO_TEST_CASE(destructorMAPELossGradOp) { MAPELossGradOp<double>* ptrMAPELoss = nullptr; ptrMAPELoss = new MAPELossGradOp<double>(); delete ptrMAPELoss; } BOOST_AUTO_TEST_CASE(gettersAndSettersMAPELossGradOp) { MAPELossGradOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "MAPELossGradOp"); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_POPULATIONTRAINEREXPERIMENTAL_H #define EVONET_POPULATIONTRAINEREXPERIMENTAL_H // .h #include <EvoNet/ml/PopulationTrainer.h> namespace EvoNet { /** @brief Experimental methods for PopulationTrainer */ template<typename TensorT, typename InterpreterT> class PopulationTrainerExperimental: public PopulationTrainer<TensorT, InterpreterT> { public: PopulationTrainerExperimental() = default; ///< Default constructor ~PopulationTrainerExperimental() = default; ///< Default destructor /// Overrides and members used in all examples bool set_population_size_fixed_ = false; bool set_population_size_doubling_ = false; bool set_training_steps_by_model_size_ = false; /* @brief Implementation of the `adaptivePopulationScheduler` */ void adaptivePopulationScheduler(const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) override; /* @brief Implementation of the `trainingPopulationLogger` */ void trainingPopulationLogger(const int& n_generations, std::vector<Model<TensorT>>& models, PopulationLogger<TensorT>& population_logger, const std::vector<std::tuple<int, std::string, TensorT>>& models_validation_errors_per_generation) override; /* @brief `adaptivePopulationScheduler` helper method to adjust the population size based on the number of generations error rates of training @param[in] n_generations The number of generations @param[in] models A vector of models representing the population @param[in] models_errors_per_generations A record of model errors per generation */ void setPopulationSizeFixed( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations); /* @brief `adaptivePopulationScheduler` helper method to adjust the population size for growth and selection modes 1. growth phase: each model doubles for a period of time (e.g., 1, 2, 4, 8, 16, 32, 64, 128, ...) 2. selection phase: best models are selected (e.g., from 64 to 8) @param[in] n_generations The number of generations @param[in] models A vector of models representing the population @param[in] models_errors_per_generations A record of model errors per generation */ void setPopulationSizeDoubling( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations); /* @brief Adjust the number of training steps based on the average model size @param[in] models A vector of models representing the population */ void setTrainingStepsByModelSize(std::vector<Model<TensorT>>& models); private: int n_top__; int n_random__; int prune_model_num__; }; template<typename TensorT, typename InterpreterT> inline void PopulationTrainerExperimental<TensorT, InterpreterT>::adaptivePopulationScheduler(const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) { // Adjust the population size if (set_population_size_fixed_) this->setPopulationSizeFixed(n_generations, models, models_errors_per_generations); else if (set_population_size_doubling_) this->setPopulationSizeDoubling(n_generations, models, models_errors_per_generations); // Adjust the training steps if (set_training_steps_by_model_size_) this->setTrainingStepsByModelSize(models); } template<typename TensorT, typename InterpreterT> inline void PopulationTrainerExperimental<TensorT, InterpreterT>::trainingPopulationLogger(const int& n_generations, std::vector<Model<TensorT>>& models, PopulationLogger<TensorT>& population_logger, const std::vector<std::tuple<int, std::string, TensorT>>& models_validation_errors_per_generation) { // Export the selected models for (auto& model : models) { ModelFile<TensorT> data; data.storeModelCsv(model.getName() + "_" + std::to_string(n_generations) + "_nodes.csv", model.getName() + "_" + std::to_string(n_generations) + "_links.csv", model.getName() + "_" + std::to_string(n_generations) + "_weights.csv", model); } // Log the population statistics population_logger.writeLogs(n_generations, models_validation_errors_per_generation); } template<typename TensorT, typename InterpreterT> inline void PopulationTrainerExperimental<TensorT, InterpreterT>::setPopulationSizeFixed( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) { // Adjust the population sizes const size_t selection_ratio = this->getPopulationSize() / this->getNRandom(); if (n_generations == 0) { this->setNReplicatesPerModel(this->getPopulationSize() - 1); } else { this->setNReplicatesPerModel(selection_ratio - 1); } } template<typename TensorT, typename InterpreterT> inline void PopulationTrainerExperimental<TensorT, InterpreterT>::setPopulationSizeDoubling( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) { // Save the initial top/random selection sizes if (n_generations == 0) { n_top__ = this->getNTop(); n_random__ = this->getNRandom(); prune_model_num__ = this->getPruneModelNum(); } // Adjust the population sizes if (models.size() >= this->getPopulationSize()) { this->setNTop(n_top__); this->setNRandom(n_random__); this->setNReplicatesPerModel(1); // doubling this->setRemoveIsolatedNodes(true); this->setPruneModelNum(prune_model_num__); this->setCheckCompleteModelInputToOutput(true); this->setNEpochsTraining(1001); // NOTE: this will be overwritten (so long as it is greater than 0) by the value of `ModelTrainer::n_epochs_training` during the call to `updateNEpochsTraining` this->setSelectModels(true); } else { this->setNTop(models.size()); this->setNRandom(models.size()); this->setNReplicatesPerModel(1); // doubling this->setRemoveIsolatedNodes(false); this->setPruneModelNum(0); this->setCheckCompleteModelInputToOutput(false); this->setNEpochsTraining(0); this->setSelectModels(false); } } template<typename TensorT, typename InterpreterT> inline void PopulationTrainerExperimental<TensorT, InterpreterT>::setTrainingStepsByModelSize(std::vector<Model<TensorT>>& models) { // Calculate the average model size TensorT mean_model_size = 0; for (Model<TensorT>& model : models) { int links = model.getLinksMap().size(); mean_model_size += links; } mean_model_size = mean_model_size / models.size(); // Adjust the number of training steps if (mean_model_size <= 8) this->setNEpochsTraining(100); else if (mean_model_size <= 16) this->setNEpochsTraining(200); else if (mean_model_size <= 32) this->setNEpochsTraining(400); else if (mean_model_size <= 64) this->setNEpochsTraining(800); } } #endif //EVONET_POPULATIONTRAINEREXPERIMENTAL_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_METABOLOMICSCLASSIFICATIONDATASIMULATOR_H #define EVONET_METABOLOMICSCLASSIFICATIONDATASIMULATOR_H // .h #include <EvoNet/simulator/BiochemicalDataSimulator.h> namespace EvoNet { template<typename TensorT> class MetabolomicsClassificationDataSimulator : public BiochemicalDataSimulator<TensorT> { public: void makeTrainingDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_training, const std::vector<std::string>& labels_training, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) override; void makeValidationDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_validation, const std::vector<std::string>& labels_validation, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) override; void readAndProcessMetabolomicsTrainingAndValidationData(int& n_reaction_ids_training, int& n_labels_training, int& n_component_group_names_training, int& n_reaction_ids_validation, int& n_labels_validation, int& n_component_group_names_validation, const std::string& biochem_rxns_filename, const std::string& metabo_data_filename_train, const std::string& meta_data_filename_train, const std::string& metabo_data_filename_test, const std::string& meta_data_filename_test, const bool& use_concentrations, const bool& use_MARs, const bool& sample_values, const bool& iter_values, const bool& fill_sampling, const bool& fill_mean, const bool& fill_zero, const bool& apply_fold_change, const std::string& fold_change_ref, const TensorT& fold_change_log_base, const bool& offline_linear_scale_input, const bool& offline_log_transform_input, const bool& offline_standardize_input, const bool& online_linear_scale_input, const bool& online_log_transform_input, const bool& online_standardize_input, int& n_reps_per_sample, const bool& randomize_sample_group_names, const bool& shuffle_data_and_labels, const int& n_epochs, const int& batch_size, const int& memory_size) override; }; template<typename TensorT> inline void MetabolomicsClassificationDataSimulator<TensorT>::makeTrainingDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_training, const std::vector<std::string>& labels_training, const int & n_epochs, const int & batch_size, const int & memory_size, const int & n_input_nodes, const int & n_loss_output_nodes, const int & n_metric_output_nodes, const bool& shuffle_data_and_labels) { // infer the input sizes const int input_nodes = data_training.dimension(0); assert(n_input_nodes == input_nodes); assert(n_loss_output_nodes == labels_training_.size()); assert(n_metric_output_nodes == labels_training_.size()); // accuracy and precision assert(data_training.dimension(0) == features.size()); assert(data_training.dimension(1) == labels_training.size()); // initialize the Tensors this->input_data_training_.resize(batch_size, memory_size, n_input_nodes, n_epochs); this->loss_output_data_training_.resize(batch_size, memory_size, n_loss_output_nodes, n_epochs); this->metric_output_data_training_.resize(batch_size, memory_size, n_metric_output_nodes, n_epochs); this->time_steps_training_.resize(batch_size, memory_size, n_epochs); // expand the training data to fit into the requested input size const int expansion_factor = maxFunc(std::ceil(TensorT(batch_size * n_epochs) / TensorT(data_training.dimension(1))), 1); const int over_expanded = data_training.dimension(1)*expansion_factor - batch_size * n_epochs; assert(batch_size * memory_size * n_epochs == data_training.dimension(1)*expansion_factor - over_expanded); Eigen::Tensor<TensorT, 2> data_training_expanded(data_training.dimension(0), data_training.dimension(1)*expansion_factor); Eigen::Tensor<std::string, 2> labels_training_expanded(data_training.dimension(1)*expansion_factor, 1); for (int i = 0; i < expansion_factor; ++i) { // Slices for the data Eigen::array<Eigen::Index, 2> offset1 = { 0, i*data_training.dimension(1) }; Eigen::array<Eigen::Index, 2> span1 = { data_training.dimension(0), data_training.dimension(1) }; data_training_expanded.slice(offset1, span1) = data_training; // Slices for the labels for (int j = 0; j < data_training.dimension(1); ++j) { labels_training_expanded(i*data_training.dimension(1) + j, 0) = labels_training.at(j); } //Eigen::array<Eigen::Index, 2> offset2 = { i*data_training.dimension(1), 0 }; //Eigen::array<Eigen::Index, 2> span2 = { data_training.dimension(1), 1 }; //Eigen::TensorMap<Eigen::Tensor<std::string, 2>> labels_2d(labels_training.data(), data_training.dimension(1), 1); //labels_training_expanded.slice(offset2, span2) = labels_2d; } // make the one-hot encodings Eigen::Tensor<TensorT, 2> one_hot_vec = OneHotEncoder<std::string, TensorT>(labels_training_expanded, this->labels_training_); //Eigen::Tensor<TensorT, 2> one_hot_vec_smoothed = one_hot_vec.unaryExpr(LabelSmoother<TensorT>(0.01, 0.01)); // optionally shuffle the data and labels if (shuffle_data_and_labels) { MakeShuffleMatrix<TensorT> shuffleMatrix(data_training.dimension(1) * expansion_factor, true); shuffleMatrix(data_training_expanded, true); shuffleMatrix.setShuffleMatrix(false); // re-orient for column with the same random indices shuffleMatrix(one_hot_vec, false); } // assign the input tensors auto data_training_expanded_4d = data_training_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_training.dimension(0), data_training.dimension(1)*expansion_factor - over_expanded }) ).reshape(Eigen::array<Eigen::Index, 4>({ data_training.dimension(0), batch_size, memory_size, n_epochs }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 1,2,0,3 })); this->input_data_training_ = data_training_expanded_4d; //// Check that values of the data and input tensors are correctly aligned //Eigen::Tensor<TensorT, 1> data_training_head = data_training_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), // Eigen::array<Eigen::Index, 2>({ data_training.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_training.dimension(0) })); //Eigen::Tensor<TensorT, 1> data_training_tail = data_training_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), // Eigen::array<Eigen::Index, 2>({ data_training.dimension(0), data_training.dimension(1)*expansion_factor - over_expanded }) //).slice(Eigen::array<Eigen::Index, 2>({ 0, batch_size * memory_size * n_epochs - 1 }), // Eigen::array<Eigen::Index, 2>({ data_training.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_training.dimension(0) })); //Eigen::Tensor<TensorT, 1> input_training_head = this->input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), // Eigen::array<Eigen::Index, 4>({ 1, 1, data_training.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_training.dimension(0) })); //Eigen::Tensor<TensorT, 1> input_training_tail = this->input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), // Eigen::array<Eigen::Index, 4>({ 1, 1, data_training.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_training.dimension(0) })); //std::cout << "data_training_head\n" << data_training_head << std::endl; //std::cout << "data_training_tail\n" << data_training_tail << std::endl; //for (int i = 0; i < data_training.dimension(0); ++i) { // assert(data_training_head(i) == input_training_head(i)); // assert(data_training_tail(i) == input_training_tail(i)); //} // assign the loss tensors auto one_hot_vec_4d = one_hot_vec.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_training.dimension(1)*expansion_factor - over_expanded, one_hot_vec.dimension(1) }) ).reshape(Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, n_epochs, int(labels_training_.size()) }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 0,1,3,2 })); this->loss_output_data_training_ = one_hot_vec_4d; //// Check that values of the labels and output tensors are correctly aligned //Eigen::Tensor<TensorT, 1> labels_training_head = one_hot_vec.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), // Eigen::array<Eigen::Index, 2>({ 1, int(labels_training_.size()) }) //).reshape(Eigen::array<Eigen::Index, 1>({ int(labels_training_.size()) })); //Eigen::Tensor<TensorT, 1> labels_training_tail = one_hot_vec.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), // Eigen::array<Eigen::Index, 2>({ data_training.dimension(1)*expansion_factor - over_expanded, one_hot_vec.dimension(1) }) //).slice(Eigen::array<Eigen::Index, 2>({ batch_size * memory_size * n_epochs - 1, 0 }), // Eigen::array<Eigen::Index, 2>({ 1, int(labels_training_.size()) }) //).reshape(Eigen::array<Eigen::Index, 1>({ int(labels_training_.size()) })); //Eigen::Tensor<TensorT, 1> loss_training_head = this->loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), // Eigen::array<Eigen::Index, 4>({ 1, 1, int(labels_training_.size()), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ int(labels_training_.size()) })); //Eigen::Tensor<TensorT, 1> loss_training_tail = this->loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), // Eigen::array<Eigen::Index, 4>({ 1, 1, int(labels_training_.size()), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ int(labels_training_.size()) })); //std::cout << "labels_training_head\n" << labels_training_head << std::endl; //std::cout << "labels_training_tail\n" << labels_training_tail << std::endl; //for (int i = 0; i < int(labels_training_.size()); ++i) { // assert(labels_training_head(i) == loss_training_head(i)); // assert(labels_training_tail(i) == loss_training_tail(i)); //} // assign the metric tensors this->metric_output_data_training_ = one_hot_vec_4d; } template<typename TensorT> inline void MetabolomicsClassificationDataSimulator<TensorT>::makeValidationDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_validation, const std::vector<std::string>& labels_validation, const int & n_epochs, const int & batch_size, const int & memory_size, const int & n_input_nodes, const int & n_loss_output_nodes, const int & n_metric_output_nodes, const bool& shuffle_data_and_labels) { // infer the input sizes const int input_nodes = data_validation.dimension(0); assert(n_input_nodes == input_nodes); assert(n_loss_output_nodes == labels_validation_.size()); assert(n_metric_output_nodes == labels_validation_.size()); // accuracy and precision assert(data_validation.dimension(0) == features.size()); assert(data_validation.dimension(1) == labels_validation.size()); // initialize the Tensors this->input_data_validation_.resize(batch_size, memory_size, n_input_nodes, n_epochs); this->loss_output_data_validation_.resize(batch_size, memory_size, n_loss_output_nodes, n_epochs); this->metric_output_data_validation_.resize(batch_size, memory_size, n_metric_output_nodes, n_epochs); this->time_steps_validation_.resize(batch_size, memory_size, n_epochs); // expand the validation data to fit into the requested input size const int expansion_factor = maxFunc(std::ceil(TensorT(batch_size * n_epochs) / TensorT(data_validation.dimension(1))), 1); const int over_expanded = data_validation.dimension(1)*expansion_factor - batch_size * n_epochs; assert(batch_size * memory_size * n_epochs == data_validation.dimension(1)*expansion_factor - over_expanded); Eigen::Tensor<TensorT, 2> data_validation_expanded(data_validation.dimension(0), data_validation.dimension(1)*expansion_factor); Eigen::Tensor<std::string, 2> labels_validation_expanded(data_validation.dimension(1)*expansion_factor, 1); for (int i = 0; i < expansion_factor; ++i) { // Slices for the data Eigen::array<Eigen::Index, 2> offset1 = { 0, i*data_validation.dimension(1) }; Eigen::array<Eigen::Index, 2> span1 = { data_validation.dimension(0), data_validation.dimension(1) }; data_validation_expanded.slice(offset1, span1) = data_validation; // Slices for the labels for (int j = 0; j < data_validation.dimension(1); ++j) { labels_validation_expanded(i*data_validation.dimension(1) + j, 0) = labels_validation.at(j); } //Eigen::array<Eigen::Index, 2> offset2 = { i*data_validation.dimension(1), 0 }; //Eigen::array<Eigen::Index, 2> span2 = { data_validation.dimension(1), 1 }; //Eigen::TensorMap<Eigen::Tensor<std::string, 2>> labels_2d(labels_validation.data(), data_validation.dimension(1), 1); //labels_validation_expanded.slice(offset2, span2) = labels_2d; } // make the one-hot encodings Eigen::Tensor<TensorT, 2> one_hot_vec = OneHotEncoder<std::string, TensorT>(labels_validation_expanded, this->labels_validation_); //Eigen::Tensor<TensorT, 2> one_hot_vec_smoothed = one_hot_vec.unaryExpr(LabelSmoother<TensorT>(0.01, 0.01)); // optionally shuffle the data and labels if (shuffle_data_and_labels) { MakeShuffleMatrix<TensorT> shuffleMatrix(data_validation.dimension(1)*expansion_factor, true); shuffleMatrix(data_validation_expanded, true); shuffleMatrix.setShuffleMatrix(false); // re-orient for column with the same random indices shuffleMatrix(one_hot_vec, false); } // assign the input tensors auto data_validation_expanded_4d = data_validation_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_validation.dimension(0), data_validation.dimension(1)*expansion_factor - over_expanded }) ).reshape(Eigen::array<Eigen::Index, 4>({ data_validation.dimension(0), batch_size, memory_size, n_epochs }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 1,2,0,3 })); this->input_data_validation_ = data_validation_expanded_4d; //// Check that values of the data and input tensors are correctly aligned //Eigen::Tensor<TensorT, 1> data_validation_head = data_validation_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), // Eigen::array<Eigen::Index, 2>({ data_validation.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_validation.dimension(0) })); //Eigen::Tensor<TensorT, 1> data_validation_tail = data_validation_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), // Eigen::array<Eigen::Index, 2>({ data_validation.dimension(0), data_validation.dimension(1)*expansion_factor - over_expanded }) //).slice(Eigen::array<Eigen::Index, 2>({ 0, batch_size * memory_size * n_epochs - 1 }), // Eigen::array<Eigen::Index, 2>({ data_validation.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_validation.dimension(0) })); //Eigen::Tensor<TensorT, 1> input_validation_head = this->input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), // Eigen::array<Eigen::Index, 4>({ 1, 1, data_validation.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_validation.dimension(0) })); //Eigen::Tensor<TensorT, 1> input_validation_tail = this->input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), // Eigen::array<Eigen::Index, 4>({ 1, 1, data_validation.dimension(0), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ data_validation.dimension(0) })); //std::cout << "data_validation_head\n" << data_validation_head << std::endl; //std::cout << "data_validation_tail\n" << data_validation_tail << std::endl; //for (int i = 0; i < data_validation.dimension(0); ++i) { // assert(data_validation_head(i) == input_validation_head(i)); // assert(data_validation_tail(i) == input_validation_tail(i)); //} // assign the loss tensors auto one_hot_vec_4d = one_hot_vec.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_validation.dimension(1)*expansion_factor - over_expanded, one_hot_vec.dimension(1) }) ).reshape(Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, n_epochs, int(labels_validation_.size()) }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 0,1,3,2 })); this->loss_output_data_validation_ = one_hot_vec_4d; //// Check that values of the labels and output tensors are correctly aligned //Eigen::Tensor<TensorT, 1> labels_validation_head = one_hot_vec.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), // Eigen::array<Eigen::Index, 2>({ 1, int(labels_validation_.size()) }) //).reshape(Eigen::array<Eigen::Index, 1>({ int(labels_validation_.size()) })); //Eigen::Tensor<TensorT, 1> labels_validation_tail = one_hot_vec.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), // Eigen::array<Eigen::Index, 2>({ data_validation.dimension(1)*expansion_factor - over_expanded, one_hot_vec.dimension(1) }) //).slice(Eigen::array<Eigen::Index, 2>({ batch_size * memory_size * n_epochs - 1, 0 }), // Eigen::array<Eigen::Index, 2>({ 1, int(labels_validation_.size()) }) //).reshape(Eigen::array<Eigen::Index, 1>({ int(labels_validation_.size()) })); //Eigen::Tensor<TensorT, 1> loss_validation_head = this->loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), // Eigen::array<Eigen::Index, 4>({ 1, 1, int(labels_validation_.size()), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ int(labels_validation_.size()) })); //Eigen::Tensor<TensorT, 1> loss_validation_tail = this->loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), // Eigen::array<Eigen::Index, 4>({ 1, 1, int(labels_validation_.size()), 1 }) //).reshape(Eigen::array<Eigen::Index, 1>({ int(labels_validation_.size()) })); //std::cout << "labels_validation_head\n" << labels_validation_head << std::endl; //std::cout << "labels_validation_tail\n" << labels_validation_tail << std::endl; //for (int i = 0; i < int(labels_validation_.size()); ++i) { // assert(labels_validation_head(i) == loss_validation_head(i)); // assert(labels_validation_tail(i) == loss_validation_tail(i)); //} // assign the metric tensors this->metric_output_data_validation_ = one_hot_vec_4d; } template<typename TensorT> inline void MetabolomicsClassificationDataSimulator<TensorT>::readAndProcessMetabolomicsTrainingAndValidationData(int & n_reaction_ids_training, int & n_labels_training, int & n_component_group_names_training, int & n_reaction_ids_validation, int & n_labels_validation, int & n_component_group_names_validation, const std::string & biochem_rxns_filename, const std::string & metabo_data_filename_train, const std::string & meta_data_filename_train, const std::string & metabo_data_filename_test, const std::string & meta_data_filename_test, const bool & use_concentrations, const bool & use_MARs, const bool & sample_values, const bool & iter_values, const bool & fill_sampling, const bool & fill_mean, const bool & fill_zero, const bool & apply_fold_change, const std::string & fold_change_ref, const TensorT & fold_change_log_base, const bool & offline_linear_scale_input, const bool & offline_log_transform_input, const bool & offline_standardize_input, const bool & online_linear_scale_input, const bool & online_log_transform_input, const bool & online_standardize_input, int & n_reps_per_sample, const bool& randomize_sample_group_names, const bool& shuffle_data_and_labels, const int & n_epochs, const int & batch_size, const int & memory_size) { // Read in the data and make the data matrices std::vector<std::string> labels_training; std::vector<std::string> features_training; Eigen::Tensor<TensorT, 2> data_training; std::vector<std::string> labels_validation; std::vector<std::string> features_validation; Eigen::Tensor<TensorT, 2> data_validation; this->readAndMakeMetabolomicsTrainingAndValidationDataMatrices(n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, features_training, data_training, labels_training, features_validation, data_validation, labels_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, use_concentrations, use_MARs, sample_values, iter_values, fill_sampling, fill_mean, fill_zero, apply_fold_change, fold_change_ref, fold_change_log_base, n_reps_per_sample, randomize_sample_group_names, n_epochs, batch_size, memory_size); // Make the training and validation data caches after an optional transformation step if (use_concentrations) { // Apply offline transformations this->transformTrainingAndValidationDataOffline(data_training, data_validation, offline_linear_scale_input, offline_log_transform_input, offline_standardize_input, false, -1, -1, false, -1, -1); // Make the training data cache this->makeTrainingDataForCache(features_training, data_training, labels_training, n_epochs, batch_size, memory_size, n_component_group_names_training, n_labels_training, n_labels_training, shuffle_data_and_labels); this->makeValidationDataForCache(features_validation, data_validation, labels_validation, n_epochs, batch_size, memory_size, n_component_group_names_validation, n_labels_validation, n_labels_validation, shuffle_data_and_labels); } else if (use_MARs) { // Apply offline transformations TensorT min_value = 1e-3; TensorT max_value = 1e3; if (offline_log_transform_input) { min_value = std::log(min_value); max_value = std::log(max_value); } this->transformTrainingAndValidationDataOffline(data_training, data_validation, offline_linear_scale_input, offline_log_transform_input, offline_standardize_input, true, min_value, max_value, false, -1, -1); // Make the training data cache this->makeTrainingDataForCache(features_training, data_training, labels_training, n_epochs, batch_size, memory_size, n_reaction_ids_training, n_labels_training, n_labels_training, shuffle_data_and_labels); this->makeValidationDataForCache(features_validation, data_validation, labels_validation, n_epochs, batch_size, memory_size, n_reaction_ids_validation, n_labels_validation, n_labels_validation, shuffle_data_and_labels); } } } #endif //EVONET_METABOLOMICSCLASSIFICATIONDATASIMULATOR_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_PEAKSIMULATOR_H #define EVONET_PEAKSIMULATOR_H // .h #include <EvoNet/simulator/EMGModel.h> #include <vector> #include <random> namespace EvoNet { /** @brief Peak simulator. This class generates a chromatogram or spectrum peak. The peak is modeled after an exponentially modified gaussian (EMG). References: <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>. (2011). "Reconstruction of chromatographic peaks using the exponentially modified Gaussian function". Journal of Chemometrics. 25 (7): 352. doi:10.1002/cem.1343 */ template <typename TensorT> class PeakSimulator { /** Notes on potential optimizations: 1. make a virtual class called DataSimulator 2. make a virtual class called simulate 3. make a virtual class called addNoise 4. setters/getters would be unique to each derived class */ public: PeakSimulator() = default; ///< Default constructor PeakSimulator(const TensorT& step_size_mu, const TensorT& step_size_sigma, const TensorT& window_start, const TensorT& window_end, const TensorT& noise_mu, const TensorT& noise_sigma, const TensorT& baseline_left, const TensorT& baseline_right, const TensorT& saturation_limit); ///< Explicit constructor ~PeakSimulator() = default; ///< Default destructor /** @brief calculate the points that define the left and right of the "actual" peak based on the fitted emg model points and set baselines. @param[in] x_IO A vector of x values representing time or m/z @param[in] y_IO A vector of y values representing the intensity at time t or m/z m @returns std::pair<TensorT, TensorT> of best left and right points for the peak */ std::pair<TensorT, TensorT> getBestLeftAndRight(std::vector<TensorT>& x_O, std::vector<TensorT>& y_O, const TensorT& rt, const TensorT& detection_threshold = 1e-2) const; /** @brief simulates two vector of points that correspond to x and y values that represent a peak @param[out] x_IO A vector of x values representing time or m/z @param[out] y_IO A vector of y values representing the intensity at time t or m/z m @param[in] emg An emg model class */ void simulatePeak(std::vector<TensorT>& x_O, std::vector<TensorT>& y_O, const EMGModel<TensorT>& emg) const; /** @brief Generates a range of values with noise sampled from a normal distribution @param[in] start Range start @param[in] step_mu Range mean step @param[in] step_sigma Range step standard deviation @param[in] end Range end @returns A vector of values from range start to end. */ static std::vector<TensorT> generateRangeWithNoise( const TensorT& start, const TensorT& step_mu, const TensorT& step_sigma, const TensorT& end); /** @brief Add random noise from a normal distribution to a vector of values to simulate detector noise. @param[in,out] array_IO Vector of values to add random noise @param[in] mean Mean of the normal distribution @param[in] std_dev Standard Deviation of the normal distribution @returns A vector of values with added random noise. */ static void addNoise( std::vector<TensorT>& array_IO, const TensorT& mean, const TensorT& std_dev); /** @brief Add a y offset (i.e., baseline) to a vector of values to simulate a rise in the baseline. @param[in] x_I Vector of time values @param[in,out] y_IO Vector of intensity values @param[in] baseline_left Left baseline offset @param[in] baseline_right Right baseline offse @param[in] peak_apex Time to divide left and right peak sides @returns A vector of values with added baselines. */ static void addBaseline( const std::vector<TensorT>& x_I, std::vector<TensorT>& y_IO, const TensorT& baseline_left, const TensorT& baseline_right, const TensorT& peak_apex); /** @brief Flatten the top of a peak to simulate a saturated peak. @param[in,out] array_IO Vector of values to add a saturation point to @param[in] saturation_limit Saturation limit of the simulated detector @returns A vector of values with a simulated saturation point. */ static void flattenPeak( std::vector<TensorT>& array_IO, const TensorT& saturation_limit); void setStepSizeMu(const TensorT& step_size_mu); ///< step_size_mu setter TensorT getStepSizeMu() const; ///< step_size_mu getter void setStepSizeSigma(const TensorT& step_size_mu); ///< step_size_sigma setter TensorT getStepSizeSigma() const; ///< step_size_sigma getter void setWindowStart(const TensorT& window_start); ///< window_start setter TensorT getWindowStart() const; ///< window_start getter void setWindowEnd(const TensorT& window_end); ///< window_end setter TensorT getWindowEnd() const; ///< window_end getter void setNoiseMu(const TensorT& noise_mu); ///< noise_mu setter TensorT getNoiseMu() const; ///< noise_mu getter void setNoiseSimga(const TensorT& noise_sigma); ///< noise_sigma setter TensorT getNoiseSigma() const; ///< noise_sigma getter void setBaselineLeft(const TensorT& baseline_left); ///< baseline_left setter TensorT getBaselineLeft() const; ///< baseline_left getter void setBaselineRight(const TensorT& baseline_right); ///< baseline_right setter TensorT getBaselineRight() const; ///< baseline_right getter void setSaturationLimit(const TensorT& saturation_limit); ///< saturation_limit setter TensorT getSaturationLimit() const; ///< saturation_limit getter private: TensorT step_size_mu_ = (TensorT)1.0; ///< The mean spacing between points TensorT step_size_sigma_ = (TensorT)0.001; ///< The standard deviation of spacing between points TensorT window_start_ = (TensorT)0.0; ///< Peak window start TensorT window_end_ = (TensorT)100.0; ///< Peak window end TensorT noise_mu_ = (TensorT)0.0; ///< Mean of random noise generated from a normal distribution TensorT noise_sigma_ = (TensorT)1.0; ///< Standard deviation of random noise generated from a normal distribution TensorT baseline_left_ = (TensorT)0.0; ///< Height of the left baseline TensorT baseline_right_ = (TensorT)0.0; ///< Height of the right baseline TensorT saturation_limit_ = (TensorT)1e6; ///< Maximum point height before peak saturation }; template <typename TensorT> PeakSimulator<TensorT>::PeakSimulator(const TensorT& step_size_mu, const TensorT& step_size_sigma, const TensorT& window_start, const TensorT& window_end, const TensorT& noise_mu, const TensorT& noise_sigma, const TensorT& baseline_left, const TensorT& baseline_right, const TensorT& saturation_limit) { step_size_mu_ = step_size_mu; step_size_sigma_ = step_size_sigma; window_start_ = window_start; window_end_ = window_end; noise_mu_ = noise_mu; noise_sigma_ = noise_sigma; baseline_left_ = baseline_left; baseline_right_ = baseline_right; saturation_limit_ = saturation_limit; } template <typename TensorT> void PeakSimulator<TensorT>::setStepSizeMu(const TensorT& step_size_mu) { step_size_mu_ = step_size_mu; } template <typename TensorT> TensorT PeakSimulator<TensorT>::getStepSizeMu() const { return step_size_mu_; } template <typename TensorT> void PeakSimulator<TensorT>::setStepSizeSigma(const TensorT& step_size_sigma) { step_size_sigma_ = step_size_sigma; } template <typename TensorT> TensorT PeakSimulator<TensorT>::getStepSizeSigma() const { return step_size_sigma_; } template <typename TensorT> void PeakSimulator<TensorT>::setWindowStart(const TensorT& window_start) { window_start_ = window_start; } template <typename TensorT> TensorT PeakSimulator<TensorT>::getWindowStart() const { return window_start_; } template <typename TensorT> void PeakSimulator<TensorT>::setWindowEnd(const TensorT& window_end) { window_end_ = window_end; } template <typename TensorT> TensorT PeakSimulator<TensorT>::getWindowEnd() const { return window_end_; } template <typename TensorT> void PeakSimulator<TensorT>::setNoiseMu(const TensorT& noise_mu) { noise_mu_ = noise_mu; } template <typename TensorT> TensorT PeakSimulator<TensorT>::getNoiseMu() const { return noise_mu_; } template <typename TensorT> void PeakSimulator<TensorT>::setNoiseSimga(const TensorT& noise_sigma) { noise_sigma_ = noise_sigma; } template <typename TensorT> TensorT PeakSimulator<TensorT>::getNoiseSigma() const { return noise_sigma_; } template <typename TensorT> void PeakSimulator<TensorT>::setBaselineLeft(const TensorT& baseline_left) { baseline_left_ = baseline_left; } template <typename TensorT> TensorT PeakSimulator<TensorT>::getBaselineLeft() const { return baseline_left_; } template <typename TensorT> void PeakSimulator<TensorT>::setBaselineRight(const TensorT& baseline_right) { baseline_right_ = baseline_right; } template <typename TensorT> TensorT PeakSimulator<TensorT>::getBaselineRight() const { return baseline_right_; } template <typename TensorT> void PeakSimulator<TensorT>::setSaturationLimit(const TensorT& saturation_limit) { saturation_limit_ = saturation_limit; } template <typename TensorT> TensorT PeakSimulator<TensorT>::getSaturationLimit() const { return saturation_limit_; } template <typename TensorT> std::vector<TensorT> PeakSimulator<TensorT>::generateRangeWithNoise( const TensorT& start, const TensorT& step_mu, const TensorT& step_sigma, const TensorT& end) { std::random_device rd{}; std::mt19937 gen{ rd() }; TensorT step_mu_used = step_mu; TensorT step_sigma_used = step_sigma; // TODO: improve defaults if (step_mu <= (TensorT)0) { std::cout << "Warning: mean of step size will generate negative values. A default mean of 1.0 and std_dev of 1e-9 will be used instead." << std::endl; step_mu_used = (TensorT)1.0; step_sigma_used = (TensorT)0; } else if (step_mu - (TensorT)5 * step_sigma <= (TensorT)0) { std::cout << "Warning: mean and std_dev of range step size may generate negative values. Reduce std_dev to at least 1/5 the mean of the step size. A default std_dev of 1e-9 will be used instead." << std::endl; step_sigma_used = (TensorT)0; } std::vector<TensorT> array; TensorT value = start; int cnt = 0; // checks to ensure that an infinite loop is not run if (step_sigma_used > (TensorT)0) { std::normal_distribution<> d{ step_mu_used, step_sigma_used }; while (value <= end || cnt > 1e6) { array.push_back(value); value += d(gen); // could recode to better handle rounding errors cnt += 1; } } else { while (value <= end || cnt > 1e6) { array.push_back(value); value += step_mu_used; // could recode to better handle rounding errors cnt += 1; } } return array; } template <typename TensorT> void PeakSimulator<TensorT>::addNoise( std::vector<TensorT>& array_IO, const TensorT& mean, const TensorT& std_dev) { std::random_device rd{}; std::mt19937 gen{ rd() }; if (std_dev > 0) { std::normal_distribution<> d{ mean, std_dev }; // add noise to a new array for (TensorT& value : array_IO) { value = value + d(gen); } } else { for (TensorT& value : array_IO) { value = value + mean; } } } template <typename TensorT> void PeakSimulator<TensorT>::addBaseline( const std::vector<TensorT>& x_I, std::vector<TensorT>& y_IO, const TensorT& baseline_left, const TensorT& baseline_right, const TensorT& peak_apex) { for (int i = 0; i < x_I.size(); ++i) { if (x_I[i] <= peak_apex) { y_IO[i] = std::max<TensorT>(baseline_left, y_IO[i]); } else { y_IO[i] = std::max<TensorT>(baseline_right, y_IO[i]); } } } template <typename TensorT> void PeakSimulator<TensorT>::flattenPeak( std::vector<TensorT>& array_IO, const TensorT& saturation_limit) { for (TensorT& value : array_IO) { value = (value > saturation_limit) ? saturation_limit : value; } } template<typename TensorT> inline std::pair<TensorT, TensorT> PeakSimulator<TensorT>::getBestLeftAndRight(std::vector<TensorT>& x_O, std::vector<TensorT>& y_O, const TensorT& rt, const TensorT& detection_threshold) const { TensorT best_left = (TensorT)0; TensorT best_right = (TensorT)0; // iterate from the left for (int i = 1; i < x_O.size() - 1; ++i) { if (y_O[i] > baseline_left_ + noise_sigma_ + detection_threshold) { best_left = x_O[i - 1]; break; } if (x_O[i] > rt) break; } // iterate from the right for (int i = x_O.size() - 2; i >= 0; --i) { if (y_O[i] > baseline_right_ + noise_sigma_ + detection_threshold) { best_right = x_O[i + 1]; break; } if (x_O[i] < rt) break; } return std::pair<TensorT, TensorT>(best_left, best_right); } template <typename TensorT> void PeakSimulator<TensorT>::simulatePeak( std::vector<TensorT>& x_O, std::vector<TensorT>& y_O, const EMGModel<TensorT>& emg) const { x_O.clear(); y_O.clear(); // make the time array x_O = generateRangeWithNoise(window_start_, step_size_mu_, step_size_sigma_, window_end_); // make the intensity array for (TensorT x : x_O) { y_O.push_back(emg.PDF(x)); } // add a baseline to the intensity array addBaseline(x_O, y_O, baseline_left_, baseline_right_, emg.getMu()); // add noise to the intensity array addNoise(y_O, noise_mu_, noise_sigma_); // add saturation limit flattenPeak(y_O, saturation_limit_); } } #endif //EVONET_PEAKSIMULATOR_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ModelFile test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/io/ModelFile.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(ModelFile1) Model<float> makeModel1() { /** * Directed Acyclic Graph Toy Network Model */ Node<float> i1, i2, h1, h2, o1, o2, b1, b2; Link l1, l2, l3, l4, lb1, lb2, l5, l6, l7, l8, lb3, lb4; Weight<float> w1, w2, w3, w4, wb1, wb2, w5, w6, w7, w8, wb3, wb4; Model<float> model1; // Toy network: 1 hidden layer, fully connected, DAG i1 = Node<float>("0", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); i2 = Node<float>("1", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h1 = Node<float>("2", NodeType::hidden, NodeStatus::deactivated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h2 = Node<float>("3", NodeType::hidden, NodeStatus::deactivated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o1 = Node<float>("4", NodeType::output, NodeStatus::deactivated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o2 = Node<float>("5", NodeType::output, NodeStatus::deactivated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b1 = Node<float>("6", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b2 = Node<float>("7", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); // weights std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; // weight_init.reset(new RandWeightInitOp(1.0)); // No random init for testing weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w1 = Weight<float>("0", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w1.setWeight(1); w2 = Weight<float>("1", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w2.setWeight(2); w3 = Weight<float>("2", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w3.setWeight(3); w4 = Weight<float>("3", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb1 = Weight<float>("4", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb2 = Weight<float>("5", weight_init, solver); // input layer + bias l1 = Link("0", "0", "2", "0"); l2 = Link("1", "0", "3", "1"); l3 = Link("2", "1", "2", "2"); l4 = Link("3", "1", "3", "3"); lb1 = Link("4", "6", "2", "4"); lb2 = Link("5", "6", "3", "5"); // weights weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w5 = Weight<float>("6", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w6 = Weight<float>("7", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w7 = Weight<float>("8", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w8 = Weight<float>("9", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb3 = Weight<float>("10", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb4 = Weight<float>("11", weight_init, solver); // hidden layer + bias l5 = Link("6", "2", "4", "6"); l6 = Link("7", "2", "5", "7"); l7 = Link("8", "3", "4", "8"); l8 = Link("9", "3", "5", "9"); lb3 = Link("10", "7", "4", "10"); lb4 = Link("11", "7", "5", "11"); model1.setId(1); model1.setName("1"); model1.addNodes({ i1, i2, h1, h2, o1, o2, b1, b2 }); model1.addWeights({ w1, w2, w3, w4, wb1, wb2, w5, w6, w7, w8, wb3, wb4 }); model1.addLinks({ l1, l2, l3, l4, lb1, lb2, l5, l6, l7, l8, lb3, lb4 }); model1.setInputAndOutputNodes(); return model1; } BOOST_AUTO_TEST_CASE(constructor) { ModelFile<float>* ptr = nullptr; ModelFile<float>* nullPointer = nullptr; ptr = new ModelFile<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { ModelFile<float>* ptr = nullptr; ptr = new ModelFile<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(storeModelDot) { ModelFile<float> data; std::string filename = "ModelFileTest.gv"; Model<float> model1 = makeModel1(); data.storeModelDot(filename, model1); } BOOST_AUTO_TEST_CASE(loadModelCsv) { ModelFile<float> data; Model<float> model_test; model_test.setId(1); model_test.setName("1"); std::string filename_nodes = "ModelNodeFileTest.csv"; std::string filename_links = "ModelLinkFileTest.csv"; std::string filename_weights = "ModelWeightFileTest.csv"; Model<float> model1 = makeModel1(); model1.setInputAndOutputNodes(); data.storeModelCsv(filename_nodes, filename_links, filename_weights, model1); data.loadModelCsv(filename_nodes, filename_links, filename_weights, model_test); BOOST_CHECK_EQUAL(model_test.getId(), model1.getId()); BOOST_CHECK_EQUAL(model_test.getName(), model1.getName()); BOOST_CHECK(model_test.getNodes() == model1.getNodes()); BOOST_CHECK(model_test.getLinks() == model1.getLinks()); //BOOST_CHECK(model_test.getWeights() == model1.getWeights()); // Broke BOOST_CHECK(model_test.getInputNodes().size() == model1.getInputNodes().size()); // Not sure why this fails BOOST_CHECK(model_test.getOutputNodes().size() == model1.getOutputNodes().size()); // Not sure why this fails //BOOST_CHECK(model_test == model1); // Not sure why this fails } BOOST_AUTO_TEST_CASE(loadModelBinary) { ModelFile<float> data; Model<float> model_test; std::string filename = "ModelFileTest.binary"; Model<float> model1 = makeModel1(); model1.setInputAndOutputNodes(); data.storeModelBinary(filename, model1); data.loadModelBinary(filename, model_test); BOOST_CHECK_EQUAL(model_test.getId(), model1.getId()); BOOST_CHECK_EQUAL(model_test.getName(), model1.getName()); BOOST_CHECK(model_test.getNodes() == model1.getNodes()); BOOST_CHECK(model_test.getLinks() == model1.getLinks()); BOOST_CHECK(model_test.getWeights() == model1.getWeights()); BOOST_CHECK(model_test.getInputNodes().size() == model1.getInputNodes().size()); // Not sure why this fails BOOST_CHECK(model_test.getOutputNodes().size() == model1.getOutputNodes().size()); // Not sure why this fails //BOOST_CHECK(model_test == model1); // Not sure why this fails } BOOST_AUTO_TEST_CASE(loadWeightValuesBinary) { // Store the binarized model ModelFile<float> data; std::string filename = "ModelFileTest.binary"; Model<float> model1 = makeModel1(); model1.setInputAndOutputNodes(); data.storeModelBinary(filename, model1); // Read in the weight values std::map<std::string, std::shared_ptr<Weight<float>>> weights_test; for (int i = 0; i < 3; ++i) { auto weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); auto solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); std::shared_ptr<Weight<float>> weight(new Weight<float>( std::to_string(i), weight_init, solver)); weight->setModuleName(std::to_string(i)); weight->setWeight(0); weights_test.emplace(weight->getName(), weight); } data.loadWeightValuesBinary(filename, weights_test); // Test that the weight values match for (int i = 0; i < 3; ++i) { BOOST_CHECK_EQUAL(model1.weights_.at(std::to_string(i))->getWeight(), weights_test.at(std::to_string(i))->getWeight()); BOOST_CHECK(!weights_test.at(std::to_string(i))->getInitWeight()); } } BOOST_AUTO_TEST_SUITE_END()<file_sep>EvoNet: Evolving end-to-end computational networks ########################################################################################################## |docs| |circleci| |license| .. begin_badges .. |docs| image:: https://readthedocs.com/projects/evonet/badge/?version=latest :alt: Documentation Status :target: https://evonet.readthedocs.io/en/latest/?badge=latest .. |circleci| image:: https://circleci.com/gh/dmccloskey/EvoNet.svg?branch=develop?style=svg :alt: CircleCI Build Status (Windows, Linux & macOS) :target: https://circleci.com/gh/dmccloskey/EvoNet .. |license| image:: https://img.shields.io/github/license/dmccloskey/EvoNet.svg :alt: License MIT Clause :target: https://github.com/dmccloskey/EvoNet/blob/develop/LICENSE .. end_badges .. begin_introduction EvoNet aims to provide a machine learning framework that can optimize both network weights AND network structure simultaneously while still taking advantage of the latest hardware acceleration technology (Fig 1). .. image:: images/Schematic_GraphNetwork.png Currently, network structure is optimized using an evolutionary algorithm over network node integration and activation functions and over node connections (Fig 2), while network weights are optimized using standard backpropogation. .. image:: images/Schematic_mutationOperations.png EvoNet is written in C++ and is optimized for hardware acceleration using native threading and CUDA GPU technology. .. end_introduction .. role:: bash(code) :language: bash Features ========================================================================================================== .. begin_examples Examples ---------------------------------------------------------------------------------------------------------- - Full computational networks that include common neural network architecture layers including fully connected, recurrent, LSTM, convolution, softmax, and many other. - Full computational networks for the analysis and simulation of biochemical networks (work in progress...). - Benchmarks on various MNIST related classification and reconstruction tasks. - Benchmarks using various Omics data (work in progress...). - All examples can be found in the `./src/examples` directory, and can be built using the provided `CMAKE` scripts. .. begin_examples .. begin_features Features ---------------------------------------------------------------------------------------------------------- - End-to-end network building and learning framework that is amenable to back propagation that is made possible by a novel network to tensor layer algorithm. - Automated neural network architecture and module design that is made possible by a novel ensemble model training, testing, and pruning routines in combination with pre-packed and customizable mutation operators. - Fine-tuning or specialization of existing architectures for a task using the EvoNet workflow. - Suite of hand-tuned operators for tensor math, node integration, node activation, network loss functions, and network metric functions that utilize parallelization on CPU or GPU hardware. .. end_features .. begin_code Code ---------------------------------------------------------------------------------------------------------- - Unit tests are provided for (almost) all classes and methods on both CPU and GPU. - Docstrings and documentation is provided for (almost) all classes and methods. - Runs on Windows, Linux, and MacOS. Please note that Windows is currently the most well tested. - All unit tests can be found in the `./src/tests` directory, and can be ran using `CTest`. .. end_code<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE MetricFunctionTensor test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/MetricFunctionTensor.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(MetricFunctionTensor1) /** AccuracyBCOp Tests */ BOOST_AUTO_TEST_CASE(constructorAccuracyBCOp) { AccuracyBCTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; AccuracyBCTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorAccuracyBCOp) { AccuracyBCTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new AccuracyBCTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionAccuracyBCOp) { AccuracyBCTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 0, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 0, 1}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0.5, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** AccuracyMCMicroOp Tests */ BOOST_AUTO_TEST_CASE(constructorAccuracyMCMicroOp) { AccuracyMCMicroTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; AccuracyMCMicroTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorAccuracyMCMicroOp) { AccuracyMCMicroTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new AccuracyMCMicroTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionAccuracyMCMicroOp) { AccuracyMCMicroTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 2, 3}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0.75, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** AccuracyMCMacroOp Tests */ BOOST_AUTO_TEST_CASE(constructorAccuracyMCMacroOp) { AccuracyMCMacroTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; AccuracyMCMacroTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorAccuracyMCMacroOp) { AccuracyMCMacroTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new AccuracyMCMacroTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionAccuracyMCMacroOp) { AccuracyMCMacroTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 2, 3}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); //BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 0), 0.5, 1e-4); //BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** PrecisionBCOp Tests */ BOOST_AUTO_TEST_CASE(constructorPrecisionBCOp) { PrecisionBCTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; PrecisionBCTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorPrecisionBCOp) { PrecisionBCTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new PrecisionBCTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionPrecisionBCOp) { PrecisionBCTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 0, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 0, 1}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0.25, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** PrecisionMCMicroOp Tests */ BOOST_AUTO_TEST_CASE(constructorPrecisionMCMicroOp) { PrecisionMCMicroTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; PrecisionMCMicroTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorPrecisionMCMicroOp) { PrecisionMCMicroTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new PrecisionMCMicroTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionPrecisionMCMicroOp) { PrecisionMCMicroTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 2, 3}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0.5, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** PrecisionMCMacroOp Tests */ BOOST_AUTO_TEST_CASE(constructorPrecisionMCMacroOp) { PrecisionMCMacroTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; PrecisionMCMacroTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorPrecisionMCMacroOp) { PrecisionMCMacroTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new PrecisionMCMacroTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionPrecisionMCMacroOp) { PrecisionMCMacroTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 2, 3}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); //BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 0), 0.5, 1e-4); //BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** RecallBCOp Tests */ BOOST_AUTO_TEST_CASE(constructorRecallBCOp) { RecallBCTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; RecallBCTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorRecallBCOp) { RecallBCTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new RecallBCTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionRecallBCOp) { RecallBCTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 0, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 0, 1}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0.5, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** RecallMCMicroOp Tests */ BOOST_AUTO_TEST_CASE(constructorRecallMCMicroOp) { RecallMCMicroTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; RecallMCMicroTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorRecallMCMicroOp) { RecallMCMicroTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new RecallMCMicroTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionRecallMCMicroOp) { RecallMCMicroTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 2, 3}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0.5, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** RecallMCMacroOp Tests */ BOOST_AUTO_TEST_CASE(constructorRecallMCMacroOp) { RecallMCMacroTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; RecallMCMacroTensorOp<float, Eigen::DefaultDevice>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorRecallMCMacroOp) { RecallMCMacroTensorOp<float, Eigen::DefaultDevice>* ptrReLU = nullptr; ptrReLU = new RecallMCMacroTensorOp<float, Eigen::DefaultDevice>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(operationfunctionRecallMCMacroOp) { RecallMCMacroTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 2, 3}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); //BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 0), 0.5, 1e-4); //BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** PredictionBiasOp Tests */ BOOST_AUTO_TEST_CASE(constructorPredictionBiasOp) { PredictionBiasTensorOp<float, Eigen::DefaultDevice>* ptrPredictionBias = nullptr; PredictionBiasTensorOp<float, Eigen::DefaultDevice>* nullPointerPredictionBias = nullptr; BOOST_CHECK_EQUAL(ptrPredictionBias, nullPointerPredictionBias); } BOOST_AUTO_TEST_CASE(destructorPredictionBiasOp) { PredictionBiasTensorOp<float, Eigen::DefaultDevice>* ptrPredictionBias = nullptr; ptrPredictionBias = new PredictionBiasTensorOp<float, Eigen::DefaultDevice>(); delete ptrPredictionBias; } BOOST_AUTO_TEST_CASE(operationfunctionPredictionBiasOp) { PredictionBiasTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 2, 3}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); //BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 0), 0.5, 1e-4); //BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** F1ScoreBCOp Tests */ BOOST_AUTO_TEST_CASE(constructorF1ScoreBCOp) { F1ScoreBCTensorOp<float, Eigen::DefaultDevice>* ptrF1Score = nullptr; F1ScoreBCTensorOp<float, Eigen::DefaultDevice>* nullPointerF1Score = nullptr; BOOST_CHECK_EQUAL(ptrF1Score, nullPointerF1Score); } BOOST_AUTO_TEST_CASE(destructorF1ScoreBCOp) { F1ScoreBCTensorOp<float, Eigen::DefaultDevice>* ptrF1Score = nullptr; ptrF1Score = new F1ScoreBCTensorOp<float, Eigen::DefaultDevice>(); delete ptrF1Score; } BOOST_AUTO_TEST_CASE(operationfunctionF1ScoreBCOp) { F1ScoreBCTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{1, 0, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 0, 1}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0, 1e-4); // FIXME BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** F1ScoreMCMicroOp Tests */ BOOST_AUTO_TEST_CASE(constructorF1ScoreMCMicroOp) { F1ScoreMCMicroTensorOp<float, Eigen::DefaultDevice>* ptrF1Score = nullptr; F1ScoreMCMicroTensorOp<float, Eigen::DefaultDevice>* nullPointerF1Score = nullptr; BOOST_CHECK_EQUAL(ptrF1Score, nullPointerF1Score); } BOOST_AUTO_TEST_CASE(destructorF1ScoreMCMicroOp) { F1ScoreMCMicroTensorOp<float, Eigen::DefaultDevice>* ptrF1Score = nullptr; ptrF1Score = new F1ScoreMCMicroTensorOp<float, Eigen::DefaultDevice>(); delete ptrF1Score; } BOOST_AUTO_TEST_CASE(operationfunctionF1ScoreMCMicroOp) { F1ScoreMCMicroTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 2, 3}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 0, 1e-4); // FIXME BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** F1ScoreMCMacroOp Tests */ BOOST_AUTO_TEST_CASE(constructorF1ScoreMCMacroOp) { F1ScoreMCMacroTensorOp<float, Eigen::DefaultDevice>* ptrF1Score = nullptr; F1ScoreMCMacroTensorOp<float, Eigen::DefaultDevice>* nullPointerF1Score = nullptr; BOOST_CHECK_EQUAL(ptrF1Score, nullPointerF1Score); } BOOST_AUTO_TEST_CASE(destructorF1ScoreMCMacroOp) { F1ScoreMCMacroTensorOp<float, Eigen::DefaultDevice>* ptrF1Score = nullptr; ptrF1Score = new F1ScoreMCMacroTensorOp<float, Eigen::DefaultDevice>(); delete ptrF1Score; } BOOST_AUTO_TEST_CASE(operationfunctionF1ScoreMCMacroOp) { F1ScoreMCMacroTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 2, 3}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); //BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 0), 0.66666667, 1e-4); //BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** AUROCOp Tests */ BOOST_AUTO_TEST_CASE(constructorAUROCOp) { AUROCTensorOp<float, Eigen::DefaultDevice>* ptrAUROC = nullptr; AUROCTensorOp<float, Eigen::DefaultDevice>* nullPointerAUROC = nullptr; BOOST_CHECK_EQUAL(ptrAUROC, nullPointerAUROC); } BOOST_AUTO_TEST_CASE(destructorAUROCOp) { AUROCTensorOp<float, Eigen::DefaultDevice>* ptrAUROC = nullptr; ptrAUROC = new AUROCTensorOp<float, Eigen::DefaultDevice>(); delete ptrAUROC; } BOOST_AUTO_TEST_CASE(operationfunctionAUROCOp) { AUROCTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 2, 3}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); //BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 0), 0.5, 1e-4); //BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** MCCBCTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorMCCBCTensorOp) { MCCBCTensorOp<float, Eigen::DefaultDevice>* ptrMCC = nullptr; MCCBCTensorOp<float, Eigen::DefaultDevice>* nullPointerMCC = nullptr; BOOST_CHECK_EQUAL(ptrMCC, nullPointerMCC); } BOOST_AUTO_TEST_CASE(destructorMCCBCTensorOp) { MCCBCTensorOp<float, Eigen::DefaultDevice>* ptrMCC = nullptr; ptrMCC = new MCCBCTensorOp<float, Eigen::DefaultDevice>(); delete ptrMCC; } BOOST_AUTO_TEST_CASE(operationfunctionMCCBCTensorOp) { MCCBCTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 2, 3}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); //BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 0), 0.5, 1e-4); //BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** MCCMCMicroTensorOp Tests */ BOOST_AUTO_TEST_CASE(constructorMCCMCMicroTensorOp) { MCCMCMicroTensorOp<float, Eigen::DefaultDevice>* ptrMCC = nullptr; MCCMCMicroTensorOp<float, Eigen::DefaultDevice>* nullPointerMCC = nullptr; BOOST_CHECK_EQUAL(ptrMCC, nullPointerMCC); } BOOST_AUTO_TEST_CASE(destructorMCCMCMicroTensorOp) { MCCMCMicroTensorOp<float, Eigen::DefaultDevice>* ptrMCC = nullptr; ptrMCC = new MCCMCMicroTensorOp<float, Eigen::DefaultDevice>(); delete ptrMCC; } BOOST_AUTO_TEST_CASE(operationfunctionMCCMCMicroTensorOp) { MCCMCMicroTensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 2, 3}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); //BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 0), 0.5, 1e-4); //BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); //BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** MAEOp Tests */ BOOST_AUTO_TEST_CASE(constructorMAEOp) { MAETensorOp<float, Eigen::DefaultDevice>* ptrMAE = nullptr; MAETensorOp<float, Eigen::DefaultDevice>* nullPointerMAE = nullptr; BOOST_CHECK_EQUAL(ptrMAE, nullPointerMAE); } BOOST_AUTO_TEST_CASE(destructorMAEOp) { MAETensorOp<float, Eigen::DefaultDevice>* ptrMAE = nullptr; ptrMAE = new MAETensorOp<float, Eigen::DefaultDevice>(); delete ptrMAE; } BOOST_AUTO_TEST_CASE(operationfunctionMAEOp) { MAETensorOp<float, Eigen::DefaultDevice> operation; const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 2, 3}, {0, 0, 0, 0}} }); float error_ptr[] = { 0, 0, 0, 0 }; Eigen::DefaultDevice device; operation(y_pred.data(), y_true.data(), error_ptr, batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); Eigen::TensorMap<Eigen::Tensor<float, 2>> error(error_ptr, n_metrics, memory_size); BOOST_CHECK_CLOSE(error(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 0), 1.5, 1e-4); BOOST_CHECK_CLOSE(error(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error(1, 1), 0, 1e-4); } /** CosineSimilarityOp Tests */ BOOST_AUTO_TEST_CASE(constructorCosineSimilarityOp) { CosineSimilarityTensorOp<float, Eigen::DefaultDevice>* ptrCosineSimilarity = nullptr; CosineSimilarityTensorOp<float, Eigen::DefaultDevice>* nullPointerCosineSimilarity = nullptr; BOOST_CHECK_EQUAL(ptrCosineSimilarity, nullPointerCosineSimilarity); } BOOST_AUTO_TEST_CASE(destructorCosineSimilarityOp) { CosineSimilarityTensorOp<float, Eigen::DefaultDevice>* ptrCosineSimilarity = nullptr; ptrCosineSimilarity = new CosineSimilarityTensorOp<float, Eigen::DefaultDevice>(); delete ptrCosineSimilarity; } BOOST_AUTO_TEST_CASE(operationfunctionCosineSimilarityOp) { const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{0, 1, 2, 3}, {0, 0, 0, 0}} }); Eigen::DefaultDevice device; CosineSimilarityTensorOp<float, Eigen::DefaultDevice> operation_sum(std::string("Sum")); Eigen::Tensor<float, 2> error_sum(n_metrics, memory_size); error_sum.setZero(); operation_sum(y_pred.data(), y_true.data(), error_sum.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_sum(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_sum(1, 0), 0.801783681, 1e-4); BOOST_CHECK_CLOSE(error_sum(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_sum(1, 1), 0, 1e-4); CosineSimilarityTensorOp<float, Eigen::DefaultDevice> operation_mean(std::string("Mean")); Eigen::Tensor<float, 2> error_mean(n_metrics, memory_size); error_mean.setZero(); operation_mean(y_pred.data(), y_true.data(), error_mean.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_mean(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_mean(1, 0), 0.40089184, 1e-4); BOOST_CHECK_CLOSE(error_mean(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_mean(1, 1), 0, 1e-4); CosineSimilarityTensorOp<float, Eigen::DefaultDevice> operation_var(std::string("Var")); Eigen::Tensor<float, 2> error_var(n_metrics, memory_size); error_var.setZero(); operation_var(y_pred.data(), y_true.data(), error_var.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_var(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(1, 0), 0.321428537, 1e-4); BOOST_CHECK_CLOSE(error_var(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(1, 1), 0, 1e-4); } /** PearsonROp Tests */ BOOST_AUTO_TEST_CASE(constructorPearsonROp) { PearsonRTensorOp<float, Eigen::DefaultDevice>* ptrPearsonR = nullptr; PearsonRTensorOp<float, Eigen::DefaultDevice>* nullPointerPearsonR = nullptr; BOOST_CHECK_EQUAL(ptrPearsonR, nullPointerPearsonR); } BOOST_AUTO_TEST_CASE(destructorPearsonROp) { PearsonRTensorOp<float, Eigen::DefaultDevice>* ptrPearsonR = nullptr; ptrPearsonR = new PearsonRTensorOp<float, Eigen::DefaultDevice>(); delete ptrPearsonR; } BOOST_AUTO_TEST_CASE(operationfunctionPearsonROp) { const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{2, 3, 2, 3}, {0, 0, 0, 0}} }); Eigen::DefaultDevice device; PearsonRTensorOp<float, Eigen::DefaultDevice> operation_sum(std::string("Sum")); Eigen::Tensor<float, 2> error_sum(n_metrics, memory_size); error_sum.setZero(); operation_sum(y_pred.data(), y_true.data(), error_sum.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_sum(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_sum(1, 0), 0.197246432, 1e-4); BOOST_CHECK_CLOSE(error_sum(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_sum(1, 1), 0, 1e-4); PearsonRTensorOp<float, Eigen::DefaultDevice> operation_mean(std::string("Mean")); Eigen::Tensor<float, 2> error_mean(n_metrics, memory_size); error_mean.setZero(); operation_mean(y_pred.data(), y_true.data(), error_mean.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_mean(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_mean(1, 0), 0.0986232162, 1e-4); BOOST_CHECK_CLOSE(error_mean(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_mean(1, 1), 0, 1e-4); PearsonRTensorOp<float, Eigen::DefaultDevice> operation_var(std::string("Var")); Eigen::Tensor<float, 2> error_var(n_metrics, memory_size); error_var.setZero(); operation_var(y_pred.data(), y_true.data(), error_var.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_var(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(1, 0), 0.913880289, 1e-4); BOOST_CHECK_CLOSE(error_var(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(1, 1), 0, 1e-4); } /** EuclideanDistOp Tests */ BOOST_AUTO_TEST_CASE(constructorEuclideanDistOp) { EuclideanDistTensorOp<float, Eigen::DefaultDevice>* ptrEuclideanDist = nullptr; EuclideanDistTensorOp<float, Eigen::DefaultDevice>* nullPointerEuclideanDist = nullptr; BOOST_CHECK_EQUAL(ptrEuclideanDist, nullPointerEuclideanDist); } BOOST_AUTO_TEST_CASE(destructorEuclideanDistOp) { EuclideanDistTensorOp<float, Eigen::DefaultDevice>* ptrEuclideanDist = nullptr; ptrEuclideanDist = new EuclideanDistTensorOp<float, Eigen::DefaultDevice>(); delete ptrEuclideanDist; } BOOST_AUTO_TEST_CASE(operationfunctionEuclideanDistOp) { const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{2, 3, 2, 3}, {0, 0, 0, 0}} }); Eigen::DefaultDevice device; EuclideanDistTensorOp<float, Eigen::DefaultDevice> operation_sum(std::string("Sum")); Eigen::Tensor<float, 2> error_sum(n_metrics, memory_size); error_sum.setZero(); operation_sum(y_pred.data(), y_true.data(), error_sum.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_sum(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_sum(1, 0), 7.79583168, 1e-4); BOOST_CHECK_CLOSE(error_sum(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_sum(1, 1), 0, 1e-4); EuclideanDistTensorOp<float, Eigen::DefaultDevice> operation_mean(std::string("Mean")); Eigen::Tensor<float, 2> error_mean(n_metrics, memory_size); error_mean.setZero(); operation_mean(y_pred.data(), y_true.data(), error_mean.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_mean(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_mean(1, 0), 3.89791584, 1e-4); BOOST_CHECK_CLOSE(error_mean(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_mean(1, 1), 0, 1e-4); EuclideanDistTensorOp<float, Eigen::DefaultDevice> operation_var(std::string("Var")); Eigen::Tensor<float, 2> error_var(n_metrics, memory_size); error_var.setZero(); operation_var(y_pred.data(), y_true.data(), error_var.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_var(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(1, 0), 1.61250567, 1e-4); BOOST_CHECK_CLOSE(error_var(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(1, 1), 0, 1e-4); } /** ManhattanDistOp Tests */ BOOST_AUTO_TEST_CASE(constructorManhattanDistOp) { ManhattanDistTensorOp<float, Eigen::DefaultDevice>* ptrManhattanDist = nullptr; ManhattanDistTensorOp<float, Eigen::DefaultDevice>* nullPointerManhattanDist = nullptr; BOOST_CHECK_EQUAL(ptrManhattanDist, nullPointerManhattanDist); } BOOST_AUTO_TEST_CASE(destructorManhattanDistOp) { ManhattanDistTensorOp<float, Eigen::DefaultDevice>* ptrManhattanDist = nullptr; ptrManhattanDist = new ManhattanDistTensorOp<float, Eigen::DefaultDevice>(); delete ptrManhattanDist; } BOOST_AUTO_TEST_CASE(operationfunctionManhattanDistOp) { const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{2, 3, 2, 3}, {0, 0, 0, 0}} }); Eigen::DefaultDevice device; ManhattanDistTensorOp<float, Eigen::DefaultDevice> operation_sum(std::string("Sum")); Eigen::Tensor<float, 2> error_sum(n_metrics, memory_size); error_sum.setZero(); operation_sum(y_pred.data(), y_true.data(), error_sum.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_sum(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_sum(1, 0), 14, 1e-4); BOOST_CHECK_CLOSE(error_sum(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_sum(1, 1), 0, 1e-4); ManhattanDistTensorOp<float, Eigen::DefaultDevice> operation_mean(std::string("Mean")); Eigen::Tensor<float, 2> error_mean(n_metrics, memory_size); error_mean.setZero(); operation_mean(y_pred.data(), y_true.data(), error_mean.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_mean(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_mean(1, 0), 7, 1e-4); BOOST_CHECK_CLOSE(error_mean(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_mean(1, 1), 0, 1e-4); ManhattanDistTensorOp<float, Eigen::DefaultDevice> operation_var(std::string("Var")); Eigen::Tensor<float, 2> error_var(n_metrics, memory_size); error_var.setZero(); operation_var(y_pred.data(), y_true.data(), error_var.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_var(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(1, 0), 8, 1e-4); BOOST_CHECK_CLOSE(error_var(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(1, 1), 0, 1e-4); } /** JeffreysAndMatusitaDistOp Tests */ BOOST_AUTO_TEST_CASE(constructorJeffreysAndMatusitaDistOp) { JeffreysAndMatusitaDistTensorOp<float, Eigen::DefaultDevice>* ptrJeffreysAndMatusitaDist = nullptr; JeffreysAndMatusitaDistTensorOp<float, Eigen::DefaultDevice>* nullPointerJeffreysAndMatusitaDist = nullptr; BOOST_CHECK_EQUAL(ptrJeffreysAndMatusitaDist, nullPointerJeffreysAndMatusitaDist); } BOOST_AUTO_TEST_CASE(destructorJeffreysAndMatusitaDistOp) { JeffreysAndMatusitaDistTensorOp<float, Eigen::DefaultDevice>* ptrJeffreysAndMatusitaDist = nullptr; ptrJeffreysAndMatusitaDist = new JeffreysAndMatusitaDistTensorOp<float, Eigen::DefaultDevice>(); delete ptrJeffreysAndMatusitaDist; } BOOST_AUTO_TEST_CASE(operationfunctionJeffreysAndMatusitaDistOp) { const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{2, 3, 2, 3}, {0, 0, 0, 0}} }); Eigen::DefaultDevice device; JeffreysAndMatusitaDistTensorOp<float, Eigen::DefaultDevice> operation_sum(std::string("Sum")); Eigen::Tensor<float, 2> error_sum(n_metrics, memory_size); error_sum.setZero(); operation_sum(y_pred.data(), y_true.data(), error_sum.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_sum(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_sum(1, 0), 4.7389946, 1e-4); BOOST_CHECK_CLOSE(error_sum(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_sum(1, 1), 0, 1e-4); JeffreysAndMatusitaDistTensorOp<float, Eigen::DefaultDevice> operation_mean(std::string("Mean")); Eigen::Tensor<float, 2> error_mean(n_metrics, memory_size); error_mean.setZero(); operation_mean(y_pred.data(), y_true.data(), error_mean.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_mean(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_mean(1, 0), 2.3694973, 1e-4); BOOST_CHECK_CLOSE(error_mean(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_mean(1, 1), 0, 1e-4); JeffreysAndMatusitaDistTensorOp<float, Eigen::DefaultDevice> operation_var(std::string("Var")); Eigen::Tensor<float, 2> error_var(n_metrics, memory_size); error_var.setZero(); operation_var(y_pred.data(), y_true.data(), error_var.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_var(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(1, 0), 0.478435606, 1e-4); BOOST_CHECK_CLOSE(error_var(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(1, 1), 0, 1e-4); } /** LogarithmicDistOp Tests */ BOOST_AUTO_TEST_CASE(constructorLogarithmicDistOp) { LogarithmicDistTensorOp<float, Eigen::DefaultDevice>* ptrLogarithmicDist = nullptr; LogarithmicDistTensorOp<float, Eigen::DefaultDevice>* nullPointerLogarithmicDist = nullptr; BOOST_CHECK_EQUAL(ptrLogarithmicDist, nullPointerLogarithmicDist); } BOOST_AUTO_TEST_CASE(destructorLogarithmicDistOp) { LogarithmicDistTensorOp<float, Eigen::DefaultDevice>* ptrLogarithmicDist = nullptr; ptrLogarithmicDist = new LogarithmicDistTensorOp<float, Eigen::DefaultDevice>(); delete ptrLogarithmicDist; } BOOST_AUTO_TEST_CASE(operationfunctionLogarithmicDistOp) { const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 0, 0, 0}, {1, 0, 0, 0} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{2, 3, 2, 3}, {0, 0, 0, 0}} }); Eigen::DefaultDevice device; LogarithmicDistTensorOp<float, Eigen::DefaultDevice> operation_sum(std::string("Sum")); Eigen::Tensor<float, 2> error_sum(n_metrics, memory_size); error_sum.setZero(); operation_sum(y_pred.data(), y_true.data(), error_sum.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_sum(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_sum(1, 0), 3.58351898, 1e-4); BOOST_CHECK_CLOSE(error_sum(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_sum(1, 1), 0, 1e-4); LogarithmicDistTensorOp<float, Eigen::DefaultDevice> operation_mean(std::string("Mean")); Eigen::Tensor<float, 2> error_mean(n_metrics, memory_size); error_mean.setZero(); operation_mean(y_pred.data(), y_true.data(), error_mean.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_mean(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_mean(1, 0), 1.79175949, 1e-4); BOOST_CHECK_CLOSE(error_mean(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_mean(1, 1), 0, 1e-4); LogarithmicDistTensorOp<float, Eigen::DefaultDevice> operation_var(std::string("Var")); Eigen::Tensor<float, 2> error_var(n_metrics, memory_size); error_var.setZero(); operation_var(y_pred.data(), y_true.data(), error_var.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_var(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(1, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(1, 1), 0, 1e-4); } /** PercentDifferenceOp Tests */ BOOST_AUTO_TEST_CASE(constructorPercentDifferenceOp) { PercentDifferenceTensorOp<float, Eigen::DefaultDevice>* ptrPercentDifference = nullptr; PercentDifferenceTensorOp<float, Eigen::DefaultDevice>* nullPointerPercentDifference = nullptr; BOOST_CHECK_EQUAL(ptrPercentDifference, nullPointerPercentDifference); } BOOST_AUTO_TEST_CASE(destructorPercentDifferenceOp) { PercentDifferenceTensorOp<float, Eigen::DefaultDevice>* ptrPercentDifference = nullptr; ptrPercentDifference = new PercentDifferenceTensorOp<float, Eigen::DefaultDevice>(); delete ptrPercentDifference; } BOOST_AUTO_TEST_CASE(operationfunctionPercentDifferenceOp) { const int memory_size = 2; const int batch_size = 2; const int layer_size = 4; const int n_metrics = 2; const int time_step = 0; const int metric_index = 1; Eigen::Tensor<float, 2> y_true(batch_size, layer_size); y_true.setValues({ {1, 1, 1, 1}, {1, 1, 1, 1} }); Eigen::Tensor<float, 3> y_pred(batch_size, memory_size, layer_size); y_pred.setValues({ {{3, 2, 1, 0}, {0, 0, 0, 0}}, {{2, 3, 2, 3}, {0, 0, 0, 0}} }); Eigen::DefaultDevice device; PercentDifferenceTensorOp<float, Eigen::DefaultDevice> operation_sum(std::string("Sum")); Eigen::Tensor<float, 2> error_sum(n_metrics, memory_size); error_sum.setZero(); operation_sum(y_pred.data(), y_true.data(), error_sum.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_sum(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_sum(1, 0), 9.99999046, 1e-4); BOOST_CHECK_CLOSE(error_sum(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_sum(1, 1), 0, 1e-4); PercentDifferenceTensorOp<float, Eigen::DefaultDevice> operation_mean(std::string("Mean")); Eigen::Tensor<float, 2> error_mean(n_metrics, memory_size); error_mean.setZero(); operation_mean(y_pred.data(), y_true.data(), error_mean.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_mean(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_mean(1, 0), 4.99999523, 1e-4); BOOST_CHECK_CLOSE(error_mean(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_mean(1, 1), 0, 1e-4); PercentDifferenceTensorOp<float, Eigen::DefaultDevice> operation_var(std::string("Var")); Eigen::Tensor<float, 2> error_var(n_metrics, memory_size); error_var.setZero(); operation_var(y_pred.data(), y_true.data(), error_var.data(), batch_size, memory_size, layer_size, n_metrics, time_step, metric_index, device); BOOST_CHECK_CLOSE(error_var(0, 0), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(1, 0), 2.0, 1e-4); BOOST_CHECK_CLOSE(error_var(0, 1), 0, 1e-4); BOOST_CHECK_CLOSE(error_var(1, 1), 0, 1e-4); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #include <SmartPeak/simulator/BiochemicalReaction.h> #include <unsupported/Eigen/CXX11/Tensor> using namespace SmartPeak; /* @brief Find significant pair-wise MARS between samples (one pre/post vs. all pre/post) */ PWData PWComparison(BiochemicalReactionModel<float>& metabolomics_data, const std::vector<std::string>& sample_names, int n_samples = 10000, float alpha = 0.05, float fc = 1.0) { PWData pw_data; for (const std::string& mar : metabolomics_data.reaction_ids_) { for (size_t sgn1_iter = 0; sgn1_iter < sample_names.size(); ++sgn1_iter) { // check if the sample name exists if (metabolomics_data.metabolomicsData_.count(sample_names[sgn1_iter]) == 0) continue; // sample the MAR data std::vector<float> samples1; for (int sample_iter = 0; sample_iter < n_samples; ++sample_iter) { samples1.push_back( metabolomics_data.calculateMAR(metabolomics_data.metabolomicsData_.at(sample_names[sgn1_iter]), metabolomics_data.biochemicalReactions_.at(mar))); } for (size_t sgn2_iter = sgn1_iter + 1; sgn2_iter < sample_names.size(); ++sgn2_iter) { // check if the sample name exists if (metabolomics_data.metabolomicsData_.count(sample_names[sgn2_iter]) == 0) continue; std::cout << "MAR: " << mar << " Sample1: " << sgn1_iter << " Sample2: " << sgn2_iter << std::endl; // initialize the data struct PWStats pw_stats; pw_stats.feature_name = mar; pw_stats.feature_comment = metabolomics_data.biochemicalReactions_.at(mar).equation; pw_stats.sample_name_1 = sample_names[sgn1_iter]; pw_stats.sample_name_2 = sample_names[sgn2_iter]; pw_stats.n1 = n_samples; pw_stats.n2 = n_samples; // sample the MAR data std::vector<float> samples2; for (int sample_iter = 0; sample_iter < n_samples; ++sample_iter) { samples2.push_back( metabolomics_data.calculateMAR(metabolomics_data.metabolomicsData_.at(sample_names[sgn2_iter]), metabolomics_data.biochemicalReactions_.at(mar))); } // calculate the moments and fold change float ave1, adev1, sdev1, var1, skew1, curt1; SmartPeak::moment(&samples1[0], n_samples, ave1, adev1, sdev1, var1, skew1, curt1); float ave2, adev2, sdev2, var2, skew2, curt2; SmartPeak::moment(&samples2[0], n_samples, ave2, adev2, sdev2, var2, skew2, curt2); pw_stats.fold_change = std::log2(ave2 / ave1); // calculate the 95% CI pw_stats.confidence_interval_1 = confidence(samples1, alpha); pw_stats.confidence_interval_2 = confidence(samples2, alpha); //// calculate the K-S prob //float d, prob; //kstwo(&samples1[0], n_samples, &samples2[0], n_samples, d, prob); //pw_stats.prob = prob; //if (prob < 0.05) { if ((pw_stats.confidence_interval_1.first > pw_stats.confidence_interval_2.second || pw_stats.confidence_interval_1.second < pw_stats.confidence_interval_2.first) && (pw_stats.fold_change > fc || pw_stats.fold_change < -fc)) { pw_stats.is_significant = true; std::vector<PWStats> pw_stats_vec = { pw_stats }; auto found = pw_data.emplace(mar, pw_stats_vec); if (!found.second) { pw_data.at(mar).push_back(pw_stats); } } } } } return pw_data; } /* @brief Find significant pair-wise MARS between pre/post samples (one pre vs one post) */ PWData PWPrePostComparison(BiochemicalReactionModel<float>& metabolomics_data, std::vector<std::string>& pre_samples, std::vector<std::string>& post_samples, const int& n_pairs, int n_samples = 10000, float alpha = 0.05, float fc = 1.0) { PWData pw_data; for (const std::string& mar : metabolomics_data.reaction_ids_) { for (size_t pairs_iter = 0; pairs_iter < n_pairs; ++pairs_iter) { // check if the sample name exists if (metabolomics_data.metabolomicsData_.count(pre_samples[pairs_iter]) == 0 || metabolomics_data.metabolomicsData_.count(post_samples[pairs_iter]) == 0) continue; std::cout << "MAR: " << mar << " Pair: " << pairs_iter << std::endl; // initialize the data struct PWStats pw_stats; pw_stats.feature_name = mar; pw_stats.feature_comment = metabolomics_data.biochemicalReactions_.at(mar).equation; pw_stats.sample_name_1 = pre_samples[pairs_iter]; pw_stats.sample_name_2 = post_samples[pairs_iter]; pw_stats.n1 = n_samples; pw_stats.n2 = n_samples; // sample the MAR data std::vector<float> samples1, samples2; for (int sample_iter = 0; sample_iter < n_samples; ++sample_iter) { samples1.push_back( metabolomics_data.calculateMAR(metabolomics_data.metabolomicsData_.at(pre_samples[pairs_iter]), metabolomics_data.biochemicalReactions_.at(mar))); samples2.push_back( metabolomics_data.calculateMAR(metabolomics_data.metabolomicsData_.at(post_samples[pairs_iter]), metabolomics_data.biochemicalReactions_.at(mar))); } // calculate the moments and fold change float ave1, adev1, sdev1, var1, skew1, curt1; moment(&samples1[0], n_samples, ave1, adev1, sdev1, var1, skew1, curt1); float ave2, adev2, sdev2, var2, skew2, curt2; moment(&samples2[0], n_samples, ave2, adev2, sdev2, var2, skew2, curt2); pw_stats.fold_change = std::log2(ave2 / ave1); // calculate the 95% CI pw_stats.confidence_interval_1 = confidence(samples1, alpha); pw_stats.confidence_interval_2 = confidence(samples2, alpha); //// calculate the K-S prob //float d, prob; //kstwo(&samples1[0], n_samples, &samples2[0], n_samples, d, prob); //pw_stats.prob = prob; //if (prob < 0.05) { if ((pw_stats.confidence_interval_1.first > pw_stats.confidence_interval_2.second || pw_stats.confidence_interval_1.second < pw_stats.confidence_interval_2.first) && (pw_stats.fold_change > fc || pw_stats.fold_change < -fc)) { pw_stats.is_significant = true; std::vector<PWStats> pw_stats_vec = { pw_stats }; auto found = pw_data.emplace(mar, pw_stats_vec); if (!found.second) { pw_data.at(mar).push_back(pw_stats); } } } } return pw_data; } /* @brief Find significant pair-wise MARS between pre/post samples (one pre vs one post) */ PWData PWPrePostDifference(BiochemicalReactionModel<float>& metabolomics_data, std::vector<std::string>& pre_samples, std::vector<std::string>& post_samples, const int& n_pairs, int n_samples = 10000, float alpha = 0.05, float fc = 0.43229) { PWData pw_data; for (const std::string& mar : metabolomics_data.reaction_ids_) { for (size_t pairs_iter1 = 0; pairs_iter1 < n_pairs; ++pairs_iter1) { std::string sample_name_1 = post_samples[pairs_iter1] + "-" + pre_samples[pairs_iter1]; // sample the MAR data std::vector<float> samples1; for (int sample_iter = 0; sample_iter < n_samples; ++sample_iter) { float s1 = metabolomics_data.calculateMAR(metabolomics_data.metabolomicsData_.at(pre_samples[pairs_iter1]), metabolomics_data.biochemicalReactions_.at(mar)); float s2 = metabolomics_data.calculateMAR(metabolomics_data.metabolomicsData_.at(post_samples[pairs_iter1]), metabolomics_data.biochemicalReactions_.at(mar)); samples1.push_back(s2 - s1); } // calculate the moments and fold change float ave1, adev1, sdev1, var1, skew1, curt1; moment(&samples1[0], n_samples, ave1, adev1, sdev1, var1, skew1, curt1); // calculate the 95% CI std::pair<float, float> confidence_interval_1 = confidence(samples1, alpha); for (size_t pairs_iter2 = pairs_iter1 + 1; pairs_iter2 < n_pairs; ++pairs_iter2) { std::cout << "MAR: " << mar << " Pair1: " << pairs_iter1 << " Pair2: " << pairs_iter2 << std::endl; std::string sample_name_2 = post_samples[pairs_iter2] + "-" + pre_samples[pairs_iter2]; // initialize the data struct PWStats pw_stats; pw_stats.feature_name = mar; pw_stats.feature_comment = metabolomics_data.biochemicalReactions_.at(mar).equation; pw_stats.sample_name_1 = sample_name_1; pw_stats.sample_name_2 = sample_name_2; pw_stats.n1 = n_samples; pw_stats.n2 = n_samples; // sample the MAR data std::vector<float> samples2; for (int sample_iter = 0; sample_iter < n_samples; ++sample_iter) { float s1 = metabolomics_data.calculateMAR(metabolomics_data.metabolomicsData_.at(pre_samples[pairs_iter2]), metabolomics_data.biochemicalReactions_.at(mar)); float s2 = metabolomics_data.calculateMAR(metabolomics_data.metabolomicsData_.at(post_samples[pairs_iter2]), metabolomics_data.biochemicalReactions_.at(mar)); samples2.push_back(s2 - s1); } // calculate the moments and fold change float ave2, adev2, sdev2, var2, skew2, curt2; moment(&samples2[0], n_samples, ave2, adev2, sdev2, var2, skew2, curt2); // calculate the 95% CI std::pair<float, float> confidence_interval_2 = confidence(samples2, alpha); // calculate the normalized geometric fold change pw_stats.fold_change = std::log(std::exp(ave2) / std::exp(ave1)) / (std::log(std::exp(ave2) + std::exp(ave1))); pw_stats.confidence_interval_1 = confidence_interval_1; pw_stats.confidence_interval_2 = confidence_interval_2; //if (prob < 0.05) { if ((pw_stats.confidence_interval_1.first > pw_stats.confidence_interval_2.second || pw_stats.confidence_interval_1.second < pw_stats.confidence_interval_2.first) && (pw_stats.fold_change > fc || pw_stats.fold_change < -fc)) { pw_stats.is_significant = true; std::vector<PWStats> pw_stats_vec = { pw_stats }; auto found = pw_data.emplace(mar, pw_stats_vec); if (!found.second) { pw_data.at(mar).push_back(pw_stats); } } } } } return pw_data; } void PWSummary(const PWData& pw_data, PWSampleSummaries& pw_sample_summaries, PWFeatureSummaries& pw_feature_summaries, PWTotalSummary& pw_total_summary) { std::map<std::string, PWSampleSummary> pw_sample_summary_map; std::map<std::string, PWFeatureSummary> pw_feature_summary_map; for (const auto& pw_datum : pw_data) { for (const auto& pw_stats : pw_datum.second) { if (!pw_stats.is_significant) continue; // Samples PWSampleSummary pw_sample_summary; pw_sample_summary.sample_name_1 = pw_stats.sample_name_1; pw_sample_summary.sample_name_2 = pw_stats.sample_name_2; pw_sample_summary.n_significant = 1; std::string key = pw_stats.sample_name_1 + "_vs_" + pw_stats.sample_name_2; auto found_samples = pw_sample_summary_map.emplace(key, pw_sample_summary); if (!found_samples.second) { pw_sample_summary_map.at(key).n_significant += 1; } // Features PWFeatureSummary pw_feature_summary; pw_feature_summary.feature_name = pw_stats.feature_name; pw_feature_summary.n_significant = 1; auto found_features = pw_feature_summary_map.emplace(pw_stats.feature_name, pw_feature_summary); if (!found_features.second) { pw_feature_summary_map.at(pw_stats.feature_name).n_significant += 1; } // Totals pw_total_summary.n_significant_total += 1; pw_total_summary.significant_features.insert(pw_stats.feature_name); pw_total_summary.significant_pairs.insert(key); } } // Samples for (const auto& map : pw_sample_summary_map) pw_sample_summaries.push_back(map.second); std::sort(pw_sample_summaries.begin(), pw_sample_summaries.end(), [](const PWSampleSummary& a, const PWSampleSummary& b) { return a.sample_name_2 < b.sample_name_2; }); std::sort(pw_sample_summaries.begin(), pw_sample_summaries.end(), [](const PWSampleSummary& a, const PWSampleSummary& b) { return a.sample_name_1 < b.sample_name_1; }); // Features for (const auto& map : pw_feature_summary_map) pw_feature_summaries.push_back(map.second); std::sort(pw_feature_summaries.begin(), pw_feature_summaries.end(), [](const PWFeatureSummary& a, const PWFeatureSummary& b) { return a.feature_name < b.feature_name; }); // Totals pw_total_summary.n_significant_features = (int)pw_total_summary.significant_features.size(); pw_total_summary.n_significant_pairs = (int)pw_total_summary.significant_pairs.size(); } bool WritePWData(const std::string& filename, const PWData& pw_data) { // Export the results to file CSVWriter csvwriter(filename); std::vector<std::string> headers = { "Feature", "FeatureComment", "Sample1", "Sample2", "LB1", "LB2", "UB1", "UB2", "Log2(FC)" }; csvwriter.writeDataInRow(headers.begin(), headers.end()); for (const auto& pw_datum : pw_data) { for (const auto& pw_stats : pw_datum.second) { std::vector<std::string> line; line.push_back(pw_stats.feature_name); line.push_back(pw_stats.feature_comment); line.push_back(pw_stats.sample_name_1); line.push_back(pw_stats.sample_name_2); line.push_back(std::to_string(pw_stats.confidence_interval_1.first)); line.push_back(std::to_string(pw_stats.confidence_interval_2.first)); line.push_back(std::to_string(pw_stats.confidence_interval_1.second)); line.push_back(std::to_string(pw_stats.confidence_interval_2.second)); line.push_back(std::to_string(pw_stats.fold_change)); csvwriter.writeDataInRow(line.begin(), line.end()); } } return true; } bool ReadPWData(const std::string& filename, PWData& pw_data) { io::CSVReader<8> data_in(filename); data_in.read_header(io::ignore_extra_column, "Feature", "Sample1", "Sample2", "LB1", "LB2", "UB1", "UB2", "Log2(FC)"); std::string feature_str, sample_1_str, sample_2_str, lb1_str, lb2_str, ub1_str, ub2_str, log2fc_str; while (data_in.read_row(feature_str, sample_1_str, sample_2_str, lb1_str, lb2_str, ub1_str, ub2_str, log2fc_str)) { // parse the .csv file PWStats pw_stats; pw_stats.feature_name = feature_str; pw_stats.sample_name_1 = sample_1_str; pw_stats.sample_name_2 = sample_2_str; pw_stats.confidence_interval_1 = std::make_pair(std::stof(lb1_str), std::stof(ub1_str)); pw_stats.confidence_interval_2 = std::make_pair(std::stof(lb2_str), std::stof(ub2_str)); pw_stats.fold_change = std::stof(log2fc_str); pw_stats.is_significant = true; std::vector<PWStats> pw_stats_vec = { pw_stats }; auto found = pw_data.emplace(feature_str, pw_stats_vec); if (!found.second) { pw_data.at(feature_str).push_back(pw_stats); } } return true; } bool WritePWSampleSummaries(const std::string& filename, const PWSampleSummaries& pw_sample_summaries) { // Export the results to file CSVWriter csvwriter(filename); std::vector<std::string> headers = { "Sample1", "Sample2", "Sig_pairs" }; csvwriter.writeDataInRow(headers.begin(), headers.end()); for (const auto& pw_sample_summary : pw_sample_summaries) { std::vector<std::string> line; line.push_back(pw_sample_summary.sample_name_1); line.push_back(pw_sample_summary.sample_name_2); line.push_back(std::to_string(pw_sample_summary.n_significant)); csvwriter.writeDataInRow(line.begin(), line.end()); } return true; } bool WritePWFeatureSummaries(const std::string& filename, const PWFeatureSummaries& pw_feature_summaries) { // Export the results to file CSVWriter csvwriter(filename); std::vector<std::string> headers = { "Feature", "Sig_features" }; csvwriter.writeDataInRow(headers.begin(), headers.end()); for (const auto& pw_feature_summary : pw_feature_summaries) { std::vector<std::string> line; line.push_back(pw_feature_summary.feature_name); line.push_back(std::to_string(pw_feature_summary.n_significant)); csvwriter.writeDataInRow(line.begin(), line.end()); } return true; }<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE modellogger test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/ModelLogger.h> #include <EvoNet/ml/ModelBuilder.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(ModelLogger1) BOOST_AUTO_TEST_CASE(constructor) { ModelLogger<float>* ptr = nullptr; ModelLogger<float>* nullPointer = nullptr; ptr = new ModelLogger<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { ModelLogger<float>* ptr = nullptr; ptr = new ModelLogger<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(gettersAndSetters1) { ModelLogger<float> model_logger; BOOST_CHECK(!model_logger.getLogTimeEpoch()); BOOST_CHECK(!model_logger.getLogTrainValMetricEpoch()); BOOST_CHECK(!model_logger.getLogExpectedEpoch()); BOOST_CHECK(!model_logger.getLogWeightsEpoch()); BOOST_CHECK(!model_logger.getLogNodeErrorsEpoch()); BOOST_CHECK(!model_logger.getLogNodeOutputsEpoch()); BOOST_CHECK(!model_logger.getLogNodeDerivativesEpoch()); BOOST_CHECK(!model_logger.getLogNodeInputsEpoch()); BOOST_CHECK_EQUAL(model_logger.getLogDir(), ""); } BOOST_AUTO_TEST_CASE(gettersAndSetters2) { ModelLogger<float> model_logger(true, true, true, true, true, true, true, true); BOOST_CHECK(model_logger.getLogTimeEpoch()); BOOST_CHECK(model_logger.getLogTrainValMetricEpoch()); BOOST_CHECK(model_logger.getLogExpectedEpoch()); BOOST_CHECK(model_logger.getLogWeightsEpoch()); BOOST_CHECK(model_logger.getLogNodeErrorsEpoch()); BOOST_CHECK(model_logger.getLogNodeOutputsEpoch()); BOOST_CHECK(model_logger.getLogNodeDerivativesEpoch()); BOOST_CHECK(model_logger.getLogNodeInputsEpoch()); model_logger.setLogDir("NotHere!"); BOOST_CHECK_EQUAL(model_logger.getLogDir(), "NotHere!"); } BOOST_AUTO_TEST_CASE(initLogs) { Model<float> model; model.setName("Model1"); ModelLogger<float> model_logger(true, true, true, true, true, true, true, true); model_logger.initLogs(model); BOOST_CHECK_EQUAL(model_logger.getLogTimeEpochCSVWriter().getFilename(), "Model1_TimePerEpoch.csv"); BOOST_CHECK_EQUAL(model_logger.getLogTimeEpochCSVWriter().getLineCount(), 0); BOOST_CHECK_EQUAL(model_logger.getLogTrainValMetricEpochCSVWriter().getFilename(), "Model1_TrainValMetricsPerEpoch.csv"); BOOST_CHECK_EQUAL(model_logger.getLogTrainValMetricEpochCSVWriter().getLineCount(), 0); BOOST_CHECK_EQUAL(model_logger.getLogExpectedEpochCSVWriter().getFilename(), "Model1_ExpectedPerEpoch.csv"); BOOST_CHECK_EQUAL(model_logger.getLogExpectedEpochCSVWriter().getLineCount(), 0); BOOST_CHECK_EQUAL(model_logger.getLogWeightsEpochCSVWriter().getFilename(), "Model1_WeightsPerEpoch.csv"); BOOST_CHECK_EQUAL(model_logger.getLogWeightsEpochCSVWriter().getLineCount(), 0); BOOST_CHECK_EQUAL(model_logger.getLogNodeErrorsEpochCSVWriter().getFilename(), "Model1_NodeErrorsPerEpoch.csv"); BOOST_CHECK_EQUAL(model_logger.getLogNodeErrorsEpochCSVWriter().getLineCount(), 0); BOOST_CHECK_EQUAL(model_logger.getLogNodeOutputsEpochCSVWriter().getFilename(), "Model1_NodeOutputsPerEpoch.csv"); BOOST_CHECK_EQUAL(model_logger.getLogNodeOutputsEpochCSVWriter().getLineCount(), 0); BOOST_CHECK_EQUAL(model_logger.getLogNodeDerivativesEpochCSVWriter().getFilename(), "Model1_NodeDerivativesPerEpoch.csv"); BOOST_CHECK_EQUAL(model_logger.getLogNodeDerivativesEpochCSVWriter().getLineCount(), 0); BOOST_CHECK_EQUAL(model_logger.getLogNodeInputsEpochCSVWriter().getFilename(), "Model1_NodeInputsPerEpoch.csv"); BOOST_CHECK_EQUAL(model_logger.getLogNodeInputsEpochCSVWriter().getLineCount(), 0); } BOOST_AUTO_TEST_CASE(logTimePerEpoch) { Model<float> model; model.setName("Model1"); ModelLogger<float> model_logger(true, false, false, false, false, false, false, false); model_logger.initLogs(model); model_logger.logTimePerEpoch(model, 0); model_logger.logTimePerEpoch(model, 1); // [TODO: read in and check] } BOOST_AUTO_TEST_CASE(logTrainValMetricsPerEpoch) { Model<float> model; model.setName("Model1"); ModelLogger<float> model_logger(false, true, false, false, false, false, false, false); model_logger.initLogs(model); std::vector<std::string> training_metric_names = { "Error" }; std::vector<std::string> validation_metric_names = { "Error" }; std::vector<float> training_metrics, validation_metrics; training_metrics = { 10.0f }; validation_metrics = { 10.1f }; model_logger.logTrainValMetricsPerEpoch(model, training_metric_names, validation_metric_names, training_metrics, validation_metrics, 0); training_metrics = { 1.0f }; validation_metrics = { 1.1f }; model_logger.logTrainValMetricsPerEpoch(model, training_metric_names, validation_metric_names, training_metrics, validation_metrics, 1); // [TODO: read in and check] } BOOST_AUTO_TEST_CASE(logExpectedOutputPerEpoch) { // make the model ModelBuilder<float> model_builder; Model<float> model; model.setName("Model1"); std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", 2); std::vector<std::string> node_names_test = { "Input_0", "Input_1" }; int batch_size = 2; int memory_size = 1; Eigen::Tensor<float, 3> expected_values(batch_size, memory_size, (int)node_names.size()); expected_values.setConstant(2.0f); model.setBatchAndMemorySizes(batch_size, memory_size); ModelLogger<float> model_logger(false, false, true, false, false, false, false, false); model_logger.initLogs(model); model_logger.logExpectedOutputPerEpoch(model, node_names, expected_values, 0); model_logger.logExpectedOutputPerEpoch(model, node_names, expected_values, 1); // [TODO: read in and check] } BOOST_AUTO_TEST_CASE(logWeightsPerEpoch) { // make the model ModelBuilder<float> model_builder; Model<float> model; model.setName("Model1"); std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", 2); node_names = model_builder.addFullyConnected(model, "Hidden", "Mod1", node_names, 2, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<ProdOp<float>>(ProdOp<float>()), std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()), std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9))); int batch_size = 2; int memory_size = 1; ModelLogger<float> model_logger(false, false, false, true, false, false, false, false); model_logger.initLogs(model); model_logger.logWeightsPerEpoch(model, 0, {}); model_logger.logWeightsPerEpoch(model, 1, {}); // [TODO: read in and check] } BOOST_AUTO_TEST_CASE(logNodeErrorsPerEpoch) { // make the model ModelBuilder<float> model_builder; Model<float> model; model.setName("Model1"); std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", 2); node_names = model_builder.addFullyConnected(model, "Hidden", "Mod1", node_names, 2, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<ProdOp<float>>(ProdOp<float>()), std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()), std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9))); int batch_size = 2; int memory_size = 1; ModelLogger<float> model_logger(false, false, false, false, true, false, false, false); model_logger.initLogs(model); model_logger.logNodeErrorsPerEpoch(model, 0, node_names); model_logger.logNodeErrorsPerEpoch(model, 1, node_names); // [TODO: read in and check] } BOOST_AUTO_TEST_CASE(logNodeOutputsPerEpoch) { // make the model ModelBuilder<float> model_builder; Model<float> model; model.setName("Model1"); std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", 2); node_names = model_builder.addFullyConnected(model, "Hidden", "Mod1", node_names, 2, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<ProdOp<float>>(ProdOp<float>()), std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()), std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9))); int batch_size = 2; int memory_size = 1; ModelLogger<float> model_logger(false, false, false, false, false, true, false, false); model_logger.initLogs(model); model_logger.logNodeOutputsPerEpoch(model, 0, node_names); model_logger.logNodeOutputsPerEpoch(model, 1, node_names); // [TODO: read in and check] } BOOST_AUTO_TEST_CASE(logNodeDerivativesPerEpoch) { // make the model ModelBuilder<float> model_builder; Model<float> model; model.setName("Model1"); std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", 2); node_names = model_builder.addFullyConnected(model, "Hidden", "Mod1", node_names, 2, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<ProdOp<float>>(ProdOp<float>()), std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()), std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9))); int batch_size = 2; int memory_size = 1; ModelLogger<float> model_logger(false, false, false, false, false, false, true, false); model_logger.initLogs(model); model_logger.logNodeDerivativesPerEpoch(model, 0, node_names); model_logger.logNodeDerivativesPerEpoch(model, 1, node_names); // [TODO: read in and check] } BOOST_AUTO_TEST_CASE(logNodeInputsPerEpoch) { // make the model ModelBuilder<float> model_builder; Model<float> model; model.setName("Model1"); std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", 2); node_names = model_builder.addFullyConnected(model, "Hidden", "Mod1", node_names, 2, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<ProdOp<float>>(ProdOp<float>()), std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()), std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9))); int batch_size = 2; int memory_size = 1; ModelLogger<float> model_logger(false, false, false, false, false, false, false, true); model_logger.initLogs(model); model_logger.logNodeDerivativesPerEpoch(model, 0, node_names); model_logger.logNodeDerivativesPerEpoch(model, 1, node_names); // [TODO: read in and check] } BOOST_AUTO_TEST_CASE(writeLogs) { ModelBuilder<float> model_builder; Model<float> model; model.setName("Model1"); std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", 2); node_names = model_builder.addFullyConnected(model, "Hidden", "Mod1", node_names, 2, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<ProdOp<float>>(ProdOp<float>()), std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()), std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>()), std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<SGDOp<float>>(SGDOp<float>(0.1, 0.9))); int batch_size = 2; int memory_size = 1; Eigen::Tensor<float, 3> expected_values(batch_size, memory_size, (int)node_names.size()); expected_values.setConstant(2.0f); model.setBatchAndMemorySizes(batch_size, memory_size); ModelLogger<float> model_logger(true, true, true, true, true, true, true, true); model_logger.initLogs(model); std::vector<std::string> training_metric_names = { "Error" }; std::vector<std::string> validation_metric_names = { "Error" }; std::vector<float> training_metrics, validation_metrics; training_metrics = { 20.0f }; validation_metrics = { 20.1f }; model_logger.writeLogs(model, 0, training_metric_names, validation_metric_names, training_metrics, validation_metrics, node_names, expected_values, node_names, node_names, node_names, node_names, {}); training_metrics = { 2.0f }; validation_metrics = { 2.1f }; model_logger.writeLogs(model, 1, training_metric_names, validation_metric_names, training_metrics, validation_metrics, node_names, expected_values, node_names, node_names, node_names, node_names, {}); // [TODO: read in and check] } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELTRAINEREXPERIMENTAL_H #define EVONET_MODELTRAINEREXPERIMENTAL_H // .h #include <EvoNet/ml/ModelTrainer.h> // .cpp namespace EvoNet { /** @brief Experimental features of `ModelTrainer` */ template<typename TensorT, typename InterpreterT> class ModelTrainerExperimental: public ModelTrainer<TensorT, InterpreterT> { public: ModelTrainerExperimental() = default; ///< Default constructor ~ModelTrainerExperimental() = default; ///< Default destructor /// Overrides used in all examples void trainingModelLogger(const int& n_epochs, Model<TensorT>& model, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error) override; void trainingModelLogger(const int& n_epochs, Model<TensorT>& model, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error_train, const TensorT& model_error_test, const Eigen::Tensor<TensorT, 1>& model_metrics_train, const Eigen::Tensor<TensorT, 1>& model_metrics_test) override; void validationModelLogger(const int& n_epochs, Model<TensorT>& model, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error) override; void evaluationModelLogger(const int& n_epochs, Model<TensorT>& model, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes) override; void evaluationModelLogger(const int& n_epochs, Model<TensorT>& model, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const Eigen::Tensor<TensorT, 1>& model_metrics) override; }; template<typename TensorT, typename InterpreterT> inline void ModelTrainerExperimental<TensorT, InterpreterT>::trainingModelLogger(const int& n_epochs, Model<TensorT>& model, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error) { // Left blank intentionally to prevent writing of files during population training } template<typename TensorT, typename InterpreterT> inline void ModelTrainerExperimental<TensorT, InterpreterT>::trainingModelLogger(const int& n_epochs, Model<TensorT>& model, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error_train, const TensorT& model_error_test, const Eigen::Tensor<TensorT, 1>& model_metrics_train, const Eigen::Tensor<TensorT, 1>& model_metrics_test) { // Set the defaults model_logger.setLogTimeEpoch(true); model_logger.setLogTrainValMetricEpoch(true); model_logger.setLogExpectedEpoch(false); model_logger.setLogNodeInputsEpoch(false); model_logger.setLogNodeOutputsEpoch(false); // initialize all logs if (n_epochs == 0) { model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeInputsEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_logger.initLogs(model); } // Per n epoch logging if (n_epochs % 1000 == 0) { model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeInputsEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_interpreter.getModelResults(model, true, false, false, true); } // Create the metric headers and data arrays std::vector<std::string> log_train_headers = { "Train_Error" }; std::vector<std::string> log_test_headers = { "Test_Error" }; std::vector<TensorT> log_train_values = { model_error_train }; std::vector<TensorT> log_test_values = { model_error_test }; int metric_iter = 0; for (const std::string& metric_name : this->getMetricNamesLinearized()) { log_train_headers.push_back(metric_name); log_test_headers.push_back(metric_name); log_train_values.push_back(model_metrics_train(metric_iter)); log_test_values.push_back(model_metrics_test(metric_iter)); ++metric_iter; } model_logger.writeLogs(model, n_epochs, log_train_headers, log_test_headers, log_train_values, log_test_values, output_nodes, expected_values, {}, output_nodes, {}, input_nodes, {}); } template<typename TensorT, typename InterpreterT> inline void ModelTrainerExperimental<TensorT, InterpreterT>::validationModelLogger(const int& n_epochs, Model<TensorT>& model, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error) { // Left blank intentionally to prevent writing of files during population validation } template<typename TensorT, typename InterpreterT> inline void ModelTrainerExperimental<TensorT, InterpreterT>::evaluationModelLogger(const int& n_epochs, Model<TensorT>& model, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes) { // Left blank intentionally to prevent writing of files during population evaluation } template<typename TensorT, typename InterpreterT> inline void ModelTrainerExperimental<TensorT, InterpreterT>::evaluationModelLogger(const int& n_epochs, Model<TensorT>& model, InterpreterT& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const Eigen::Tensor<TensorT, 1>& model_metrics) { // Set the defaults model_logger.setLogTimeEpoch(true); model_logger.setLogTrainValMetricEpoch(true); model_logger.setLogExpectedEpoch(false); model_logger.setLogNodeInputsEpoch(false); model_logger.setLogNodeOutputsEpoch(false); // initialize all logs if (n_epochs == 0) { model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeInputsEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_logger.initLogs(model); } // Per n epoch logging if (n_epochs % 1 == 0) { // FIXME model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeInputsEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_interpreter.getModelResults(model, true, false, false, true); } // Create the metric headers and data arrays std::vector<std::string> log_headers; std::vector<TensorT> log_values; int metric_iter = 0; for (const std::string& metric_name : this->getMetricNamesLinearized()) { log_headers.push_back(metric_name); log_values.push_back(model_metrics(metric_iter)); ++metric_iter; } model_logger.writeLogs(model, n_epochs, log_headers, {}, log_values, {}, output_nodes, expected_values, {}, output_nodes, {}, input_nodes, {}); } } #endif //EVONET_MODELTRAINEREXPERIMENTAL_H<file_sep>.. include:: api/library_root.rst<file_sep>include(${CMAKE_ROOT}/Modules/ExternalProject.cmake) set_property (DIRECTORY PROPERTY EP_BASE Dependencies) set (DEPENDENCIES) set (EXTRA_CMAKE_ARGS) set (BOOST_VERSION 1.67.0) message (STATUS "Adding Boost ${BOOST_VERSION} as an external project.") if (WIN32) set (BOOST_BOOTSTRAP_CMD bootstrap.bat) set (BOOST_BUILD_CMD b2.exe) elseif (UNIX) set (BOOST_BOOTSTRAP_CMD ./bootstrap.sh) set (BOOST_BUILD_CMD ./b2) endif () if (CMAKE_SIZEOF_VOID_P MATCHES "8") set(BOOST_ADDR_MODEL 64 CACHE INTERNAL "Architecture-bits") set(BOOST_ARCH_MODEL "address-model=64") else() set(BOOST_ADDR_MODEL 32 CACHE INTERNAL "Architecture-bits") set(BOOST_ARCH_MODEL "") endif() message(STATUS "ADDRESSMODEL is: ${BOOST_ADDR_MODEL}-bit") if (NOT BOOST_ADDR_MODEL MATCHES "32|64") Message(FATAL_ERROR "BOOST_ADDR_MODEL is neither 32 nor 64! Please correct this!") endif() if (MSVC) #if (WIN32) ## check that the console environment has a cl.exe architecture which is identical to the VS Generator ## If cl.exe builds 32-bit libs and VS Generator is Win64, we'd end up with mixed 32bit/64bit libraries, depending on how each lib is build (Cmake, bjam, nmake) execute_process(COMMAND "cl.exe" OUTPUT_VARIABLE cl_out ERROR_VARIABLE cl_out) if (cl_out MATCHES ".*x64.*") message(STATUS "Cl.exe produces: 64-bit") set(CL_ADDRESSMODEL 64) elseif (cl_out MATCHES ".*x86.*") message(STATUS "Cl.exe produces: 32-bit") set(CL_ADDRESSMODEL 32) else() message(FATAL_ERROR "Could not determine if cl.exe builds x86 or x64 apps. Make sure cl.exe is available in your environment!") endif() if (NOT (CL_ADDRESSMODEL EQUAL BOOST_ADDR_MODEL)) message(FATAL_ERROR "cl.exe (${CL_ADDRESSMODEL}-bit) and Visual Studio Generator (${BOOST_ADDR_MODEL}-bit) do not match. Please fix your PATH environment to find the proper cl.exe or use/omit the Win64 generator.") endif() if (CMAKE_GENERATOR MATCHES ".*Visual Studio 8 2005.*") set(BOOST_TOOLSET msvc-8.0) elseif (CMAKE_GENERATOR MATCHES ".*Visual Studio 9 2008.*") set(BOOST_TOOLSET msvc-19.0) elseif (CMAKE_GENERATOR MATCHES ".*Visual Studio 10.*") set(BOOST_TOOLSET msvc-10.0) elseif (CMAKE_GENERATOR MATCHES ".*Visual Studio 11.*") set(BOOST_TOOLSET msvc-11.0) elseif (CMAKE_GENERATOR MATCHES ".*Visual Studio 12.*") set(BOOST_TOOLSET msvc-12.0) elseif (CMAKE_GENERATOR MATCHES ".*Visual Studio 14.*") set(BOOST_TOOLSET msvc-14.0) elseif (CMAKE_GENERATOR MATCHES ".*Visual Studio 15.*") set(BOOST_TOOLSET msvc-15.0) else() if (OVERRIDE_GENERATOR) message(FATAL_ERROR "Chosen to override the Generator check, proceed with caution.") else() message(FATAL_ERROR "Please use 'Visual Studio ?? [Win64]' (??={8 2005, 9 2008, 10, 11, 12, 14, 15}) as Generator - identical to the MSVC toolchain you plan to use! There will be errors (mostly missing libraries). Under very special circumstances, you can override this with -DOVERRIDE_GENERATOR=On.") endif() endif() set (BOOST_BUILD_ARGS address-model=${BOOST_ADDR_MODEL} ${BOOST_ARCH_MODEL} --with-test toolset=${BOOST_TOOLSET} variant=release link=static --prefix=${CMAKE_CURRENT_BINARY_DIR}/Dependencies/Source/boost/build install) else() ## linux/macos if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") if(APPLE) set(BOOST_TOOLSET "clang-darwin") else() set(_boost_bootstrap_toolchain "clang") set(BOOST_TOOLSET "clang") endif() elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU*") if(APPLE) ## For Apples old GCC (tag in lib name will be xgcc) set(BOOST_TOOLSET "darwin") else() set(BOOST_TOOLSET "gcc") endif() endif() set (BOOST_BUILD_ARGS --with-test --prefix=${CMAKE_CURRENT_BINARY_DIR}/Dependencies/Source/boost/build install) endif() ExternalProject_Add(boost #PREFIX ${CMAKE_CURRENT_BINARY_DIR}/boost GIT_REPOSITORY "https://github.com/boostorg/boost.git" GIT_TAG boost-${BOOST_VERSION} UPDATE_COMMAND "" CONFIGURE_COMMAND ${BOOST_BOOTSTRAP_CMD} BUILD_IN_SOURCE 1 BUILD_COMMAND ${BOOST_BUILD_CMD} ${BOOST_BUILD_ARGS} INSTALL_COMMAND "" GIT_PROGRESS 1 LOG_DOWNLOAD 1 LOG_UPDATE 1 LOG_CONFIGURE 1 LOG_BUILD 1 LOG_INSTALL 1 ) list (APPEND EXTRA_CMAKE_ARGS -DBOOST_ROOT=${CMAKE_CURRENT_BINARY_DIR}/Dependencies/Source/boost -DBoost_NO_SYSTEM_PATHS=ON)<file_sep>#ifndef EVONET_MODELRESOURCES_H #define EVONET_MODELRESOURCES_H #include <cereal/access.hpp> // serialiation of private members namespace EvoNet { /** @brief Helper class used by the user to define the device resources */ class ModelDevice { public: ModelDevice() = default; ///< Default constructor ModelDevice(const int& id, const int& n_engines) : id_(id), n_engines_(n_engines) {}; ///< Constructor ModelDevice(const int& id) : id_(id) {}; ///< Constructor ~ModelDevice() = default; ///< Destructor void setID(const int& id) { id_ = id;}; void setNEngines(const int& n_engines) { n_engines_ = n_engines; }; int getID() const { return id_; }; int getNEngines() const { return n_engines_; }; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(id_, n_engines_); } int id_; ///< ID of the device int n_engines_ = -1; ///< the number of threads (CPU) or asynchroous engines (GPU) available on the device }; /** @brief List of available devices for training each model The ModelTrainer will invoke the correct ModelInterpreter based on the DeviceType */ typedef std::vector<ModelDevice> ModelResources; /** @brief List of available resources for training each model in the population. It is assumed that each element in the vector will be given a seperate thread to control the model training. Example 1: 1 GPU per model for a population size of 16 with 4 concurrent training sessions ModelResources.size() = 1; PopulationResources.size() = 4; Example 2: 2 GPU per model for a population size of 16 with 4 concurrent training sessions ModelResources.size() = 2; PopulationResources.size() = 4; Example 3: 1 CPU Pool per model for a population size of 16 with 4 concurrent training sessions ModelResources.size() = 1; PopulationResources.size() = 4; */ typedef std::vector<ModelResources> PopulationResources; } #endif //EVONET_MODELRESOURCES_H<file_sep>/**TODO: Add copyright*/ #include <SmartPeak/ml/PopulationTrainerDefaultDevice.h> #include <SmartPeak/ml/ModelTrainerDefaultDevice.h> #include <SmartPeak/ml/ModelReplicator.h> #include <SmartPeak/ml/ModelBuilder.h> #include <SmartPeak/ml/Model.h> #include <SmartPeak/io/PopulationTrainerFile.h> #include <SmartPeak/io/ModelInterpreterFileDefaultDevice.h> #include <SmartPeak/io/ModelFile.h> #include <SmartPeak/simulator/MNISTSimulator.h> #include <unsupported/Eigen/CXX11/Tensor> using namespace SmartPeak; // Extended template<typename TensorT> class ModelTrainerExt : public ModelTrainerDefaultDevice<TensorT> { public: /* @brief Basic AutoEncoder @param[in, out] model The network model @param[in] n_inputs The number of input pixels @param[in] n_encodings The length of the encodings layer @param[in] n_hidden The length of the hidden layers @param[in] specify_layers Whether to give the `ModelInterpreter` "hints" as to the correct network structure during graph to tensor compilation */ void makeAE(Model<TensorT>& model, int n_inputs = 784, int n_encodings = 64, int n_hidden_0 = 512, bool specify_layer = true, bool add_norm = true) { model.setId(0); model.setName("VAE"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layer); // Add the Endocer FC layers std::vector<std::string> node_names = model_builder.addFullyConnected(model, "EN0", "EN0", node_names_input, n_hidden_0, std::shared_ptr<ActivationOp<TensorT>>(new ELUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ELUGradOp<TensorT>()), std::shared_ptr<IntegrationOp<TensorT>>(new SumOp<TensorT>()), std::shared_ptr<IntegrationErrorOp<TensorT>>(new SumErrorOp<TensorT>()), std::shared_ptr<IntegrationWeightGradOp<TensorT>>(new SumWeightGradOp<TensorT>()), //std::shared_ptr<WeightInitOp<TensorT>>(new RangeWeightInitOp<TensorT>(0.001, 0.1)), std::shared_ptr<WeightInitOp<TensorT>>(new RandWeightInitOp<TensorT>(node_names_input.size() + n_hidden_0, 2)), std::shared_ptr<SolverOp<TensorT>>(new AdamOp<TensorT>(5e-5, 0.9, 0.999, 0.1)), 0.0f, 0.0f, false, specify_layer); if (add_norm) { node_names = model_builder.addNormalization(model, "EN0-Norm", "EN0-Norm", node_names, std::shared_ptr<ActivationOp<TensorT>>(new LinearOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new LinearGradOp<TensorT>()), std::shared_ptr<WeightInitOp<TensorT>>(new RandWeightInitOp<TensorT>(node_names.size(), 2)), std::shared_ptr<SolverOp<TensorT>>(new AdamOp<TensorT>(5e-5, 0.9, 0.999, 0.1)), 0.0, 0.0, false, specify_layer); } node_names = model_builder.addFullyConnected(model, "EN1", "EN1", node_names, n_hidden_0, std::shared_ptr<ActivationOp<TensorT>>(new ELUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ELUGradOp<TensorT>()), std::shared_ptr<IntegrationOp<TensorT>>(new SumOp<TensorT>()), std::shared_ptr<IntegrationErrorOp<TensorT>>(new SumErrorOp<TensorT>()), std::shared_ptr<IntegrationWeightGradOp<TensorT>>(new SumWeightGradOp<TensorT>()), //std::shared_ptr<WeightInitOp<TensorT>>(new RangeWeightInitOp<TensorT>(0.001, 0.1)), std::shared_ptr<WeightInitOp<TensorT>>(new RandWeightInitOp<TensorT>(node_names.size() + n_hidden_0, 2)), std::shared_ptr<SolverOp<TensorT>>(new AdamOp<TensorT>(5e-5, 0.9, 0.999, 0.1)), 0.0f, 0.0f, false, specify_layer); if (add_norm) { node_names = model_builder.addNormalization(model, "EN1-Norm", "EN1-Norm", node_names, std::shared_ptr<ActivationOp<TensorT>>(new LinearOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new LinearGradOp<TensorT>()), std::shared_ptr<WeightInitOp<TensorT>>(new RandWeightInitOp<TensorT>(node_names.size(), 2)), std::shared_ptr<SolverOp<TensorT>>(new AdamOp<TensorT>(5e-5, 0.9, 0.999, 0.1)), 0.0, 0.0, false, specify_layer); } node_names = model_builder.addFullyConnected(model, "Encoding", "Encoding", node_names, n_encodings, std::shared_ptr<ActivationOp<TensorT>>(new ELUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ELUGradOp<TensorT>()), std::shared_ptr<IntegrationOp<TensorT>>(new SumOp<TensorT>()), std::shared_ptr<IntegrationErrorOp<TensorT>>(new SumErrorOp<TensorT>()), std::shared_ptr<IntegrationWeightGradOp<TensorT>>(new SumWeightGradOp<TensorT>()), //std::shared_ptr<WeightInitOp<TensorT>>(new RangeWeightInitOp<TensorT>(0.001, 0.1)), std::shared_ptr<WeightInitOp<TensorT>>(new RandWeightInitOp<TensorT>(node_names.size() + n_encodings, 2)), std::shared_ptr<SolverOp<TensorT>>(new AdamOp<TensorT>(5e-5, 0.9, 0.999, 0.1)), 0.0f, 0.0f, false, specify_layer); if (add_norm) { node_names = model_builder.addNormalization(model, "Encoding-Norm", "Encoding-Norm", node_names, std::shared_ptr<ActivationOp<TensorT>>(new LinearOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new LinearGradOp<TensorT>()), std::shared_ptr<WeightInitOp<TensorT>>(new RandWeightInitOp<TensorT>(node_names.size(), 2)), std::shared_ptr<SolverOp<TensorT>>(new AdamOp<TensorT>(5e-5, 0.9, 0.999, 0.1)), 0.0, 0.0, false, specify_layer); } // Add the Decoder FC layers node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names, n_hidden_0, std::shared_ptr<ActivationOp<TensorT>>(new ELUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ELUGradOp<TensorT>()), std::shared_ptr<IntegrationOp<TensorT>>(new SumOp<TensorT>()), std::shared_ptr<IntegrationErrorOp<TensorT>>(new SumErrorOp<TensorT>()), std::shared_ptr<IntegrationWeightGradOp<TensorT>>(new SumWeightGradOp<TensorT>()), //std::shared_ptr<WeightInitOp<TensorT>>(new RangeWeightInitOp<TensorT>(0.001, 0.1)), std::shared_ptr<WeightInitOp<TensorT>>(new RandWeightInitOp<TensorT>(node_names.size() + n_hidden_0, 2)), std::shared_ptr<SolverOp<TensorT>>(new AdamOp<TensorT>(5e-5, 0.9, 0.999, 0.1)), 0.0f, 0.0f, false, specify_layer); if (add_norm) { node_names = model_builder.addNormalization(model, "DE0-Norm", "DE0-Norm", node_names, std::shared_ptr<ActivationOp<TensorT>>(new LinearOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new LinearGradOp<TensorT>()), std::shared_ptr<WeightInitOp<TensorT>>(new RandWeightInitOp<TensorT>(node_names.size(), 2)), std::shared_ptr<SolverOp<TensorT>>(new AdamOp<TensorT>(5e-5, 0.9, 0.999, 0.1)), 0.0, 0.0, false, specify_layer); } node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names, n_hidden_0, std::shared_ptr<ActivationOp<TensorT>>(new ELUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ELUGradOp<TensorT>()), std::shared_ptr<IntegrationOp<TensorT>>(new SumOp<TensorT>()), std::shared_ptr<IntegrationErrorOp<TensorT>>(new SumErrorOp<TensorT>()), std::shared_ptr<IntegrationWeightGradOp<TensorT>>(new SumWeightGradOp<TensorT>()), //std::shared_ptr<WeightInitOp<TensorT>>(new RangeWeightInitOp<TensorT>(0.001, 0.1)), std::shared_ptr<WeightInitOp<TensorT>>(new RandWeightInitOp<TensorT>(node_names.size() + n_hidden_0, 2)), std::shared_ptr<SolverOp<TensorT>>(new AdamOp<TensorT>(5e-5, 0.9, 0.999, 0.1)), 0.0f, 0.0f, false, specify_layer); if (add_norm) { node_names = model_builder.addNormalization(model, "DE1-Norm", "DE1-Norm", node_names, std::shared_ptr<ActivationOp<TensorT>>(new LinearOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new LinearGradOp<TensorT>()), std::shared_ptr<WeightInitOp<TensorT>>(new RandWeightInitOp<TensorT>(node_names.size(), 2)), std::shared_ptr<SolverOp<TensorT>>(new AdamOp<TensorT>(5e-5, 0.9, 0.999, 0.1)), 0.0, 0.0, false, specify_layer); } node_names = model_builder.addFullyConnected(model, "Output", "Output", node_names, n_inputs, std::shared_ptr<ActivationOp<TensorT>>(new ELUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ELUGradOp<TensorT>()), std::shared_ptr<IntegrationOp<TensorT>>(new SumOp<TensorT>()), std::shared_ptr<IntegrationErrorOp<TensorT>>(new SumErrorOp<TensorT>()), std::shared_ptr<IntegrationWeightGradOp<TensorT>>(new SumWeightGradOp<TensorT>()), //std::shared_ptr<WeightInitOp<TensorT>>(new RangeWeightInitOp<TensorT>(0.001, 0.1)), std::shared_ptr<WeightInitOp<TensorT>>(new RandWeightInitOp<TensorT>(node_names.size(), 2)), std::shared_ptr<SolverOp<TensorT>>(new AdamOp<TensorT>(5e-5, 0.9, 0.999, 0.1)), 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names) model.nodes_.at(node_name)->setType(NodeType::output); } void adaptiveTrainerScheduler( const int& n_generations, const int& n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, const std::vector<float>& model_errors) { //if (n_epochs = 1000) { // // anneal the learning rate to 5e-5 //} if (n_epochs % 5000 == 0 && n_epochs != 0 ) { // save the model every 1000 epochs //model_interpreter.getModelResults(model, false, true, false); ModelFile<TensorT> data; data.storeModelBinary(model.getName() + "_" + std::to_string(n_epochs) + "_model.binary", model); ModelInterpreterFileDefaultDevice<TensorT> interpreter_data; interpreter_data.storeModelInterpreterBinary(model.getName() + "_" + std::to_string(n_epochs) + "_interpreter.binary", model_interpreter); } } void trainingModelLogger(const int & n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const TensorT& model_error) { model_logger.setLogTimeEpoch(true); model_logger.setLogTrainValMetricEpoch(true); model_logger.setLogExpectedEpoch(false); if (n_epochs == 0) { model_logger.initLogs(model); } if (n_epochs % 10 == 0) { if (model_logger.getLogExpectedEpoch()) model_interpreter.getModelResults(model, true, false, false); model_logger.writeLogs(model, n_epochs, { "Error" }, {}, { model_error }, {}, output_nodes, expected_values); } } void validationModelLogger(const int & n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const TensorT& model_error) { model_logger.setLogTimeEpoch(false); model_logger.setLogTrainValMetricEpoch(false); model_logger.setLogExpectedEpoch(true); if (n_epochs == 0) { model_logger.initLogs(model); } if (n_epochs % 1 == 0) { if (model_logger.getLogExpectedEpoch()) model_interpreter.getModelResults(model, true, false, false); model_logger.writeLogs(model, n_epochs, {}, { "Error" }, {}, { model_error }, output_nodes, expected_values); } } }; template<typename TensorT> class DataSimulatorExt : public MNISTSimulator<TensorT> { public: void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 2>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_input_pixels = this->validation_data.dimension(1); assert(n_output_nodes == n_input_pixels); assert(n_input_nodes == n_input_pixels); // make a vector of sample_indices Eigen::Tensor<int, 1> sample_indices = this->getTrainingIndices(batch_size, 1); // Reformat the MNIST image data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_pixels; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = this->training_data(sample_indices[batch_iter], nodes_iter); output_data(batch_iter, memory_iter, nodes_iter) = this->training_data(sample_indices[batch_iter], nodes_iter); } } } time_steps.setConstant(1.0f); } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 2>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_input_pixels = this->validation_data.dimension(1); assert(n_output_nodes == n_input_pixels); assert(n_input_nodes == n_input_pixels); // make a vector of sample_indices Eigen::Tensor<int, 1> sample_indices = this->getValidationIndices(batch_size, 1); // Reformat the MNIST image data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_pixels; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = this->validation_data(sample_indices[batch_iter], nodes_iter); output_data(batch_iter, memory_iter, nodes_iter) = this->validation_data(sample_indices[batch_iter], nodes_iter); } } } time_steps.setConstant(1.0f); } }; template<typename TensorT> class ModelReplicatorExt : public ModelReplicator<TensorT> {}; template<typename TensorT> class PopulationTrainerExt : public PopulationTrainerDefaultDevice<TensorT> {}; /** @brief Pixel reconstruction MNIST example whereby all pixels are linearized and read into the model. The model then attempts to reconstruction the pixels using an Auto Encoder network Data processing: - whole image pixels (linearized) 28x28 normalized to 0 to 1 */ void main_AE(const bool& make_model, const bool& train_model) { const int n_hard_threads = std::thread::hardware_concurrency(); const int n_threads = 1; // define the populatin trainer PopulationTrainerExt<float> population_trainer; population_trainer.setNGenerations(1); population_trainer.setLogging(false); // define the population logger PopulationLogger<float> population_logger(true, true); // define the model logger ModelLogger<float> model_logger(true, true, false, false, false, false, false); // define the data simulator const std::size_t input_size = 784; const std::size_t encoding_size = 16; const std::size_t n_hidden = 128; const std::size_t training_data_size = 60000; //60000; const std::size_t validation_data_size = 10000; //10000; DataSimulatorExt<float> data_simulator; // read in the training data std::string training_data_filename, training_labels_filename; //training_data_filename = "/home/user/data/train-images-idx3-ubyte"; //training_labels_filename = "/home/user/data/train-labels-idx1-ubyte"; training_data_filename = "C:/Users/domccl/GitHub/mnist/train-images.idx3-ubyte"; training_labels_filename = "C:/Users/domccl/GitHub/mnist/train-labels.idx1-ubyte"; //training_data_filename = "C:/Users/dmccloskey/Documents/GitHub/mnist/train-images-idx3-ubyte"; //training_labels_filename = "C:/Users/dmccloskey/Documents/GitHub/mnist/train-labels-idx1-ubyte"; data_simulator.readData(training_data_filename, training_labels_filename, true, training_data_size, input_size); // read in the validation data std::string validation_data_filename, validation_labels_filename; //validation_data_filename = "/home/user/data/t10k-images-idx3-ubyte"; //validation_labels_filename = "/home/user/data/t10k-labels-idx1-ubyte"; validation_data_filename = "C:/Users/domccl/GitHub/mnist/t10k-images.idx3-ubyte"; validation_labels_filename = "C:/Users/domccl/GitHub/mnist/t10k-labels.idx1-ubyte"; //validation_data_filename = "C:/Users/dmccloskey/Documents/GitHub/mnist/t10k-images-idx3-ubyte"; //validation_labels_filename = "C:/Users/dmccloskey/Documents/GitHub/mnist/t10k-labels-idx1-ubyte"; data_simulator.readData(validation_data_filename, validation_labels_filename, false, validation_data_size, input_size); data_simulator.unitScaleData(); // Make the input nodes std::vector<std::string> input_nodes; for (int i = 0; i < input_size; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } // Make the output nodes std::vector<std::string> output_nodes; for (int i = 0; i < input_size; ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes.push_back(name); } // define the model trainers and resources for the trainers std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; for (size_t i = 0; i < n_threads; ++i) { ModelResources model_resources = { ModelDevice(0, 1) }; ModelInterpreterDefaultDevice<float> model_interpreter(model_resources); model_interpreters.push_back(model_interpreter); } ModelTrainerExt<float> model_trainer; //model_trainer.setBatchSize(1); // evaluation only model_trainer.setBatchSize(64); model_trainer.setNEpochsTraining(200001); model_trainer.setNEpochsValidation(25); model_trainer.setNEpochsEvaluation(100); model_trainer.setMemorySize(1); model_trainer.setVerbosityLevel(1); model_trainer.setLogging(true, true, true); model_trainer.setFindCycles(false); model_trainer.setFastInterpreter(true); model_trainer.setLossFunctions({ std::shared_ptr<LossFunctionOp<float>>(new MSELossOp<float>(1e-6, 1.0)) //std::shared_ptr<LossFunctionOp<float>>(new BCEWithLogitsLossOp<float>(1e-6, 1e-3)) }); model_trainer.setLossFunctionGrads({ std::shared_ptr<LossFunctionGradOp<float>>(new MSELossGradOp<float>(1e-6, 1.0)) //std::shared_ptr<LossFunctionGradOp<float>>(new BCEWithLogitsLossGradOp<float>(1e-6, 1e-3)) }); model_trainer.setLossOutputNodes({ output_nodes }); // define the model replicator for growth mode ModelReplicatorExt<float> model_replicator; // define the initial population std::cout << "Initializing the population..." << std::endl; Model<float> model; if (make_model) { ModelTrainerExt<float>().makeAE(model, input_size, encoding_size, n_hidden, true, false); } else { // read in the trained model std::cout << "Reading in the model..." << std::endl; const std::string data_dir = "C:/Users/domccl/Desktop/EvoNetExp/MNIST_AE_GPU/GPU2/"; const std::string model_filename = data_dir + "VAE_15000_model.binary"; const std::string interpreter_filename = data_dir + "VAE_15000_interpreter.binary"; ModelFile<float> model_file; model_file.loadModelBinary(model_filename, model); model.setId(1); model.setName("AE1"); ModelInterpreterFileDefaultDevice<float> model_interpreter_file; model_interpreter_file.loadModelInterpreterBinary(interpreter_filename, model_interpreters[0]); } std::vector<Model<float>> population = { model }; if (train_model) { // Evolve the population //std::vector<std::vector<std::tuple<int, std::string, float>>> models_validation_errors_per_generation = population_trainer.evolveModels( // population, model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, population_logger, input_nodes); std::pair<std::vector<float>, std::vector<float>> model_errors = model_trainer.trainModel(model, data_simulator, input_nodes, model_logger, model_interpreters.front()); PopulationTrainerFile<float> population_trainer_file; population_trainer_file.storeModels(population, "MNIST"); } else { // Evaluate the population population_trainer.evaluateModels( population, model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, input_nodes); } } int main(int argc, char** argv) { // run the application main_AE(true, true); return 0; }<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ActivationFunction test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/ActivationFunction.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(activationFunction) /** ReLUOp Tests */ BOOST_AUTO_TEST_CASE(constructorReluOp) { ReLUOp<double>* ptrReLU = nullptr; ReLUOp<double>* nullPointerReLU = nullptr; BOOST_CHECK_EQUAL(ptrReLU, nullPointerReLU); } BOOST_AUTO_TEST_CASE(destructorReluOp) { ReLUOp<double>* ptrReLU = nullptr; ptrReLU = new ReLUOp<double>(); delete ptrReLU; } BOOST_AUTO_TEST_CASE(gettersAndSettersReluOp) { // Test defaults ReLUOp<double> operation_defaults; BOOST_CHECK_CLOSE(operation_defaults.getEps(), 1e-6, 1e-6); BOOST_CHECK_CLOSE(operation_defaults.getMin(), -1e9, 1e-6); BOOST_CHECK_CLOSE(operation_defaults.getMax(), 1e9, 1e-6); // Test setters operation_defaults.setEps(10); operation_defaults.setMin(20); operation_defaults.setMax(30); BOOST_CHECK_CLOSE(operation_defaults.getEps(), 10, 1e-6); BOOST_CHECK_CLOSE(operation_defaults.getMin(), 20, 1e-6); BOOST_CHECK_CLOSE(operation_defaults.getMax(), 30, 1e-6); // Test constructor ReLUOp<double> operation(10, 20, 30); BOOST_CHECK_CLOSE(operation.getEps(), 10, 1e-6); BOOST_CHECK_CLOSE(operation.getMin(), 20, 1e-6); BOOST_CHECK_CLOSE(operation.getMax(), 30, 1e-6); } BOOST_AUTO_TEST_CASE(getNameReLUOp) { ReLUOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "ReLUOp"); } BOOST_AUTO_TEST_CASE(copyReluOp) { std::shared_ptr<ActivationOp<double>> relu_ptr_1 = std::make_shared<ReLUOp<double>>(); std::shared_ptr<ActivationOp<double>> relu_ptr_2; relu_ptr_2 = std::shared_ptr<ActivationOp<double>>(relu_ptr_1); BOOST_CHECK_EQUAL(relu_ptr_1.get(), relu_ptr_2.get()); relu_ptr_2 = std::shared_ptr<ActivationOp<double>>(relu_ptr_1.get()->copy()); BOOST_CHECK_NE(relu_ptr_1.get(), relu_ptr_2.get()); } /** ReLUGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorReluGradOp) { ReLUGradOp<double>* ptrReLUGrad = nullptr; ReLUGradOp<double>* nullPointerReLUGrad = nullptr; BOOST_CHECK_EQUAL(ptrReLUGrad, nullPointerReLUGrad); } BOOST_AUTO_TEST_CASE(destructorReluGradOp) { ReLUGradOp<double>* ptrReLUGrad = nullptr; ptrReLUGrad = new ReLUGradOp<double>(); delete ptrReLUGrad; } BOOST_AUTO_TEST_CASE(getNameReLUGradOp) { ReLUGradOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "ReLUGradOp"); } /** ELUOp Tests */ BOOST_AUTO_TEST_CASE(constructorEluOp) { ELUOp<double>* ptrELU = nullptr; ELUOp<double>* nullPointerELU = nullptr; BOOST_CHECK_EQUAL(ptrELU, nullPointerELU); } BOOST_AUTO_TEST_CASE(destructorEluOp) { ELUOp<double>* ptrELU = nullptr; ptrELU = new ELUOp<double>(); delete ptrELU; } BOOST_AUTO_TEST_CASE(gettersAndSettersEluOp) { ELUOp<double> operation; operation.setAlpha(1.0); BOOST_CHECK_EQUAL(operation.getAlpha(), 1.0); } BOOST_AUTO_TEST_CASE(getNameELUOp) { ELUOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "ELUOp"); } /** ELUGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorEluGradOp) { ELUGradOp<double>* ptrELU = nullptr; ELUGradOp<double>* nullPointerELU = nullptr; BOOST_CHECK_EQUAL(ptrELU, nullPointerELU); } BOOST_AUTO_TEST_CASE(destructorEluGradOp) { ELUGradOp<double>* ptrELU = nullptr; ptrELU = new ELUGradOp<double>(); delete ptrELU; } BOOST_AUTO_TEST_CASE(gettersAndSettersEluGradOp) { ELUGradOp<double> operation; operation.setAlpha(1.0); BOOST_CHECK_EQUAL(operation.getAlpha(), 1.0); } BOOST_AUTO_TEST_CASE(getNameELUGradOp) { ELUGradOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "ELUGradOp"); } /** SigmoidOp Tests */ BOOST_AUTO_TEST_CASE(constructorSigmoidOp) { SigmoidOp<double>* ptrSigmoid = nullptr; SigmoidOp<double>* nullPointerSigmoid = nullptr; BOOST_CHECK_EQUAL(ptrSigmoid, nullPointerSigmoid); } BOOST_AUTO_TEST_CASE(destructorSigmoidOp) { SigmoidOp<double>* ptrSigmoid = nullptr; ptrSigmoid = new SigmoidOp<double>(); delete ptrSigmoid; } BOOST_AUTO_TEST_CASE(getNameSigmoidOp) { SigmoidOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "SigmoidOp"); } /** SigmoidGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorSigmoidGradOp) { SigmoidGradOp<double>* ptrSigmoidGrad = nullptr; SigmoidGradOp<double>* nullPointerSigmoidGrad = nullptr; BOOST_CHECK_EQUAL(ptrSigmoidGrad, nullPointerSigmoidGrad); } BOOST_AUTO_TEST_CASE(destructorSigmoidGradOp) { SigmoidGradOp<double>* ptrSigmoidGrad = nullptr; ptrSigmoidGrad = new SigmoidGradOp<double>(); delete ptrSigmoidGrad; } BOOST_AUTO_TEST_CASE(getNameSigmoidGradOp) { SigmoidGradOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "SigmoidGradOp"); } /** TanHOp Tests */ BOOST_AUTO_TEST_CASE(constructorTanHOp) { TanHOp<double>* ptrTanH = nullptr; TanHOp<double>* nullPointerTanH = nullptr; BOOST_CHECK_EQUAL(ptrTanH, nullPointerTanH); } BOOST_AUTO_TEST_CASE(destructorTanHOp) { TanHOp<double>* ptrTanH = nullptr; ptrTanH = new TanHOp<double>(); delete ptrTanH; } BOOST_AUTO_TEST_CASE(getNameTanHOp) { TanHOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "TanHOp"); } /** TanHGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorTanHGradOp) { TanHGradOp<double>* ptrTanHGrad = nullptr; TanHGradOp<double>* nullPointerTanHGrad = nullptr; BOOST_CHECK_EQUAL(ptrTanHGrad, nullPointerTanHGrad); } BOOST_AUTO_TEST_CASE(destructorTanHGradOp) { TanHGradOp<double>* ptrTanHGrad = nullptr; ptrTanHGrad = new TanHGradOp<double>(); delete ptrTanHGrad; } BOOST_AUTO_TEST_CASE(getNameTanHGradOp) { TanHGradOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "TanHGradOp"); } /** ReTanHOp Tests */ BOOST_AUTO_TEST_CASE(constructorReTanHOp) { ReTanHOp<double>* ptrReTanH = nullptr; ReTanHOp<double>* nullPointerReTanH = nullptr; BOOST_CHECK_EQUAL(ptrReTanH, nullPointerReTanH); } BOOST_AUTO_TEST_CASE(destructorReTanHOp) { ReTanHOp<double>* ptrReTanH = nullptr; ptrReTanH = new ReTanHOp<double>(); delete ptrReTanH; } BOOST_AUTO_TEST_CASE(getNameReTanHOp) { ReTanHOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "ReTanHOp"); } /** ReTanHGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorReTanHGradOp) { ReTanHGradOp<double>* ptrReTanHGrad = nullptr; ReTanHGradOp<double>* nullPointerReTanHGrad = nullptr; BOOST_CHECK_EQUAL(ptrReTanHGrad, nullPointerReTanHGrad); } BOOST_AUTO_TEST_CASE(destructorReTanHGradOp) { ReTanHGradOp<double>* ptrReTanHGrad = nullptr; ptrReTanHGrad = new ReTanHGradOp<double>(); delete ptrReTanHGrad; } BOOST_AUTO_TEST_CASE(getNameReTanHGradOp) { ReTanHGradOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "ReTanHGradOp"); } /** LinearOp Tests */ BOOST_AUTO_TEST_CASE(constructorLinearOp) { LinearOp<double>* ptrLinear = nullptr; LinearOp<double>* nullPointerLinear = nullptr; BOOST_CHECK_EQUAL(ptrLinear, nullPointerLinear); } BOOST_AUTO_TEST_CASE(destructorLinearOp) { LinearOp<double>* ptrLinear = nullptr; ptrLinear = new LinearOp<double>(); delete ptrLinear; } BOOST_AUTO_TEST_CASE(getNameLinearOp) { LinearOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "LinearOp"); } /** LinearGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorLinearGradOp) { LinearGradOp<double>* ptrLinearGrad = nullptr; LinearGradOp<double>* nullPointerLinearGrad = nullptr; BOOST_CHECK_EQUAL(ptrLinearGrad, nullPointerLinearGrad); } BOOST_AUTO_TEST_CASE(destructorLinearGradOp) { LinearGradOp<double>* ptrLinearGrad = nullptr; ptrLinearGrad = new LinearGradOp<double>(); delete ptrLinearGrad; } BOOST_AUTO_TEST_CASE(getNameLinearGradOp) { LinearGradOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "LinearGradOp"); } /** InverseOp Tests */ BOOST_AUTO_TEST_CASE(constructorInverseOp) { InverseOp<double>* ptrInverse = nullptr; InverseOp<double>* nullPointerInverse = nullptr; BOOST_CHECK_EQUAL(ptrInverse, nullPointerInverse); } BOOST_AUTO_TEST_CASE(destructorInverseOp) { InverseOp<double>* ptrInverse = nullptr; ptrInverse = new InverseOp<double>(); delete ptrInverse; } BOOST_AUTO_TEST_CASE(getNameInverseOp) { InverseOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "InverseOp"); } /** InverseGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorInverseGradOp) { InverseGradOp<double>* ptrInverseGrad = nullptr; InverseGradOp<double>* nullPointerInverseGrad = nullptr; BOOST_CHECK_EQUAL(ptrInverseGrad, nullPointerInverseGrad); } BOOST_AUTO_TEST_CASE(destructorInverseGradOp) { InverseGradOp<double>* ptrInverseGrad = nullptr; ptrInverseGrad = new InverseGradOp<double>(); delete ptrInverseGrad; } BOOST_AUTO_TEST_CASE(getNameInverseGradOp) { InverseGradOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "InverseGradOp"); } /** ExponentialOp Tests */ BOOST_AUTO_TEST_CASE(constructorExponentialOp) { ExponentialOp<double>* ptrExponential = nullptr; ExponentialOp<double>* nullPointerExponential = nullptr; BOOST_CHECK_EQUAL(ptrExponential, nullPointerExponential); } BOOST_AUTO_TEST_CASE(destructorExponentialOp) { ExponentialOp<double>* ptrExponential = nullptr; ptrExponential = new ExponentialOp<double>(); delete ptrExponential; } BOOST_AUTO_TEST_CASE(getNameExponentialOp) { ExponentialOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "ExponentialOp"); } /** ExponentialGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorExponentialGradOp) { ExponentialGradOp<double>* ptrExponentialGrad = nullptr; ExponentialGradOp<double>* nullPointerExponentialGrad = nullptr; BOOST_CHECK_EQUAL(ptrExponentialGrad, nullPointerExponentialGrad); } BOOST_AUTO_TEST_CASE(destructorExponentialGradOp) { ExponentialGradOp<double>* ptrExponentialGrad = nullptr; ptrExponentialGrad = new ExponentialGradOp<double>(); delete ptrExponentialGrad; } BOOST_AUTO_TEST_CASE(getNameExponentialGradOp) { ExponentialGradOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "ExponentialGradOp"); } /** LogOp Tests */ BOOST_AUTO_TEST_CASE(constructorLogOp) { LogOp<double>* ptrLog = nullptr; LogOp<double>* nullPointerLog = nullptr; BOOST_CHECK_EQUAL(ptrLog, nullPointerLog); } BOOST_AUTO_TEST_CASE(destructorLogOp) { LogOp<double>* ptrLog = nullptr; ptrLog = new LogOp<double>(); delete ptrLog; } BOOST_AUTO_TEST_CASE(getNameLogOp) { LogOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "LogOp"); } /** LogGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorLogGradOp) { LogGradOp<double>* ptrLogGrad = nullptr; LogGradOp<double>* nullPointerLogGrad = nullptr; BOOST_CHECK_EQUAL(ptrLogGrad, nullPointerLogGrad); } BOOST_AUTO_TEST_CASE(destructorLogGradOp) { LogGradOp<double>* ptrLogGrad = nullptr; ptrLogGrad = new LogGradOp<double>(); delete ptrLogGrad; } BOOST_AUTO_TEST_CASE(getNameLogGradOp) { LogGradOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "LogGradOp"); } /** PowOp Tests */ BOOST_AUTO_TEST_CASE(constructorPowOp) { PowOp<double>* ptrPow = nullptr; PowOp<double>* nullPointerPow = nullptr; BOOST_CHECK_EQUAL(ptrPow, nullPointerPow); } BOOST_AUTO_TEST_CASE(destructorPowOp) { PowOp<double>* ptrPow = nullptr; ptrPow = new PowOp<double>(2); delete ptrPow; } BOOST_AUTO_TEST_CASE(getNamePowOp) { PowOp<double> operation(0.5); BOOST_CHECK_EQUAL(operation.getName(), "PowOp"); } /** PowGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorPowGradOp) { PowGradOp<double>* ptrPowGrad = nullptr; PowGradOp<double>* nullPointerPowGrad = nullptr; BOOST_CHECK_EQUAL(ptrPowGrad, nullPointerPowGrad); } BOOST_AUTO_TEST_CASE(destructorPowGradOp) { PowGradOp<double>* ptrPowGrad = nullptr; ptrPowGrad = new PowGradOp<double>(0.5); delete ptrPowGrad; } BOOST_AUTO_TEST_CASE(getNamePowGradOp) { PowGradOp<double> operation(0.5); BOOST_CHECK_EQUAL(operation.getName(), "PowGradOp"); } /** LeakyReLUOp Tests */ BOOST_AUTO_TEST_CASE(constructorLeakyReLUOp) { LeakyReLUOp<double>* ptrLeakyReLU = nullptr; LeakyReLUOp<double>* nullPointerLeakyReLU = nullptr; BOOST_CHECK_EQUAL(ptrLeakyReLU, nullPointerLeakyReLU); } BOOST_AUTO_TEST_CASE(destructorLeakyReLUOp) { LeakyReLUOp<double>* ptrLeakyReLU = nullptr; ptrLeakyReLU = new LeakyReLUOp<double>(); delete ptrLeakyReLU; } BOOST_AUTO_TEST_CASE(gettersAndSettersLeakyReLUOp) { LeakyReLUOp<double> operation; operation.setAlpha(1.0); BOOST_CHECK_EQUAL(operation.getAlpha(), 1.0); } BOOST_AUTO_TEST_CASE(getNameLeakyReLUOp) { LeakyReLUOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "LeakyReLUOp"); } /** LeakyReLUGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorLeakyReLUGradOp) { LeakyReLUGradOp<double>* ptrLeakyReLU = nullptr; LeakyReLUGradOp<double>* nullPointerLeakyReLU = nullptr; BOOST_CHECK_EQUAL(ptrLeakyReLU, nullPointerLeakyReLU); } BOOST_AUTO_TEST_CASE(destructorLeakyReLUGradOp) { LeakyReLUGradOp<double>* ptrLeakyReLU = nullptr; ptrLeakyReLU = new LeakyReLUGradOp<double>(); delete ptrLeakyReLU; } BOOST_AUTO_TEST_CASE(gettersAndSettersLeakyReLUGradOp) { LeakyReLUGradOp<double> operation; operation.setAlpha(1.0); BOOST_CHECK_EQUAL(operation.getAlpha(), 1.0); } BOOST_AUTO_TEST_CASE(getNameLeakyReLUGradOp) { LeakyReLUGradOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "LeakyReLUGradOp"); } /** SinOp Tests */ BOOST_AUTO_TEST_CASE(constructorSinOp) { SinOp<double>* ptrSin = nullptr; SinOp<double>* nullPointerSin = nullptr; BOOST_CHECK_EQUAL(ptrSin, nullPointerSin); } BOOST_AUTO_TEST_CASE(destructorSinOp) { SinOp<double>* ptrSin = nullptr; ptrSin = new SinOp<double>(); delete ptrSin; } BOOST_AUTO_TEST_CASE(getNameSinOp) { SinOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "SinOp"); } /** SinGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorSinGradOp) { SinGradOp<double>* ptrSinGrad = nullptr; SinGradOp<double>* nullPointerSinGrad = nullptr; BOOST_CHECK_EQUAL(ptrSinGrad, nullPointerSinGrad); } BOOST_AUTO_TEST_CASE(destructorSinGradOp) { SinGradOp<double>* ptrSinGrad = nullptr; ptrSinGrad = new SinGradOp<double>(); delete ptrSinGrad; } BOOST_AUTO_TEST_CASE(getNameSinGradOp) { SinGradOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "SinGradOp"); } /** CosOp Tests */ BOOST_AUTO_TEST_CASE(constructorCosOp) { CosOp<double>* ptrCos = nullptr; CosOp<double>* nullPointerCos = nullptr; BOOST_CHECK_EQUAL(ptrCos, nullPointerCos); } BOOST_AUTO_TEST_CASE(destructorCosOp) { CosOp<double>* ptrCos = nullptr; ptrCos = new CosOp<double>(); delete ptrCos; } BOOST_AUTO_TEST_CASE(getNameCosOp) { CosOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "CosOp"); } /** CosGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorCosGradOp) { CosGradOp<double>* ptrCosGrad = nullptr; CosGradOp<double>* nullPointerCosGrad = nullptr; BOOST_CHECK_EQUAL(ptrCosGrad, nullPointerCosGrad); } BOOST_AUTO_TEST_CASE(destructorCosGradOp) { CosGradOp<double>* ptrCosGrad = nullptr; ptrCosGrad = new CosGradOp<double>(); delete ptrCosGrad; } BOOST_AUTO_TEST_CASE(getNameCosGradOp) { CosGradOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "CosGradOp"); } /** BatchNormOp Tests */ BOOST_AUTO_TEST_CASE(constructorBatchNormOp) { BatchNormOp<double>* ptrBatchNorm = nullptr; BatchNormOp<double>* nullPointerBatchNorm = nullptr; BOOST_CHECK_EQUAL(ptrBatchNorm, nullPointerBatchNorm); } BOOST_AUTO_TEST_CASE(destructorBatchNormOp) { BatchNormOp<double>* ptrBatchNorm = nullptr; ptrBatchNorm = new BatchNormOp<double>(); delete ptrBatchNorm; } BOOST_AUTO_TEST_CASE(getNameBatchNormOp) { BatchNormOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "BatchNormOp"); } /** BatchNormGradOp Tests */ BOOST_AUTO_TEST_CASE(constructorBatchNormGradOp) { BatchNormGradOp<double>* ptrBatchNormGrad = nullptr; BatchNormGradOp<double>* nullPointerBatchNormGrad = nullptr; BOOST_CHECK_EQUAL(ptrBatchNormGrad, nullPointerBatchNormGrad); } BOOST_AUTO_TEST_CASE(destructorBatchNormGradOp) { BatchNormGradOp<double>* ptrBatchNormGrad = nullptr; ptrBatchNormGrad = new BatchNormGradOp<double>(); delete ptrBatchNormGrad; } BOOST_AUTO_TEST_CASE(getNameBatchNormGradOp) { BatchNormGradOp<double> operation; BOOST_CHECK_EQUAL(operation.getName(), "BatchNormGradOp"); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ModelErrorData test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/ModelErrorData.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(ModelErrorData1) BOOST_AUTO_TEST_CASE(constructor) { ModelErrorDataCpu<float>* ptr = nullptr; ModelErrorDataCpu<float>* nullPointer = nullptr; ptr = new ModelErrorDataCpu<float>(); BOOST_CHECK_NE(ptr, nullPointer); delete ptr; } BOOST_AUTO_TEST_CASE(destructor) { ModelErrorDataCpu<float>* ptr = nullptr; ptr = new ModelErrorDataCpu<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(comparison) { ModelErrorDataCpu<float> error, error_test; BOOST_CHECK(error == error_test); } #if COMPILE_WITH_CUDA BOOST_AUTO_TEST_CASE(gettersAndSetters2) { ModelErrorDataGpu<float> error; error.setBatchSize(2); error.setMemorySize(3); error.setNMetrics(4); Eigen::Tensor<float, 2> error_tensor(2, 3), metric(4, 3); error_tensor.setConstant(3); metric.setConstant(4); error.setError(error_tensor); error.setMetric(metric); BOOST_CHECK_EQUAL(error.getBatchSize(), 2); BOOST_CHECK_EQUAL(error.getMemorySize(), 3); BOOST_CHECK_EQUAL(error.getNMetrics(), 4); BOOST_CHECK_EQUAL(error.getError()(0, 0), 3); BOOST_CHECK(error.getErrorStatus().first); BOOST_CHECK(!error.getErrorStatus().second); BOOST_CHECK_EQUAL(error.getMetric()(0, 0), 4); BOOST_CHECK(error.getMetricStatus().first); BOOST_CHECK(!error.getMetricStatus().second); // Test mutability error.getError()(0, 0) = 8; error.getMetric()(0, 0) = 9; BOOST_CHECK_EQUAL(error.getError()(0, 0), 8); BOOST_CHECK_EQUAL(error.getMetric()(0, 0), 9); } BOOST_AUTO_TEST_CASE(syncHAndD2) { ModelErrorDataGpu<float> error; error.setBatchSize(2); error.setMemorySize(3); error.setNMetrics(4); Eigen::Tensor<float, 2> error_tensor(2, 3), metric(4, 3); error_tensor.setConstant(3); metric.setConstant(4); error.setError(error_tensor); error.setMetric(metric); Eigen::GpuStreamDevice stream_device; Eigen::GpuDevice device(&stream_device); error.syncHAndDError(device); error.syncHAndDMetric(device); BOOST_CHECK(!error.getErrorStatus().first); BOOST_CHECK(error.getErrorStatus().second); BOOST_CHECK(!error.getMetricStatus().first); BOOST_CHECK(error.getMetricStatus().second); error.syncHAndDError(device); error.syncHAndDMetric(device); BOOST_CHECK(error.getErrorStatus().first); BOOST_CHECK(!error.getErrorStatus().second); BOOST_CHECK(error.getMetricStatus().first); BOOST_CHECK(!error.getMetricStatus().second); } #endif BOOST_AUTO_TEST_CASE(gettersAndSetters) { ModelErrorDataCpu<float> error; error.setBatchSize(2); error.setMemorySize(3); error.setNMetrics(4); size_t test_error = 2 * 3 * sizeof(float); size_t test_metric = 3 * 4 * sizeof(float); BOOST_CHECK_EQUAL(error.getErrorTensorSize(), test_error); BOOST_CHECK_EQUAL(error.getMetricTensorSize(), test_metric); } BOOST_AUTO_TEST_CASE(gettersAndSetters1) { ModelErrorDataCpu<float> error; error.setBatchSize(2); error.setMemorySize(3); error.setNMetrics(4); Eigen::Tensor<float, 2> error_tensor(2, 3), metric(4, 3); error_tensor.setConstant(3); metric.setConstant(4); error.setError(error_tensor); error.setMetric(metric); BOOST_CHECK_EQUAL(error.getBatchSize(), 2); BOOST_CHECK_EQUAL(error.getMemorySize(), 3); BOOST_CHECK_EQUAL(error.getNMetrics(), 4); BOOST_CHECK_EQUAL(error.getError()(0, 0), 3); BOOST_CHECK(error.getErrorStatus().first); BOOST_CHECK(error.getErrorStatus().second); BOOST_CHECK_EQUAL(error.getMetric()(0, 0), 4); BOOST_CHECK(error.getMetricStatus().first); BOOST_CHECK(error.getMetricStatus().second); // Test mutability error.getError()(0, 0) = 8; error.getMetric()(0, 0) = 9; BOOST_CHECK_EQUAL(error.getError()(0, 0), 8); BOOST_CHECK_EQUAL(error.getMetric()(0, 0), 9); } BOOST_AUTO_TEST_CASE(syncHAndD) { ModelErrorDataCpu<float> error; error.setBatchSize(2); error.setMemorySize(3); error.setNMetrics(4); Eigen::Tensor<float, 2> error_tensor(2, 3), metric(4, 3); error_tensor.setConstant(3); metric.setConstant(4); error.setError(error_tensor); error.setMetric(metric); Eigen::DefaultDevice device;; error.syncHAndDError(device); error.syncHAndDMetric(device); BOOST_CHECK(error.getErrorStatus().first); BOOST_CHECK(error.getErrorStatus().second); BOOST_CHECK(error.getMetricStatus().first); BOOST_CHECK(error.getMetricStatus().second); error.syncHAndDError(device); error.syncHAndDMetric(device); BOOST_CHECK(error.getErrorStatus().first); BOOST_CHECK(error.getErrorStatus().second); BOOST_CHECK(error.getMetricStatus().first); BOOST_CHECK(error.getMetricStatus().second); } BOOST_AUTO_TEST_CASE(initModelErrorData) { ModelErrorDataCpu<float> error; error.initModelErrorData(2, 5, 4); // Test the batch and memory sizes BOOST_CHECK_EQUAL(error.getBatchSize(), 2); BOOST_CHECK_EQUAL(error.getMemorySize(), 5); BOOST_CHECK_EQUAL(error.getNMetrics(), 4); BOOST_CHECK_EQUAL(error.getError()(0, 0), 0.0); BOOST_CHECK_EQUAL(error.getError()(1, 4), 0.0); BOOST_CHECK_EQUAL(error.getMetric()(0, 0), 0.0); BOOST_CHECK_EQUAL(error.getMetric()(3, 4), 0.0); } BOOST_AUTO_TEST_SUITE_END()<file_sep>Using EvoNet ============================================================================= EvoNet is currently a command line application built around particular examples and use cases. <file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE WeightInit test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/WeightInit.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(weightInit) /** RandWeightInitOp Tests */ BOOST_AUTO_TEST_CASE(constructorRandWeightInitOp) { RandWeightInitOp<float>* ptrRandWeightInit = nullptr; RandWeightInitOp<float>* nullPointerRandWeightInit = nullptr; BOOST_CHECK_EQUAL(ptrRandWeightInit, nullPointerRandWeightInit); } BOOST_AUTO_TEST_CASE(destructorRandWeightInitOp) { RandWeightInitOp<float>* ptrRandWeightInit = nullptr; ptrRandWeightInit = new RandWeightInitOp<float>(); delete ptrRandWeightInit; } BOOST_AUTO_TEST_CASE(operationfunctionRandWeightInitOp) { RandWeightInitOp<float> operation(1.0, 2.0); operation = RandWeightInitOp<float>(0); BOOST_CHECK_NE(operation(), 0); operation = RandWeightInitOp<float>(1); BOOST_CHECK_NE(operation(), 1); operation = RandWeightInitOp<float>(10); BOOST_CHECK_NE(operation(), 10); operation = RandWeightInitOp<float>(100); BOOST_CHECK_NE(operation(), 100); } BOOST_AUTO_TEST_CASE(settersAndGettersRandWeightInitOp) { RandWeightInitOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "RandWeightInitOp"); BOOST_CHECK_EQUAL(operation.getParamsAsStr(), "n:1.000000;f:1.000000"); } /** ConstWeightInitOp Tests */ BOOST_AUTO_TEST_CASE(constructorConstWeightInitOp) { ConstWeightInitOp<float>* ptrConstWeightInit = nullptr; ConstWeightInitOp<float>* nullPointerConstWeightInit = nullptr; BOOST_CHECK_EQUAL(ptrConstWeightInit, nullPointerConstWeightInit); } BOOST_AUTO_TEST_CASE(destructorConstWeightInitOp) { ConstWeightInitOp<float>* ptrConstWeightInit = nullptr; ptrConstWeightInit = new ConstWeightInitOp<float>(); delete ptrConstWeightInit; } BOOST_AUTO_TEST_CASE(operationfunctionConstWeightInitOp) { ConstWeightInitOp<float> operation(1); BOOST_CHECK_CLOSE(operation(), 1, 1e-6); } BOOST_AUTO_TEST_CASE(settersAndGettersConstWeightInitOp) { ConstWeightInitOp<float> operation; BOOST_CHECK_EQUAL(operation.getName(), "ConstWeightInitOp"); BOOST_CHECK_EQUAL(operation.getParamsAsStr(), "n:1.000000"); } /** RangeWeightInitOp Tests */ BOOST_AUTO_TEST_CASE(constructorRangeWeightInitOp) { RangeWeightInitOp<float>* ptrRangeWeightInit = nullptr; RangeWeightInitOp<float>* nullPointerRangeWeightInit = nullptr; BOOST_CHECK_EQUAL(ptrRangeWeightInit, nullPointerRangeWeightInit); } BOOST_AUTO_TEST_CASE(destructorRangeWeightInitOp) { RangeWeightInitOp<float>* ptrRangeWeightInit = nullptr; ptrRangeWeightInit = new RangeWeightInitOp<float>(); delete ptrRangeWeightInit; } BOOST_AUTO_TEST_CASE(operationfunctionRangeWeightInitOp) { RangeWeightInitOp<float> operation; operation = RangeWeightInitOp<float>(0, 0); BOOST_CHECK_EQUAL(operation(), 0); operation = RangeWeightInitOp<float>(0, 1); float value = operation(); BOOST_CHECK(value >= 0 && value <= 1); } BOOST_AUTO_TEST_CASE(settersAndGettersRangeWeightInitOp) { RangeWeightInitOp<float> operation(-1,1); BOOST_CHECK_EQUAL(operation.getName(), "RangeWeightInitOp"); BOOST_CHECK_EQUAL(operation.getParamsAsStr(), "lb:-1.000000;ub:1.000000"); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELBUILDEREXPERIMENTAL_H #define EVONET_MODELBUILDEREXPERIMENTAL_H // .h #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/simulator/BiochemicalReaction.h> // AddBiochemicalReactions #include <unsupported/Eigen/CXX11/Tensor> // .cpp #include <EvoNet/core/Preprocessing.h> namespace EvoNet { /** @brief Class to help create complex network models NOTE: the ModelInterpreter class arranges the Tensor layers according to node name ascending order. Therefore, the node name indices are buffered with 0's of length 12 to ensure proper sorting of nodes within a tensor layer. */ template<typename TensorT> class ModelBuilderExperimental: public ModelBuilder<TensorT> { public: ModelBuilderExperimental() = default; ///< Default constructor ~ModelBuilderExperimental() = default; ///< Default destructor /* @brief Convert and add Biochemical reactions to the network model EXPERIMENTAL @param[in, out] Model @param[in] biochemicalReaction The set of biochemical reactions to convert and add @param[in] name Base node names @param[in] module_name Module name @param[in] weight_init The weight initialization for learnable parameters @param[in] solver The solver for learnable parameters **/ void addBiochemicalReactionsSequencialMin(Model<TensorT> & model, const BiochemicalReactions& biochemicalReactions, const std::string & name, const std::string & module_name, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, const int& version, bool specify_layers = false, bool specify_cycles = false); void addReactantsSequentialMin_1(Model<TensorT> & model, const BiochemicalReaction& reaction, const std::string & name, const std::string & module_name, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, std::string& enzyme_complex_name, std::string& enzyme_complex_name_tmp1, std::string& enzyme_complex_name_tmp2, std::string& enzyme_complex_name_result, const bool& is_reverse, bool specify_layers = false, bool specify_cycles = false); void addProductsSequentialMin_1(Model<TensorT> & model, const BiochemicalReaction& reaction, const std::string & name, const std::string & module_name, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, std::string& enzyme_complex_name, std::string& enzyme_complex_name_tmp1, std::string& enzyme_complex_name_tmp2, std::string& enzyme_complex_name_result, const bool& is_reverse, bool specify_layers = false, bool specify_cycles = false); void addReactantsSequentialMin_2(Model<TensorT> & model, const BiochemicalReaction& reaction, const std::string & name, const std::string & module_name, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, std::string& enzyme_complex_name, std::string& enzyme_complex_name_tmp1, std::string& enzyme_complex_name_tmp2, std::string& enzyme_complex_name_result, const bool& is_reverse, bool specify_layers = false, bool specify_cycles = false); void addProductsSequentialMin_2(Model<TensorT> & model, const BiochemicalReaction& reaction, const std::string & name, const std::string & module_name, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, std::string& enzyme_complex_name, std::string& enzyme_complex_name_tmp1, std::string& enzyme_complex_name_tmp2, std::string& enzyme_complex_name_result, const bool& is_reverse, bool specify_layers = false, bool specify_cycles = false); /* @brief Convert and add Biochemical reactions to the network model EXPERIMENTAL TODO: add tests @param[in, out] Model @param[in] biochemicalReaction The set of biochemical reactions to convert and add @param[in] name Base node names @param[in] module_name Module name @param[in] weight_init The weight initialization for learnable parameters (Not used) @param[in] solver The solver for learnable parameters @param[in] add_biases @param[in] specify_layers @param[in] add_met_self_links Add forward/reverse links between met(t) and met(t+1) **/ void addBiochemicalReactionsMLP(Model<TensorT> & model, const BiochemicalReactions& biochemicalReactions, const std::string & module_name, const std::vector<int>& n_fc, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, const bool& add_biases, const bool& specify_layers, const bool& add_met_self_links); void addReactantsMLP_1(Model<TensorT> & model, const BiochemicalReaction& reaction, const std::vector<int>& n_fc, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, const bool& add_biases, const bool& specify_layers, const bool& is_reverse); void addReactantsSequencialMin_1(Model<TensorT> & model, const BiochemicalReaction& reaction, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>> & weight_init, const std::shared_ptr<SolverOp<TensorT>> & solver, const bool& add_biases, const bool& specify_layers, const bool& is_reverse); }; template<typename TensorT> inline void ModelBuilderExperimental<TensorT>::addBiochemicalReactionsSequencialMin(Model<TensorT>& model, const BiochemicalReactions& biochemicalReactions, const std::string & name, const std::string & module_name, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const int& version, bool specify_layers, bool specify_cycles) { for (const auto& biochemicalReaction : biochemicalReactions) { if (!biochemicalReaction.second.used) continue; // Skip specified reactions // intialize the enzyme complex names std::string enzyme_complex_name, enzyme_complex_name_tmp1, enzyme_complex_name_tmp2, enzyme_complex_name_result; // parse the reactants if (version == 1) addReactantsSequentialMin_1(model, biochemicalReaction.second, name, module_name, weight_init, solver, enzyme_complex_name, enzyme_complex_name_tmp1, enzyme_complex_name_tmp2, enzyme_complex_name_result, false, specify_layers, specify_cycles); else if (version == 2) addReactantsSequentialMin_2(model, biochemicalReaction.second, name, module_name, weight_init, solver, enzyme_complex_name, enzyme_complex_name_tmp1, enzyme_complex_name_tmp2, enzyme_complex_name_result, false, specify_layers, specify_cycles); // parse the products if (version == 1) addProductsSequentialMin_1(model, biochemicalReaction.second, name, module_name, weight_init, solver, enzyme_complex_name, enzyme_complex_name_tmp1, enzyme_complex_name_tmp2, enzyme_complex_name_result, false, specify_layers, specify_cycles); else if (version == 2) addProductsSequentialMin_2(model, biochemicalReaction.second, name, module_name, weight_init, solver, enzyme_complex_name, enzyme_complex_name_tmp1, enzyme_complex_name_tmp2, enzyme_complex_name_result, false, specify_layers, specify_cycles); if (biochemicalReaction.second.reversibility) { // flip the products and reactants and repeat the above BiochemicalReaction reverse_reaction = biochemicalReaction.second; reverse_reaction.products_ids = biochemicalReaction.second.reactants_ids; reverse_reaction.products_stoichiometry = biochemicalReaction.second.reactants_stoichiometry; reverse_reaction.reactants_ids = biochemicalReaction.second.products_ids; reverse_reaction.reactants_stoichiometry = biochemicalReaction.second.products_stoichiometry; // initialize the comples names std::string enzyme_complex_name, enzyme_complex_name_tmp1, enzyme_complex_name_tmp2, enzyme_complex_name_result; // parse the reactants if (version == 1) addReactantsSequentialMin_1(model, reverse_reaction, name, module_name, weight_init, solver, enzyme_complex_name, enzyme_complex_name_tmp1, enzyme_complex_name_tmp2, enzyme_complex_name_result, true, specify_layers, specify_cycles); else if (version == 2) addReactantsSequentialMin_1(model, reverse_reaction, name, module_name, weight_init, solver, enzyme_complex_name, enzyme_complex_name_tmp1, enzyme_complex_name_tmp2, enzyme_complex_name_result, true, specify_layers, specify_cycles); // parse the products if (version == 1) addProductsSequentialMin_1(model, reverse_reaction, name, module_name, weight_init, solver, enzyme_complex_name, enzyme_complex_name_tmp1, enzyme_complex_name_tmp2, enzyme_complex_name_result, true, specify_layers, specify_cycles); else if (version == 2) addProductsSequentialMin_2(model, reverse_reaction, name, module_name, weight_init, solver, enzyme_complex_name, enzyme_complex_name_tmp1, enzyme_complex_name_tmp2, enzyme_complex_name_result, true, specify_layers, specify_cycles); } } } template<typename TensorT> inline void ModelBuilderExperimental<TensorT>::addReactantsSequentialMin_1(Model<TensorT>& model, const BiochemicalReaction & reaction, const std::string & name, const std::string & module_name, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, std::string & enzyme_complex_name, std::string & enzyme_complex_name_tmp1, std::string & enzyme_complex_name_tmp2, std::string & enzyme_complex_name_result, const bool& is_reverse, bool specify_layers, bool specify_cycles) { if (is_reverse) enzyme_complex_name = reaction.reaction_id + "_reverse"; else enzyme_complex_name = reaction.reaction_id; for (int i = 0; i < reaction.reactants_ids.size(); ++i) { for (int stoich = 0; stoich < std::abs(reaction.reactants_stoichiometry[i]); ++stoich) { enzyme_complex_name_tmp1 = enzyme_complex_name + ":" + reaction.reactants_ids[i]; enzyme_complex_name_tmp2 = enzyme_complex_name + "::" + reaction.reactants_ids[i]; enzyme_complex_name_result = enzyme_complex_name + "&" + reaction.reactants_ids[i]; // Add the nodes for the enzyme complex, enzyme complex tmp, reactant, and enzyme complex result Node<TensorT> enzyme_complex(enzyme_complex_name, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); enzyme_complex.setModuleName(module_name); //if (specify_layers) enzyme_complex.setLayerName(module_name + "-" + enzyme_complex_name + "-Enz"); if (specify_layers) enzyme_complex.setLayerName(module_name + "-Enz"); Node<TensorT> enzyme_complex_tmp1(enzyme_complex_name_tmp1, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<MinOp<TensorT>>(MinOp<TensorT>()), std::make_shared<MinErrorOp<TensorT>>(MinErrorOp<TensorT>()), std::make_shared<MinWeightGradOp<TensorT>>(MinWeightGradOp<TensorT>())); enzyme_complex_tmp1.setModuleName(module_name); //if (specify_layers) enzyme_complex_tmp1.setLayerName(module_name + "-" + enzyme_complex_name + "-EnzTmp1"); if (specify_layers) enzyme_complex_tmp1.setLayerName(module_name + "-EnzTmp1"); Node<TensorT> enzyme_complex_tmp2(enzyme_complex_name_tmp2, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); enzyme_complex_tmp2.setModuleName(module_name); //if (specify_layers) enzyme_complex_tmp2.setLayerName(module_name + "-" + enzyme_complex_name + "-EnzTmp2"); if (specify_layers) enzyme_complex_tmp2.setLayerName(module_name + "-EnzTmp2"); Node<TensorT> reactant(reaction.reactants_ids[i], NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); reactant.setModuleName(module_name); //if (specify_layers) reactant.setLayerName(module_name + "-" + reaction.reactants_ids[i] + "-" + "-Met"); if (specify_layers) reactant.setLayerName(module_name + "-Met"); Node<TensorT> enzyme_complex_result(enzyme_complex_name_result, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); enzyme_complex_result.setModuleName(module_name); //if (specify_layers) enzyme_complex_result.setLayerName(module_name + "-" + enzyme_complex_name + "-Result"); if (specify_layers) enzyme_complex_result.setLayerName(module_name + "-Result"); // Add the enzyme to complex link and weight std::string weight_name_1 = enzyme_complex_name + "_to_" + enzyme_complex_name_tmp1; Weight<TensorT> weight1(weight_name_1, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight1.setModuleName(module_name); if (specify_layers) weight1.setLayerName(module_name + "-Enz_to_EnzTmp1"); Link link1(weight_name_1, enzyme_complex_name, enzyme_complex_name_tmp1, weight_name_1); link1.setModuleName(module_name); // Add the reactant to complex link and weight std::string weight_name_2 = reaction.reactants_ids[i] + "_to_" + enzyme_complex_name_tmp1; Weight<TensorT> weight2(weight_name_2, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight2.setModuleName(module_name); if (specify_layers) weight2.setLayerName(module_name + "-Met_to_EnzTmp1"); Link link2(weight_name_2, reaction.reactants_ids[i], enzyme_complex_name_tmp1, weight_name_2); link2.setModuleName(module_name); // Add the reactant to complex link and weight std::string weight_name_3 = enzyme_complex_name_tmp1 + "_to_" + enzyme_complex_name_tmp2; Weight<TensorT> weight3(weight_name_3, weight_init, solver); weight3.setModuleName(module_name); if (specify_layers) weight3.setLayerName(module_name + "-EnzTmp1_to_EnzTmp2"); Link link3(weight_name_3, enzyme_complex_name_tmp1, enzyme_complex_name_tmp2, weight_name_3); link3.setModuleName(module_name); // Add the enzyme loss pseudo link and weight std::string weight_name_4 = enzyme_complex_name_tmp2 + "_to_" + enzyme_complex_name; Weight<TensorT> weight4(weight_name_4, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(-1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight4.setModuleName(module_name); if (specify_layers) weight4.setLayerName(module_name + "-EnzTmp2_to_Enz"); Link link4(weight_name_4, enzyme_complex_name_tmp2, enzyme_complex_name, weight_name_4); link4.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(enzyme_complex_name_tmp2, enzyme_complex_name)); // Add the reactant loss pseudo link and weight std::string weight_name_5 = enzyme_complex_name_tmp2 + "_to_" + reaction.reactants_ids[i]; Weight<TensorT> weight5(weight_name_5, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(-1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight5.setModuleName(module_name); if (specify_layers) weight5.setLayerName(module_name + "-EnzTmp2_to_Met"); Link link5(weight_name_5, enzyme_complex_name_tmp2, reaction.reactants_ids[i], weight_name_5); link5.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(enzyme_complex_name_tmp2, reaction.reactants_ids[i])); // Add the result enzyme complex link and weight std::string weight_name_result = enzyme_complex_name_tmp2 + "_to_" + enzyme_complex_name_result; Weight<TensorT> weight_result(weight_name_result, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight_result.setModuleName(module_name); if (specify_layers) weight_result.setLayerName(module_name + "-EnzTmp2_to_Result"); Link link_result(weight_name_result, enzyme_complex_name_tmp2, enzyme_complex_name_result, weight_name_result); link_result.setModuleName(module_name); // Add all of the nodes, links, and weights to the model model.addNodes({ enzyme_complex, enzyme_complex_tmp1, reactant, enzyme_complex_tmp2, enzyme_complex_result }); model.addLinks({ link1, link2, link3, link4, link5, link_result }); model.addWeights({ weight1, weight2, weight3, weight4, weight5, weight_result }); // Update the enzyme complex name with the result enzyme_complex_name = enzyme_complex_name_result; } } } template<typename TensorT> inline void ModelBuilderExperimental<TensorT>::addProductsSequentialMin_1(Model<TensorT>& model, const BiochemicalReaction & reaction, const std::string & name, const std::string & module_name, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, std::string & enzyme_complex_name, std::string & enzyme_complex_name_tmp1, std::string & enzyme_complex_name_tmp2, std::string & enzyme_complex_name_result, const bool& is_reverse, bool specify_layers, bool specify_cycles) { // make the products enzyme complex name std::vector<std::string> enzyme_complex_names_tmp1, enzyme_complex_names_tmp2, enzyme_complex_names_result; if (is_reverse) { enzyme_complex_names_tmp1.push_back(reaction.reaction_id + "_reverse"); enzyme_complex_names_tmp2.push_back(reaction.reaction_id + "_reverse"); enzyme_complex_names_result.push_back(reaction.reaction_id + "_reverse"); } else { enzyme_complex_names_tmp1.push_back(reaction.reaction_id); enzyme_complex_names_tmp2.push_back(reaction.reaction_id); enzyme_complex_names_result.push_back(reaction.reaction_id); } for (int i = reaction.products_ids.size() - 1; i >= 0; --i) { for (int stoich = 0; stoich < std::abs(reaction.products_stoichiometry[i]); ++stoich) { enzyme_complex_names_tmp1.push_back(enzyme_complex_names_result.back() + "::" + reaction.products_ids[i]); enzyme_complex_names_tmp2.push_back(enzyme_complex_names_result.back() + ":" + reaction.products_ids[i]); enzyme_complex_names_result.push_back(enzyme_complex_names_result.back() + "&" + reaction.products_ids[i]); } } // parse the products for (int i = 0; i < reaction.products_ids.size(); ++i) { for (int stoich = 0; stoich < std::abs(reaction.products_stoichiometry[i]); ++stoich) { enzyme_complex_name_tmp1 = enzyme_complex_names_tmp1[enzyme_complex_names_tmp1.size() - 1 - i]; enzyme_complex_name_tmp2 = enzyme_complex_names_tmp2[enzyme_complex_names_tmp2.size() - 1 - i]; enzyme_complex_name_result = enzyme_complex_names_result[enzyme_complex_names_result.size() - 2 - i]; //// Experimental //if (i == reaction.products_ids.size() - 1) // enzyme_complex_name_result = enzyme_complex_names_result[enzyme_complex_names_result.size() - 2 - i] + "_inactivated"; // Add the nodes for the enzyme complex, enzyme complex tmp, product, and enzyme complex result Node<TensorT> enzyme_complex(enzyme_complex_name, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); enzyme_complex.setModuleName(module_name); //if (specify_layers) enzyme_complex.setLayerName(module_name + "-" + enzyme_complex_name + "-Enz"); if (specify_layers) enzyme_complex.setLayerName(module_name + "-Enz"); Node<TensorT> enzyme_complex_tmp1(enzyme_complex_name_tmp1, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<MinOp<TensorT>>(MinOp<TensorT>()), std::make_shared<MinErrorOp<TensorT>>(MinErrorOp<TensorT>()), std::make_shared<MinWeightGradOp<TensorT>>(MinWeightGradOp<TensorT>())); enzyme_complex_tmp1.setModuleName(module_name); //if (specify_layers) enzyme_complex_tmp1.setLayerName(module_name + "-" + enzyme_complex_name + "-EnzTmp1"); if (specify_layers) enzyme_complex_tmp1.setLayerName(module_name + "-EnzTmp1"); Node<TensorT> enzyme_complex_tmp2(enzyme_complex_name_tmp2, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); enzyme_complex_tmp2.setModuleName(module_name); //if (specify_layers) enzyme_complex_tmp2.setLayerName(module_name + "-" + enzyme_complex_name + "-EnzTmp2"); if (specify_layers) enzyme_complex_tmp2.setLayerName(module_name + "-EnzTmp2"); Node<TensorT> product(reaction.products_ids[i], NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); product.setModuleName(module_name); //if (specify_layers) product.setLayerName(module_name + "-" + reaction.products_ids[i] + "-Met"); if (specify_layers) product.setLayerName(module_name + "-Met"); Node<TensorT> enzyme_complex_result(enzyme_complex_name_result, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); enzyme_complex_result.setModuleName(module_name); //if (specify_layers) enzyme_complex_result.setLayerName(module_name + "-" + enzyme_complex_name + "-Result"); if (specify_layers) enzyme_complex_result.setLayerName(module_name + "-Result"); // Add the enzyme to complex link and weight std::string weight_name_1 = enzyme_complex_name + "_to_" + enzyme_complex_name_tmp1; Weight<TensorT> weight1(weight_name_1, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight1.setModuleName(module_name); if (specify_layers) weight1.setLayerName(module_name + "-Enz_to_EnzTmp1"); Link link1(weight_name_1, enzyme_complex_name, enzyme_complex_name_tmp1, weight_name_1); link1.setModuleName(module_name); // Add the complex tmp1 to tmp2 std::string weight_name_3 = enzyme_complex_name_tmp1 + "_to_" + enzyme_complex_name_tmp2; Weight<TensorT> weight3(weight_name_3, weight_init, solver); weight3.setModuleName(module_name); if (specify_layers) weight3.setLayerName(module_name + "-EnzTmp1_to_EnzTmp2"); Link link3(weight_name_3, enzyme_complex_name_tmp1, enzyme_complex_name_tmp2, weight_name_3); link3.setModuleName(module_name); // Add the enzyme loss pseudo link and weight std::string weight_name_4 = enzyme_complex_name_tmp2 + "_to_" + enzyme_complex_name; Weight<TensorT> weight4(weight_name_4, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(-1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight4.setModuleName(module_name); if (specify_layers) weight4.setLayerName(module_name + "-EnzTmp2_to_Enz"); Link link4(weight_name_4, enzyme_complex_name_tmp2, enzyme_complex_name, weight_name_4); link4.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(enzyme_complex_name_tmp2, enzyme_complex_name)); // Add the resulting product std::string weight_name_5 = enzyme_complex_name_tmp2 + "_to_" + reaction.products_ids[i]; Weight<TensorT> weight5(weight_name_5, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight5.setModuleName(module_name); if (specify_layers) weight5.setLayerName(module_name + "-EnzTmp2_to_Met"); Link link5(weight_name_5, enzyme_complex_name_tmp2, reaction.products_ids[i], weight_name_5); link5.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(enzyme_complex_name_tmp2, reaction.products_ids[i])); // Add the result enzyme complex link and weight std::string weight_name_result = enzyme_complex_name_tmp2 + "_to_" + enzyme_complex_name_result; Weight<TensorT> weight_result(weight_name_result, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight_result.setModuleName(module_name); if (specify_layers) weight_result.setLayerName(module_name + "-EnzTmp2_to_Result"); Link link_result(weight_name_result, enzyme_complex_name_tmp2, enzyme_complex_name_result, weight_name_result); link_result.setModuleName(module_name); if (i == reaction.products_ids.size()-1 && stoich == std::abs(reaction.products_stoichiometry[i]) - 1 && specify_cycles) model.getCyclicPairs().insert(std::make_pair(enzyme_complex_name_tmp2, enzyme_complex_name_result)); // Add all of the nodes, links, and weights to the model model.addNodes({ enzyme_complex, enzyme_complex_tmp1, product, enzyme_complex_tmp2, enzyme_complex_result }); model.addLinks({ link1, link3, link4, link5, link_result }); model.addWeights({ weight1, weight3, weight4, weight5, weight_result }); // Update the enzyme complex name with the result enzyme_complex_name = enzyme_complex_name_result; } } //// Experimental //// Add the reactivated complex link and weight //std::string weight_name_activated = enzyme_complex_name + "_to_" + enzyme_complex_names_result[0]; //Weight<TensorT> weight_activated(weight_name_activated, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); //weight_activated.setModuleName(module_name); //Link link_activated(weight_name_activated, enzyme_complex_name, enzyme_complex_names_result[0], weight_name_activated); //link_activated.setModuleName(module_name); //model.addLinks({ link_activated }); //model.addWeights({ weight_activated }); } template<typename TensorT> inline void ModelBuilderExperimental<TensorT>::addReactantsSequentialMin_2(Model<TensorT>& model, const BiochemicalReaction & reaction, const std::string & name, const std::string & module_name, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, std::string & enzyme_complex_name, std::string & enzyme_complex_name_tmp1, std::string & enzyme_complex_name_tmp2, std::string & enzyme_complex_name_result, const bool& is_reverse, bool specify_layers, bool specify_cycles) { if (is_reverse) enzyme_complex_name = reaction.reaction_id + "_reverse"; else enzyme_complex_name = reaction.reaction_id; // Create the intermediate enzyme complex names enzyme_complex_name_tmp1 = enzyme_complex_name; enzyme_complex_name_tmp2 = enzyme_complex_name; enzyme_complex_name_result = enzyme_complex_name; for (int i = 0; i < reaction.reactants_ids.size(); ++i) { enzyme_complex_name_tmp1 = enzyme_complex_name_tmp1 + ":" + reaction.reactants_ids[i]; enzyme_complex_name_tmp2 = enzyme_complex_name_tmp2 + "::" + reaction.reactants_ids[i]; enzyme_complex_name_result = enzyme_complex_name_result + "&" + reaction.reactants_ids[i]; } // Add the nodes for the enzyme complex, enzyme complex tmp, reactant, and enzyme complex result Node<TensorT> enzyme_complex(enzyme_complex_name, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); enzyme_complex.setModuleName(module_name); //if (specify_layers) enzyme_complex.setLayerName(module_name + "-" + enzyme_complex_name + "-Enz"); if (specify_layers) enzyme_complex.setLayerName(module_name + "-Enz"); Node<TensorT> enzyme_complex_tmp1(enzyme_complex_name_tmp1, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<MinOp<TensorT>>(MinOp<TensorT>()), std::make_shared<MinErrorOp<TensorT>>(MinErrorOp<TensorT>()), std::make_shared<MinWeightGradOp<TensorT>>(MinWeightGradOp<TensorT>())); enzyme_complex_tmp1.setModuleName(module_name); //if (specify_layers) enzyme_complex_tmp1.setLayerName(module_name + "-" + enzyme_complex_name + "-EnzTmp1"); if (specify_layers) enzyme_complex_tmp1.setLayerName(module_name + "-EnzTmp1"); Node<TensorT> enzyme_complex_tmp2(enzyme_complex_name_tmp2, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); enzyme_complex_tmp2.setModuleName(module_name); //if (specify_layers) enzyme_complex_tmp2.setLayerName(module_name + "-" + enzyme_complex_name + "-EnzTmp2"); if (specify_layers) enzyme_complex_tmp2.setLayerName(module_name + "-EnzTmp2"); Node<TensorT> enzyme_complex_result(enzyme_complex_name_result, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); enzyme_complex_result.setModuleName(module_name); //if (specify_layers) enzyme_complex_result.setLayerName(module_name + "-" + enzyme_complex_name + "-Result"); if (specify_layers) enzyme_complex_result.setLayerName(module_name + "-Result"); // Add the enzyme to complex link and weight std::string weight_name_1 = enzyme_complex_name + "_to_" + enzyme_complex_name_tmp1; Weight<TensorT> weight1(weight_name_1, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight1.setModuleName(module_name); if (specify_layers) weight1.setLayerName(module_name + "-Enz_to_EnzTmp1"); Link link1(weight_name_1, enzyme_complex_name, enzyme_complex_name_tmp1, weight_name_1); link1.setModuleName(module_name); // Add the reactant to complex link and weight std::string weight_name_3 = enzyme_complex_name_tmp1 + "_to_" + enzyme_complex_name_tmp2; Weight<TensorT> weight3(weight_name_3, weight_init, solver); weight3.setModuleName(module_name); if (specify_layers) weight3.setLayerName(module_name + "-EnzTmp1_to_EnzTmp2"); Link link3(weight_name_3, enzyme_complex_name_tmp1, enzyme_complex_name_tmp2, weight_name_3); link3.setModuleName(module_name); // Add the enzyme loss pseudo link and weight std::string weight_name_4 = enzyme_complex_name_tmp2 + "_to_" + enzyme_complex_name; Weight<TensorT> weight4(weight_name_4, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(-1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight4.setModuleName(module_name); if (specify_layers) weight4.setLayerName(module_name + "-EnzTmp2_to_Enz"); Link link4(weight_name_4, enzyme_complex_name_tmp2, enzyme_complex_name, weight_name_4); link4.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(enzyme_complex_name_tmp2, enzyme_complex_name)); // Add the result enzyme complex link and weight std::string weight_name_result = enzyme_complex_name_tmp2 + "_to_" + enzyme_complex_name_result; Weight<TensorT> weight_result(weight_name_result, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight_result.setModuleName(module_name); if (specify_layers) weight_result.setLayerName(module_name + "-EnzTmp2_to_Result"); Link link_result(weight_name_result, enzyme_complex_name_tmp2, enzyme_complex_name_result, weight_name_result); link_result.setModuleName(module_name); // Add "self" enzyme link std::string weight_name_1_self = enzyme_complex_name + "_to_" + enzyme_complex_name; Weight<TensorT> weight1_self(weight_name_1_self, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight1_self.setModuleName(module_name); if (specify_layers) weight1_self.setLayerName(module_name + "-Enz_to_Enz"); Link link1_self(weight_name_1_self, enzyme_complex_name, enzyme_complex_name, weight_name_1_self); link1_self.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(enzyme_complex_name, enzyme_complex_name)); // Add "self" result link std::string weight_name = enzyme_complex_name_result + "_to_" + enzyme_complex_name_result; Weight<TensorT> weight(weight_name, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight.setModuleName(module_name); if (specify_layers) weight.setLayerName(module_name + "-Result_to_Result"); Link link(weight_name, enzyme_complex_name_result, enzyme_complex_name_result, weight_name); link.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(enzyme_complex_name_result, enzyme_complex_name_result)); for (int i = 0; i < reaction.reactants_ids.size(); ++i) { const int stoich = std::abs(reaction.reactants_stoichiometry[i]); Node<TensorT> reactant(reaction.reactants_ids[i], NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); reactant.setModuleName(module_name); //if (specify_layers) reactant.setLayerName(module_name + "-" + reaction.reactants_ids[i] + "-" + "-Met"); if (specify_layers) reactant.setLayerName(module_name + "-Met"); // Add the reactant to complex link and weight std::string weight_name_2 = reaction.reactants_ids[i] + "_to_" + enzyme_complex_name_tmp1; Weight<TensorT> weight2(weight_name_2, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0 / (TensorT)stoich)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight2.setModuleName(module_name); if (specify_layers) weight2.setLayerName(module_name + "-Met_to_EnzTmp1"); Link link2(weight_name_2, reaction.reactants_ids[i], enzyme_complex_name_tmp1, weight_name_2); link2.setModuleName(module_name); // Add the reactant loss pseudo link and weight std::string weight_name_5 = enzyme_complex_name_tmp2 + "_to_" + reaction.reactants_ids[i]; Weight<TensorT> weight5(weight_name_5, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(-(TensorT)stoich)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight5.setModuleName(module_name); if (specify_layers) weight5.setLayerName(module_name + "-EnzTmp2_to_Met"); Link link5(weight_name_5, enzyme_complex_name_tmp2, reaction.reactants_ids[i], weight_name_5); link5.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(enzyme_complex_name_tmp2, reaction.reactants_ids[i])); // Add the reactant "self" link std::string weight_name_2_self = reaction.reactants_ids[i] + "_to_" + reaction.reactants_ids[i]; Weight<TensorT> weight2_self(weight_name_2_self, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight2_self.setModuleName(module_name); if (specify_layers) weight2_self.setLayerName(module_name + "-Met_to_Met"); Link link2_self(weight_name_2_self, reaction.reactants_ids[i], reaction.reactants_ids[i], weight_name_2_self); link2_self.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(reaction.reactants_ids[i], reaction.reactants_ids[i])); // Add all of the nodes, links, and weights to the model model.addNodes({ reactant }); model.addLinks({ link2, link5, link2_self }); model.addWeights({ weight2, weight5, weight2_self }); } // Add all of the nodes, links, and weights to the model model.addNodes({ enzyme_complex, enzyme_complex_tmp1, enzyme_complex_tmp2, enzyme_complex_result }); model.addLinks({ link1, link3, link4, link_result, link1_self, link }); model.addWeights({ weight1, weight3, weight4, weight_result, weight1_self, weight }); // Update the enzyme complex name with the result enzyme_complex_name = enzyme_complex_name_result; } template<typename TensorT> inline void ModelBuilderExperimental<TensorT>::addProductsSequentialMin_2(Model<TensorT>& model, const BiochemicalReaction & reaction, const std::string & name, const std::string & module_name, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, std::string & enzyme_complex_name, std::string & enzyme_complex_name_tmp1, std::string & enzyme_complex_name_tmp2, std::string & enzyme_complex_name_result, const bool& is_reverse, bool specify_layers, bool specify_cycles) { // make the products enzyme complex name if (is_reverse) { enzyme_complex_name_tmp1 = reaction.reaction_id + "_reverse"; enzyme_complex_name_tmp2 = reaction.reaction_id + "_reverse"; enzyme_complex_name_result = reaction.reaction_id + "_reverse"; } else { enzyme_complex_name_tmp1 = reaction.reaction_id; enzyme_complex_name_tmp2 = reaction.reaction_id; enzyme_complex_name_result = reaction.reaction_id; } for (int i = reaction.products_ids.size() - 1; i >= 0; --i) { enzyme_complex_name_tmp1 = enzyme_complex_name_tmp1 + "::" + reaction.products_ids[i]; enzyme_complex_name_tmp2 = enzyme_complex_name_tmp2 + ":" + reaction.products_ids[i]; } // Add the nodes for the enzyme complex, enzyme complex tmp, product, and enzyme complex result Node<TensorT> enzyme_complex(enzyme_complex_name, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); enzyme_complex.setModuleName(module_name); //if (specify_layers) enzyme_complex.setLayerName(module_name + "-" + enzyme_complex_name + "-Enz"); if (specify_layers) enzyme_complex.setLayerName(module_name + "-Enz"); Node<TensorT> enzyme_complex_tmp1(enzyme_complex_name_tmp1, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<MinOp<TensorT>>(MinOp<TensorT>()), std::make_shared<MinErrorOp<TensorT>>(MinErrorOp<TensorT>()), std::make_shared<MinWeightGradOp<TensorT>>(MinWeightGradOp<TensorT>())); enzyme_complex_tmp1.setModuleName(module_name); //if (specify_layers) enzyme_complex_tmp1.setLayerName(module_name + "-" + enzyme_complex_name + "-EnzTmp1"); if (specify_layers) enzyme_complex_tmp1.setLayerName(module_name + "-EnzTmp1"); Node<TensorT> enzyme_complex_tmp2(enzyme_complex_name_tmp2, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); enzyme_complex_tmp2.setModuleName(module_name); //if (specify_layers) enzyme_complex_tmp2.setLayerName(module_name + "-" + enzyme_complex_name + "-EnzTmp2"); if (specify_layers) enzyme_complex_tmp2.setLayerName(module_name + "-EnzTmp2"); Node<TensorT> enzyme_complex_result(enzyme_complex_name_result, NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); enzyme_complex_result.setModuleName(module_name); //if (specify_layers) enzyme_complex_result.setLayerName(module_name + "-" + enzyme_complex_name + "-Result"); if (specify_layers) enzyme_complex_result.setLayerName(module_name + "-Result"); // Add the enzyme to complex link and weight std::string weight_name_1 = enzyme_complex_name + "_to_" + enzyme_complex_name_tmp1; Weight<TensorT> weight1(weight_name_1, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight1.setModuleName(module_name); if (specify_layers) weight1.setLayerName(module_name + "-Enz_to_EnzTmp1"); Link link1(weight_name_1, enzyme_complex_name, enzyme_complex_name_tmp1, weight_name_1); link1.setModuleName(module_name); // Add the complex tmp1 to tmp2 std::string weight_name_3 = enzyme_complex_name_tmp1 + "_to_" + enzyme_complex_name_tmp2; Weight<TensorT> weight3(weight_name_3, weight_init, solver); weight3.setModuleName(module_name); if (specify_layers) weight3.setLayerName(module_name + "-EnzTmp1_to_EnzTmp2"); Link link3(weight_name_3, enzyme_complex_name_tmp1, enzyme_complex_name_tmp2, weight_name_3); link3.setModuleName(module_name); // Add the enzyme loss pseudo link and weight std::string weight_name_4 = enzyme_complex_name_tmp2 + "_to_" + enzyme_complex_name; Weight<TensorT> weight4(weight_name_4, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(-1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight4.setModuleName(module_name); if (specify_layers) weight4.setLayerName(module_name + "-EnzTmp2_to_Enz"); Link link4(weight_name_4, enzyme_complex_name_tmp2, enzyme_complex_name, weight_name_4); link4.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(enzyme_complex_name_tmp2, enzyme_complex_name)); // Add the result enzyme complex link and weight std::string weight_name_result = enzyme_complex_name_tmp2 + "_to_" + enzyme_complex_name_result; Weight<TensorT> weight_result(weight_name_result, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight_result.setModuleName(module_name); if (specify_layers) weight_result.setLayerName(module_name + "-EnzTmp2_to_Result"); Link link_result(weight_name_result, enzyme_complex_name_tmp2, enzyme_complex_name_result, weight_name_result); link_result.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(enzyme_complex_name_tmp2, enzyme_complex_name_result)); // Add the enzyme "self" link and weight std::string weight_name_1_self = enzyme_complex_name + "_to_" + enzyme_complex_name; Weight<TensorT> weight1_self(weight_name_1_self, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight1_self.setModuleName(module_name); if (specify_layers) weight1_self.setLayerName(module_name + "-Enz_to_Enz"); Link link1_self(weight_name_1_self, enzyme_complex_name, enzyme_complex_name, weight_name_1_self); link1_self.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(enzyme_complex_name, enzyme_complex_name)); // Add the result enzyme complex "self" link and weight std::string weight_name = enzyme_complex_name_result + "_to_" + enzyme_complex_name_result; Weight<TensorT> weight(weight_name, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight.setModuleName(module_name); if (specify_layers) weight.setLayerName(module_name + "-Result_to_Result"); Link link(weight_name, enzyme_complex_name_result, enzyme_complex_name_result, weight_name); link.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(enzyme_complex_name_result, enzyme_complex_name_result)); // parse the products for (int i = 0; i < reaction.products_ids.size(); ++i) { const int stoich = std::abs(reaction.products_stoichiometry[i]); Node<TensorT> product(reaction.products_ids[i], NodeType::hidden, NodeStatus::initialized, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<TensorT>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); product.setModuleName(module_name); //if (specify_layers) product.setLayerName(module_name + "-" + reaction.products_ids[i] + "-Met"); if (specify_layers) product.setLayerName(module_name + "-Met"); // Add the resulting product std::string weight_name_5 = enzyme_complex_name_tmp2 + "_to_" + reaction.products_ids[i]; Weight<TensorT> weight5(weight_name_5, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>((TensorT)stoich)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight5.setModuleName(module_name); if (specify_layers) weight5.setLayerName(module_name + "-EnzTmp2_to_Met"); Link link5(weight_name_5, enzyme_complex_name_tmp2, reaction.products_ids[i], weight_name_5); link5.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(enzyme_complex_name_tmp2, reaction.products_ids[i])); // Add the resulting product "self" link std::string weight_name_5_self = reaction.products_ids[i] + "_to_" + reaction.products_ids[i]; Weight<TensorT> weight5_self(weight_name_5_self, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>())); weight5_self.setModuleName(module_name); if (specify_layers) weight5_self.setLayerName(module_name + "-Met_to_Met"); Link link5_self(weight_name_5_self, reaction.products_ids[i], reaction.products_ids[i], weight_name_5_self); link5_self.setModuleName(module_name); if (specify_cycles) model.getCyclicPairs().insert(std::make_pair(reaction.products_ids[i], reaction.products_ids[i])); // Add all of the nodes, links, and weights to the model model.addNodes({ product }); model.addLinks({ link5, link5_self }); model.addWeights({ weight5, weight5_self }); } // Add all of the nodes, links, and weights to the model model.addNodes({ enzyme_complex, enzyme_complex_tmp1, enzyme_complex_tmp2, enzyme_complex_result }); model.addLinks({ link1, link3, link4, link_result, link1_self, link }); model.addWeights({ weight1, weight3, weight4, weight_result, weight1_self, weight }); // Update the enzyme complex name with the result enzyme_complex_name = enzyme_complex_name_result; } template<typename TensorT> inline void ModelBuilderExperimental<TensorT>::addBiochemicalReactionsMLP(Model<TensorT>& model, const BiochemicalReactions & biochemicalReactions, const std::string & module_name, const std::vector<int>& n_fc, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const bool& add_biases, const bool& specify_layers, const bool& add_met_self_links) { // get all unique metabolite nodes in the model std::set<std::string> node_names_met; for (const auto& biochemicalReaction : biochemicalReactions) { if (!biochemicalReaction.second.used) continue; // Skip specified reactions for (const std::string& met_id : biochemicalReaction.second.reactants_ids) node_names_met.insert(met_id); for (const std::string& met_id : biochemicalReaction.second.products_ids) node_names_met.insert(met_id); } // add all metabolite nodes to the model std::vector<std::string> node_names_met_t0_vec; for (const std::string& met_id : node_names_met) { std::string met_name = met_id + "(t)"; node_names_met_t0_vec.push_back(met_name); Node<TensorT> met(met_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<TensorT>>(ReLUOp<TensorT>()), std::make_shared<ReLUGradOp<TensorT>>(ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); met.setModuleName(module_name); if (specify_layers) met.setLayerName(module_name + "-Met(t)"); model.addNodes({ met }); } std::vector<std::string> node_names_met_t1_vec; for (const std::string& met_id : node_names_met) { std::string met_name = met_id + "(t+1)"; node_names_met_t1_vec.push_back(met_name); Node<TensorT> met(met_name, NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<TensorT>>(ReLUOp<TensorT>()), std::make_shared<ReLUGradOp<TensorT>>(ReLUGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); met.setModuleName(module_name); if (specify_layers) met.setLayerName(module_name + "-Met(t+1)"); model.addNodes({ met }); } // add self metabolite links to the model if (add_met_self_links) { this->addSinglyConnected(model, module_name, node_names_met_t0_vec, node_names_met_t1_vec, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, specify_layers); } this->addSinglyConnected(model, module_name, node_names_met_t1_vec, node_names_met_t0_vec, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, specify_layers); for (int i = 0; i < node_names_met.size(); ++i) model.addCyclicPairs(std::make_pair(node_names_met_t1_vec.at(i), node_names_met_t0_vec.at(i))); // add all reaction MLPs to the model for (const auto& biochemicalReaction : biochemicalReactions) { if (!biochemicalReaction.second.used) continue; // Skip specified reactions addReactantsMLP_1(model, biochemicalReaction.second, n_fc, node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad, weight_init, solver, add_biases, specify_layers, false); if (biochemicalReaction.second.reversibility) { // flip the products and reactants and repeat the above BiochemicalReaction reverse_reaction = biochemicalReaction.second; reverse_reaction.products_ids = biochemicalReaction.second.reactants_ids; reverse_reaction.products_stoichiometry = biochemicalReaction.second.reactants_stoichiometry; reverse_reaction.reactants_ids = biochemicalReaction.second.products_ids; reverse_reaction.reactants_stoichiometry = biochemicalReaction.second.products_stoichiometry; addReactantsMLP_1(model, biochemicalReaction.second, n_fc, node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad, weight_init, solver, add_biases, specify_layers, true); } } } template<typename TensorT> inline void ModelBuilderExperimental<TensorT>::addReactantsMLP_1(Model<TensorT>& model, const BiochemicalReaction & reaction, const std::vector<int>& n_fc, const std::shared_ptr<ActivationOp<TensorT>>& node_activation, const std::shared_ptr<ActivationOp<TensorT>>& node_activation_grad, const std::shared_ptr<IntegrationOp<TensorT>>& node_integration, const std::shared_ptr<IntegrationErrorOp<TensorT>>& node_integration_error, const std::shared_ptr<IntegrationWeightGradOp<TensorT>>& node_integration_weight_grad, const std::shared_ptr<WeightInitOp<TensorT>>& weight_init, const std::shared_ptr<SolverOp<TensorT>>& solver, const bool& add_biases, const bool& specify_layers, const bool& is_reverse) { // make the input nodes (reactants + products) and output nodes (products) std::vector<std::string> node_names_all_t0, node_names_reactants_t0, node_names_reactants_t1, node_names_products_t1, node_names_products_t0; for (const std::string& met_id : reaction.reactants_ids) { std::string met_name = met_id + "(t)"; node_names_reactants_t0.push_back(met_name); node_names_all_t0.push_back(met_name); met_name = met_id + "(t+1)"; node_names_reactants_t1.push_back(met_name); } for (const std::string& met_id : reaction.products_ids) { std::string met_name = met_id + "(t+1)"; node_names_products_t1.push_back(met_name); met_name = met_id + "(t)"; node_names_products_t0.push_back(met_name); node_names_all_t0.push_back(met_name); } // add the initial SC input layer std::vector<std::string> node_names = node_names_all_t0; std::string node_name_fc_input = reaction.reaction_name; if (is_reverse) node_name_fc_input += "_reverse"; node_name_fc_input += "_FCInput"; node_names = this->addSinglyConnected(model, node_name_fc_input, node_name_fc_input, node_names, node_names_all_t0.size(), node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), // TODO: should correspond to stoichiometry std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, add_biases, specify_layers); // make the internal FC layers int iter = 0; for (const int& fc_size: n_fc) { std::string node_name = reaction.reaction_name; if (is_reverse) node_name += "_reverse"; node_name = node_name + "_" + std::to_string(iter); node_names = this->addFullyConnected(model, node_name, node_name, node_names, fc_size, node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size() + fc_size, 2)), //weight_init, solver, 0.0f, 0.0f, add_biases, specify_layers); ++iter; } // add a final output FC layer std::string node_name_fc_output = reaction.reaction_name; if (is_reverse) node_name_fc_output += "_reverse"; node_name_fc_output += "_FCOut"; node_names = this->addFullyConnected(model, node_name_fc_output, node_name_fc_output, node_names, node_names_all_t0.size(), node_activation, node_activation_grad, node_integration, node_integration_error, node_integration_weight_grad, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size() + node_names_all_t0.size(), 2)), //weight_init, solver, 0.0f, 0.0f, add_biases, specify_layers); // parse the node_names into reactant and products std::vector<std::string> node_names_FCOut_reactants, node_names_FCOut_products; iter = 0; for (const std::string& met_id : reaction.reactants_ids) { node_names_FCOut_reactants.push_back(node_names.at(iter)); ++iter; } for (const std::string& met_id : reaction.products_ids) { node_names_FCOut_products.push_back(node_names.at(iter)); ++iter; } // NOTE: the below allows for learning mass balance; alternatively, mass balance could be hard-coded // connect the final SC layer to the input nodes (reactants) std::string node_name_reactant_out = reaction.reaction_name; if (is_reverse) node_name_reactant_out += "_reverse"; node_name_reactant_out += "_ReactantsOut"; this->addSinglyConnected(model, node_name_reactant_out, node_names_FCOut_reactants, node_names_reactants_t1, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), // The amount that goes back into the node; TOOD: should correspond to stoichiometry std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, specify_layers); // connect the final SC layer to the output nodes (products) std::string node_name_product_out = reaction.reaction_name; if (is_reverse) node_name_product_out += "_reverse"; node_name_product_out += "_ProductsOut"; this->addSinglyConnected(model, node_name_product_out, node_names_FCOut_products, node_names_products_t1, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), // TOOD: should correspond to stoichiometry std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, specify_layers); } } #endif //EVONET_MODELBUILDEREXPERIMENTAL_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_POPULATIONLOGGER_H #define EVONET_POPULATIONLOGGER_H // .h #include <EvoNet/io/CSVWriter.h> #include <vector> // .cpp #include <ctime> // time format #include <chrono> // current time #include <set> namespace EvoNet { /** @brief Class to log population training metrics */ template<typename TensorT> class PopulationLogger { public: PopulationLogger() = default; ///< Default constructor PopulationLogger(bool log_time_generation, bool log_models_validation_errors_per_generation) : log_time_generation_(log_time_generation), log_models_validation_errors_per_generation_(log_models_validation_errors_per_generation) {}; ~PopulationLogger() = default; ///< Default destructor bool getLogTimeGeneration() { return log_time_generation_; } bool getLogTrainValErrorsGeneration() { return log_models_validation_errors_per_generation_; } CSVWriter getLogTimeGenerationCSVWriter() { return log_time_generation_csvwriter_; } CSVWriter getLogTrainValErrorsGenerationCSVWriter() { return log_models_validation_errors_per_generation_csvwriter_; } /** @brief Initialize the log files @param[in] population_name @returns True for a successfull write operation */ bool initLogs(const std::string& population_name); /** @brief Initialize the log files @param[in] model @returns True for a successfull write operation */ bool writeLogs(const int& n_generation, const std::vector<std::tuple<int, std::string, TensorT>>& models_validation_errors_per_generation); /** @brief Log generation iteration number vs. time @param[in] n_generation @param[in] time_stamp @returns True for a successfull write operation */ bool logTimePerGeneration(const int& n_generation); /** @brief Log population validation errors per generation @param[in] n_generation @param[in] models_validation_errors_per_generation @returns True for a successfull write operation */ bool logTrainValErrorsPerGeneration(const int& n_generation, const std::vector<std::tuple<int, std::string, TensorT>>& models_validation_errors_per_generation); private: bool log_time_generation_ = false; ///< log ... CSVWriter log_time_generation_csvwriter_; bool log_models_validation_errors_per_generation_ = false; ///< log CSVWriter log_models_validation_errors_per_generation_csvwriter_; // internal variables std::map<std::string, std::vector<std::string>> module_to_node_names_; }; template<typename TensorT> bool PopulationLogger<TensorT>::initLogs(const std::string& population_name) { if (log_time_generation_) { std::string filename = population_name + "_TimePerGeneration.csv"; CSVWriter csvwriter(filename); log_time_generation_csvwriter_ = csvwriter; } if (log_models_validation_errors_per_generation_) { std::string filename = population_name + "_TrainValErrorsPerGeneration.csv"; CSVWriter csvwriter(filename); log_models_validation_errors_per_generation_csvwriter_ = csvwriter; } return true; } template<typename TensorT> bool PopulationLogger<TensorT>::writeLogs(const int & n_generations, const std::vector<std::tuple<int, std::string, TensorT>>& models_validation_errors_per_generation) { if (log_time_generation_) { logTimePerGeneration(n_generations); } if (log_models_validation_errors_per_generation_) { logTrainValErrorsPerGeneration(n_generations, models_validation_errors_per_generation); } return true; } template<typename TensorT> bool PopulationLogger<TensorT>::logTimePerGeneration(const int & n_generation) { // writer header if (log_time_generation_csvwriter_.getLineCount() == 0) { std::vector<std::string> headers = { "Generation", "TimeStamp", "Milliseconds" }; log_time_generation_csvwriter_.writeDataInRow(headers.begin(), headers.end()); } // TimeStamp std::chrono::time_point<std::chrono::system_clock> time_now = std::chrono::system_clock::now(); std::time_t time_now_t = std::chrono::system_clock::to_time_t(time_now); std::tm now_tm = *std::localtime(&time_now_t); char timestamp[64]; std::strftime(timestamp, 64, "%Y-%m-%d-%H-%M-%S", &now_tm); std::string time_stamp(timestamp); // Current time in milliseconds since 1970 auto now = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now().time_since_epoch()).count(); std::string milli_now = std::to_string(now); // write next entry std::vector<std::string> line = { std::to_string(n_generation), time_stamp, milli_now }; log_time_generation_csvwriter_.writeDataInRow(line.begin(), line.end()); return true; } template<typename TensorT> bool PopulationLogger<TensorT>::logTrainValErrorsPerGeneration(const int & n_generation, const std::vector<std::tuple<int, std::string, TensorT>>& models_validation_errors_per_generation) { // writer header if (log_models_validation_errors_per_generation_csvwriter_.getLineCount() == 0) { std::vector<std::string> headers = {"generation", "model_id", "model_name", "ave_validation_error" }; log_models_validation_errors_per_generation_csvwriter_.writeDataInRow(headers.begin(), headers.end()); } // write next entry for (const std::tuple<int, std::string, TensorT>& model_validation_error_per_generation : models_validation_errors_per_generation) { std::vector<std::string> line = { std::to_string(n_generation) }; line.push_back(std::to_string(std::get<0>(model_validation_error_per_generation))); line.push_back(std::get<1>(model_validation_error_per_generation)); char error[512]; sprintf(error, "%0.6f", std::get<2>(model_validation_error_per_generation)); std::string error_str(error); line.push_back(error_str); log_models_validation_errors_per_generation_csvwriter_.writeDataInRow(line.begin(), line.end()); } return true; } } #endif //EVONET_POPULATIONLOGGER_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_LINK_H #define EVONET_LINK_H #include <tuple> #include <string> #include <cereal/cereal.hpp> namespace EvoNet { /** @brief Directed Network Link */ class Link { public: Link(); ///< Default constructor Link(const Link& other); ///< Copy constructor // [TODO: add test] Link(const int& id); ///< Explicit constructor Link(const std::string& name); ///< Explicit constructor Link(const std::string& name, const std::string& source_node_name, const std::string& sink_node_name, const std::string& weight_name); ///< Explicit constructor ~Link(); ///< Default destructor inline bool operator==(const Link& other) const { return std::tie( id_, source_node_name_, sink_node_name_, weight_name_, name_, module_id_, module_name_ ) == std::tie( other.id_, other.source_node_name_, other.sink_node_name_, other.weight_name_, other.name_, other.module_id_, other.module_name_ ) ; } inline bool operator!=(const Link& other) const { return !(*this == other); } inline Link& operator=(const Link& other) { // [TODO: add test] id_ = other.id_; name_ = other.name_; module_id_ = other.module_id_; module_name_ = other.module_name_; source_node_name_ = other.source_node_name_; sink_node_name_ = other.sink_node_name_; weight_name_ = other.weight_name_; return *this; } void setId(const int& id); ///< id setter int getId() const; ///< id getter void setName(const std::string& name); ///< naem setter std::string getName() const; ///< name getter void setSourceNodeName(const std::string& source_node_name); ///< source_node_name setter std::string getSourceNodeName() const; ///< source_node_name getter void setSinkNodeName(const std::string& sink_node_name); ///< sink_node_name setter std::string getSinkNodeName() const; ///< sink_node_name getter void setWeightName(const std::string& weight_name); ///< weight_name setter std::string getWeightName() const; ///< weight_name getter void setModuleId(const int& module_id); ///< module id setter int getModuleId() const; ///< module id getter void setModuleName(const std::string& module_name); ///< module name setter std::string getModuleName() const; ///< module name getter private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(id_, name_, module_id_, module_name_, source_node_name_, sink_node_name_, weight_name_); } int id_ = -1; ///< Weight ID std::string name_ = ""; ///< Weight Name int module_id_ = -1; ///< Module ID std::string module_name_ = ""; ///<Module Name std::string source_node_name_; ///< Link source node std::string sink_node_name_; ///< Link sink node std::string weight_name_; ///< Link weight }; } #endif //EVONET_LINK_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELFILE_H #define EVONET_MODELFILE_H // .h #include <EvoNet/ml/Model.h> #include <iostream> #include <fstream> #include <vector> // .cpp #include <EvoNet/io/NodeFile.h> #include <EvoNet/io/WeightFile.h> #include <EvoNet/io/LinkFile.h> //#include <filesystem> // C++ 17 #include <cereal/types/memory.hpp> #include <cereal/types/map.hpp> #include <cereal/types/tuple.hpp> #include <cereal/types/utility.hpp> // std::pair #include <cereal/types/vector.hpp> #include <cereal/types/set.hpp> #include <cereal/archives/binary.hpp> namespace EvoNet { /** @brief ModelFile */ template<typename TensorT> class ModelFile { public: ModelFile() = default; ///< Default constructor ~ModelFile() = default; ///< Default destructor /** @brief store Model from file @param filename The name of the model file @param model The model to store @returns Status True on success, False if not */ bool storeModelBinary(const std::string& filename, const Model<TensorT>& model); /** @brief load Model from file @param filename The name of the model file @param model The model to load data into @returns Status True on success, False if not */ bool loadModelBinary(const std::string& filename, Model<TensorT>& model); /** @brief load Model weights from a binarized model file file @param filename The name of the model file @param weights The weights to load data into @returns Status True on success, False if not */ bool loadWeightValuesBinary(const std::string& filename, std::map<std::string, std::shared_ptr<Weight<TensorT>>>& weights); /** @brief store nodes, links, and weights as a .csv file from a Model @param filename_nodes The name of the node file @param filename_links The name of the link file @param filename_weights The name of the weight file @param model The model to load data into @returns Status True on success, False if not */ bool storeModelCsv(const std::string& filename_nodes, const std::string& filename_links, const std::string& filename_weights, Model<TensorT>& model, bool store_nodes = true, bool store_links = true, bool store_weights = true); /** @brief Load nodes, links, and weights from file and create a Model @param filename_nodes The name of the node file @param filename_links The name of the link file @param filename_weights The name of the weight file @param model The model to load data into @returns Status True on success, False if not */ bool loadModelCsv(const std::string& filename_nodes, const std::string& filename_links, const std::string& filename_weights, Model<TensorT>& model, bool load_nodes = true, bool load_links = true, bool load_weights = true); /** @brief save network model to file in dot format for visualization using e.g., GraphVIZ [TODO: move to GraphFile and take in the model as input to allow for the following 1. coloring of nodes based on node type (i.e., input, hidden, bias, or output) e.g. node1 [shape=circle,style=filled,color=".7 .3 1.0"]; 2. annotation of links with the value of the weight e.g. node1 -> node2 [style=italic,label="weight = 10"]; ] @param filename The name of the links file (.gv extension) @param links The links to save to disk @returns Status True on success, False if not */ bool storeModelDot(const std::string& filename, const Model<TensorT>& model); }; template<typename TensorT> bool ModelFile<TensorT>::storeModelBinary(const std::string & filename, const Model<TensorT>& model) { std::ofstream ofs(filename, std::ios::binary); //if (ofs.is_open() == false) {// Lines check to make sure the file is not already created cereal::BinaryOutputArchive oarchive(ofs); oarchive(model); ofs.close(); //}// Lines check to make sure the file is not already created return true; } template<typename TensorT> bool ModelFile<TensorT>::loadModelBinary(const std::string & filename, Model<TensorT>& model) { std::ifstream ifs(filename, std::ios::binary); if (ifs.is_open()) { cereal::BinaryInputArchive iarchive(ifs); iarchive(model); ifs.close(); } else { std::cout << "The model with filename " + filename + " was not found." << std::endl; } return true; } template<typename TensorT> inline bool ModelFile<TensorT>::loadWeightValuesBinary(const std::string & filename, std::map<std::string, std::shared_ptr<Weight<TensorT>>>& weights) { // Load in the binarized model Model<TensorT> model; loadModelBinary(filename, model); // Transer over the weights for (auto& weight_map: weights){ // parse the weight value try { weight_map.second->setWeight(model.weights_.at(weight_map.first)->getWeight()); weight_map.second->setInitWeight(false); } catch (std::exception& e) { printf("Exception: %s", e.what()); } } return true; } template<typename TensorT> bool ModelFile<TensorT>::storeModelCsv(const std::string & filename_nodes, const std::string & filename_links, const std::string & filename_weights, Model<TensorT>& model, bool store_nodes, bool store_links, bool store_weights) { // [PERFORMANCE: this can be parallelized using threads] if (store_nodes) { NodeFile<TensorT> node_file; node_file.storeNodesCsv(filename_nodes, model.nodes_); } if (store_links) { LinkFile link_file; link_file.storeLinksCsv(filename_links, model.links_); } if (store_weights) { WeightFile<TensorT> weight_file; weight_file.storeWeightsCsv(filename_weights, model.weights_); } return true; } template<typename TensorT> bool ModelFile<TensorT>::loadModelCsv(const std::string & filename_nodes, const std::string & filename_links, const std::string & filename_weights, Model<TensorT>& model, bool load_nodes, bool load_links, bool load_weights) { // [PERFORMANCE: this can be parallelized using threads] // load the nodes if (load_nodes) { NodeFile<TensorT> node_file; std::map<std::string, std::shared_ptr<Node<TensorT>>> nodes; node_file.loadNodesCsv(filename_nodes, nodes); model.nodes_ = nodes; model.setInputAndOutputNodes(); // Need to initialize the input/output node cache } // load the links if (load_links) { LinkFile link_file; std::map<std::string, std::shared_ptr<Link>> links; link_file.loadLinksCsv(filename_links, links); model.links_ = links; } // load the weights if (load_weights) { WeightFile<TensorT> weight_file; std::map<std::string, std::shared_ptr<Weight<TensorT>>> weights; weight_file.loadWeightsCsv(filename_weights, weights); model.weights_ = weights; } return true; } template<typename TensorT> bool ModelFile<TensorT>::storeModelDot(const std::string& filename, const Model<TensorT>& model) { std::fstream file; // Open the file in truncate mode file.open(filename, std::ios::out | std::ios::trunc); file << "digraph G {\n"; // first line // write node formating to file for (const Node<TensorT>& node : model.getNodes()) { if (node.getType() == NodeType::input) { char line_char[512]; sprintf(line_char, "\t\"%s\" [shape=circle,style=filled,color=\"#D3D3D3\"];\n", node.getName().data()); std::string line(line_char); file << line; } else if (node.getType() == NodeType::output) { char line_char[512]; sprintf(line_char, "\t\"%s\" [shape=circle,style=filled,color=\"#00FFFF\"];\n", node.getName().data()); std::string line(line_char); file << line; } } // write each source/sink to file for (const Link& link : model.getLinks()) { if (model.getNode(link.getSourceNodeName()).getType() != NodeType::bias) { char line_char[512]; sprintf(line_char, "\t\"%s\" -> \"%s\";\n", link.getSourceNodeName().data(), link.getSinkNodeName().data()); std::string line(line_char); file << line; } } file << "}"; // last line file.close(); return true; } } #endif //EVONET_MODELFILE_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELREPLICATOR_H #define EVONET_MODELREPLICATOR_H // .h #include <EvoNet/ml/Model.h> #include <vector> #include <string> // .cpp #include <EvoNet/core/Preprocessing.h> #include <random> // random number generator #include <algorithm> // tokenizing #include <regex> // tokenizing #include <ctime> // time format #include <chrono> // current time #include <set> namespace EvoNet { /** @brief Replicates a model with or without modification (i.e., mutation) */ template<typename TensorT> class ModelReplicator { public: ModelReplicator() = default; ///< Default constructor ~ModelReplicator() = default; ///< Default destructor void setNNodeDownAdditions(const int& n_node_additions); ///< n_node_additions setter void setNNodeRightAdditions(const int& n_node_additions); ///< n_nodes_additions setter void setNNodeDownCopies(const int& n_node_copies); ///< n_node_copies setter void setNNodeRightCopies(const int& n_node_copies); ///< n_node_copies setter void setNLinkAdditions(const int& n_link_additions); ///< n_link_additions setter void setNLinkCopies(const int& n_link_copies); ///< n_link_copies setter void setNNodeDeletions(const int& n_node_deletions); ///< n_node_deletions setter void setNLinkDeletions(const int& n_link_deletions); ///< n_link_deletions setter void setNNodeActivationChanges(const int& n_node_activation_changes); ///< n_node_activation_changes setter void setNNodeIntegrationChanges(const int& n_node_integration_changes); ///< n_node_integration_changes setter void setNModuleAdditions(const int& n_module_additions); ///< n_module_additions setter void setNModuleCopies(const int& n_module_copies); ///< n_module_copies setter void setNModuleDeletions(const int& n_module_deletions); ///< n_module_deletions setter void setNodeActivations(const std::vector<std::pair<std::shared_ptr<ActivationOp<TensorT>>, std::shared_ptr<ActivationOp<TensorT>>>>& node_activations); ///< node_activations setter void setNodeIntegrations(const std::vector<std::tuple<std::shared_ptr<IntegrationOp<TensorT>>, std::shared_ptr<IntegrationErrorOp<TensorT>>, std::shared_ptr<IntegrationWeightGradOp<TensorT>>>>& node_integrations); ///< node_integrations setter int getNNodeDownAdditions() const; ///< n_node_additions setter int getNNodeRightAdditions() const; ///< n_node_additions getter int getNNodeDownCopies() const; ///< n_node_copies getter int getNNodeRightCopies() const; ///< n_node_copies getter int getNLinkAdditions() const; ///< n_link_additions setter int getNLinkCopies() const; ///< n_link_copies setter int getNNodeDeletions() const; ///< n_node_deletions setter int getNLinkDeletions() const; ///< n_link_deletions setter int getNNodeActivationChanges() const; ///< n_node_activation_changes setter int getNNodeIntegrationChanges() const; ///< n_node_integration_changes setter int getNModuleAdditions() const; ///< n_module_additions setter int getNModuleCopies() const; ///< n_module_copies setter int getNModuleDeletions() const; ///< n_module_deletions setter std::vector<std::pair<std::shared_ptr<ActivationOp<TensorT>>, std::shared_ptr<ActivationOp<TensorT>>>> getNodeActivations() const; ///< node_activations setter std::vector<std::tuple<std::shared_ptr<IntegrationOp<TensorT>>, std::shared_ptr<IntegrationErrorOp<TensorT>>, std::shared_ptr<IntegrationWeightGradOp<TensorT>>>> getNodeIntegrations() const; ///< node_integrations setter void setNWeightChanges(const int& n_weight_changes); ///< n_weight_changes setter void setWeightChangeStDev(const TensorT& weight_change_stdev); ///< weight_change_stdev setter int getNWeightChanges() const; ///< n_weight_changes getter TensorT getWeightChangeStDev() const; ///< weight_change_stdev getter /** @brief Modify (i.e., mutate) an existing model in place @param[in, out] model The model to modify */ void modifyModel(Model<TensorT>& model, std::string unique_str = "", int prune_iterations = 1e3); /** @brief Select nodes given a set of conditions @param[in, out] model The model @param node_type_exclude Node types to exclude @param node_type_include Node types to include @returns A node name */ std::vector<std::string> selectNodes( const Model<TensorT>& model, const std::vector<NodeType>& node_type_exclude, const std::vector<NodeType>& node_type_include); /** @brief Select modules given a set of conditions @param[in, out] model The model @param node_type_exclude Node types to exclude @param node_type_include Node types to include @returns A node name */ std::vector<std::string> selectModules( const Model<TensorT>& model, const std::vector<NodeType>& node_type_exclude, const std::vector<NodeType>& node_type_include); /** @brief Select random node given a set of conditions @param[in, out] model The model @param node_type_exclude Node types to exclude @param node_type_include Node types to include @param node Previous node selected (for distance calculation) @param distance_weight Probability weighting to punish more "distant" nodes @param direction Source to Sink node direction; options are "forward, reverse" @returns A node name */ std::string selectRandomNode( const Model<TensorT>& model, const std::vector<NodeType>& node_type_exclude, const std::vector<NodeType>& node_type_include, const Node<TensorT>& node, const TensorT& distance_weight, const std::string& direction); std::string selectRandomNode( const Model<TensorT>& model, const std::vector<NodeType>& node_type_exclude, const std::vector<NodeType>& node_type_include); /** @brief Select random link given a set of conditions @param[in, out] model The model @param source_node_type_exclude Source node types to exclude @param source_node_type_include Source node types to include @param sink_node_type_exclude Sink node types to exclude @param sink_node_type_include Sink node types to include @param direction Source to Sink node direction; options are "forward, reverse" @returns A link name */ std::string selectRandomLink( const Model<TensorT>& model, const std::vector<NodeType>& source_node_type_exclude, const std::vector<NodeType>& source_node_type_include, const std::vector<NodeType>& sink_node_type_exclude, const std::vector<NodeType>& sink_node_type_include, const std::string& direction); std::string selectRandomLink( const Model<TensorT>& model, const std::vector<NodeType>& source_node_type_exclude, const std::vector<NodeType>& source_node_type_include, const std::vector<NodeType>& sink_node_type_exclude, const std::vector<NodeType>& sink_node_type_include); /** @brief Select random module given a set of conditions @param[in] model The model @returns A module name */ std::string selectRandomModule( const Model<TensorT>& model, const std::vector<NodeType>& node_type_exclude, const std::vector<NodeType>& node_type_include); /** @brief Copy a node in the model. This operation results in a layer addition below the target node whereby the weigh between the input node and target node are reused for the link between the new node and target node. @param[in, out] model The model */ void copyNodeDown(Model<TensorT>& model, std::string unique_str = ""); /** @brief Copy a node in the model. This operation results in a layer expansion to the left or right whereby all target node input and output node links are also copied. @param[in, out] model The model */ void copyNodeRight(Model<TensorT>& model, std::string unique_str = ""); /** @brief Add node to the model (Layer injection down). The method utilizes a modified version of the NEAT algorithm whereby a random link is chosen and bifurcated with a new node. Instead, new nodes are added using the following procedure: 1. an existing node is randomly chosen from the model. 2. a randomly connected input link to the node is chosen. Note that an input link is chose because it is easier to exclude input nodes than output nodes. 3. the chosen node is copied and a new link and new weight is added between the new node and the existing node. 4. the new link becomes the input link of the existing node and the output link of the new node, and existing link becomes the input link of the new node. References: <NAME> & <NAME> (2002). "Evolving Neural Networks Through Augmenting Topologies". Evolutionary Computation. 10 (2): 99–127. doi:10.1162/106365602320169811 @param[in, out] model The model */ void addNodeDown(Model<TensorT>& model, std::string unique_str = "", bool as_copy = false); /** @brief Add node to the model (Layer expansion right). New nodes are added using the following procedure: 1. an existing node is randomly chosen from the model. 2. all node input and out put links are replicated and new weights for each link are made. 3. the chosen node is copied. 4. the new node is then connected to the replicated input and output links. @param[in, out] model The model */ void addNodeRight(Model<TensorT>& model, std::string unique_str = "", bool as_copy = false); /** @brief add link with a new weight to the model. @param[in, out] model The model */ void addLink(Model<TensorT>& model, std::string unique_str = "", bool as_copy = false); /** @brief copy an existing link (no new weight is created), and add the copied link to the model. @param[in, out] model The model */ void copyLink(Model<TensorT>& model, std::string unique_str = ""); /** @brief Add a new module templated off of an existing module with new weights to the model @param[in, out] model The model */ void addModule(Model<TensorT>& model, std::string unique_str = "", bool as_copy = false); /** @brief Copy an existing module (no new weights are created), and add the copied module to the model @param[in, out] model The model */ void copyModule(Model<TensorT>& model, std::string unique_str = ""); /** @brief delete node to the model @param[in, out] model The model @param[in] prune_iterations The number of model recursive prune iterations */ void deleteNode(Model<TensorT>& model, int prune_iterations = 1e6); /** @brief delete link to the model @param[in, out] model The model @param[in] prune_iterations The number of model recursive prune iterations */ void deleteLink(Model<TensorT>& model, int prune_iterations = 1e6); /** @brief delete module in the model @param[in, out] model The model @param[in] prune_iterations The number of model recursive prune iterations */ void deleteModule(Model<TensorT>& model, int prune_iterations = 1e6); /** @brief change node activation @param[in, out] model The model */ void changeNodeActivation(Model<TensorT>& model, std::string unique_str = ""); /** @brief change node integration @param[in, out] model The model */ void changeNodeIntegration(Model<TensorT>& model, std::string unique_str = ""); /** @brief modify weights in the model @param[in, out] model The model */ void modifyWeight(Model<TensorT>& model); /** @brief Make a unique time stampped hash of the form left_str + right_str + timestamp @param[in] left_str @param[in] right_str @returns A unique string hash */ std::string makeUniqueHash(const std::string& left_str, const std::string& right_str); /** @brief Update the name of a node/link/weight/module @param[in] name Original name @param[in] new_name_format The format for the new name @param[in] unique_str A unique tag @returns A new name */ void updateName(const std::string& name, const std::string& new_name_format, std::string unique_str, std::string& name_prefix, std::string& new_name); /** @brief randomly order the mutations @returns A random list of mutations types */ std::vector<std::string> makeRandomModificationOrder(); /** @brief set random model modification parameters @param[in] node_down_additions lower/upper bound for the number of potential node additions @param[in] node_right_additions lower/upper bound for the number of potential node additions @param[in] node_down_copies lower/upper bound for the number of potential node copies @param[in] node_right_copies lower/upper bound for the number of potential node copies @param[in] link_additions lower/upper bound for the number of potential link additions @param[in] link_copies lower/upper bound for the number of potential link copies @param[in] node_deletions lower/upper bound for the number of potential node deletions @param[in] link_deletions lower/upper bound for the number of potential link deletions @param[in] node_activation_changes lower/upper bound for the number of potential node activation changes @param[in] node_integration_changes lower/upper bound for the number of potential node integration changes @param[in] module_additions lower/upper bound for the number of potential module additions @param[in] module_copies lower/upper bound for the number of potential module copies @param[in] module_deletions lower/upper bound for the number of potential module deletions */ void setRandomModifications( const std::pair<int, int>& node_down_additions, const std::pair<int, int>& node_right_additions, const std::pair<int, int>& node_down_copies, const std::pair<int, int>& node_right_copies, const std::pair<int, int>& link_additions, const std::pair<int, int>& link_copies, const std::pair<int, int>& node_deletions, const std::pair<int, int>& link_deletions, const std::pair<int, int>& node_activation_changes, const std::pair<int, int>& node_integration_changes, const std::pair<int, int>& module_additions, const std::pair<int, int>& module_copies, const std::pair<int, int>& module_deletions); std::vector<std::pair<int,int>> getRandomModifications() const; ///< get a copy of the current Random modifications attributes /** @brief make random model modification parameters */ void makeRandomModifications(); /** @brief Entry point for users to code their adaptive scheduler to modify model modification parameters based on a given trigger @param[in] n_generations The number of evolution generations @param[in] models The models in the population @param[in] model_errors The trace of models errors from validation at each generation */ virtual void adaptiveReplicatorScheduler( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations); private: // modification parameters int n_node_down_additions_ = 0; ///< new nodes "down" to add to the model (nodes are created through replication) int n_node_right_additions_ = 0; ///< new nodes "right" to add to the model (nodes are created through replication) int n_node_down_copies_ = 0; ///< nodes to duplicate "down" in the model int n_node_right_copies_ = 0; ///< nodes to duplicate "right" in the model int n_link_additions_ = 0; ///< new links to add to the model int n_link_copies_ = 0; ///< new links to copy in the model int n_node_deletions_ = 0; ///< nodes to remove from the model int n_link_deletions_ = 0; ///< links to remove from the model int n_node_activation_changes_ = 0; ///< nodes to change the activation int n_node_integration_changes_ = 0; ///< nodes to change the activation int n_module_additions_ = 0; ///< new modules added to the model (modules are created through replication) int n_module_copies_ = 0; ///< copied modules added to the model (modules are created through replication) int n_module_deletions_ = 0; ///< modules to remove from the model // random modification parameters std::pair<int, int> node_down_additions_ = std::make_pair(0, 0); std::pair<int, int> node_right_additions_ = std::make_pair(0, 0); std::pair<int, int> node_down_copies_ = std::make_pair(0, 0); std::pair<int, int> node_right_copies_ = std::make_pair(0, 0); std::pair<int, int> link_additions_ = std::make_pair(0, 0); std::pair<int, int> link_copies_ = std::make_pair(0, 0); std::pair<int, int> node_deletions_ = std::make_pair(0, 0); std::pair<int, int> link_deletions_ = std::make_pair(0, 0); std::pair<int, int> node_activation_changes_ = std::make_pair(0, 0); std::pair<int, int> node_integration_changes_ = std::make_pair(0, 0); std::pair<int, int> module_additions_ = std::make_pair(0, 0); std::pair<int, int> module_copies_ = std::make_pair(0, 0); std::pair<int, int> module_deletions_ = std::make_pair(0, 0); std::vector<std::pair<std::shared_ptr<ActivationOp<TensorT>>, std::shared_ptr<ActivationOp<TensorT>>>> node_activations_; std::vector<std::tuple<std::shared_ptr<IntegrationOp<TensorT>>, std::shared_ptr<IntegrationErrorOp<TensorT>>, std::shared_ptr<IntegrationWeightGradOp<TensorT>>>> node_integrations_; // not yet implemented... int n_weight_changes_ = 0; ///< the number of weights to change in the model TensorT weight_change_stdev_ = (TensorT)0; ///< the standard deviation to change the weights in the model }; template<typename TensorT> void ModelReplicator<TensorT>::setNNodeDownCopies(const int& n_node_copies) { n_node_down_copies_ = n_node_copies; } template<typename TensorT> void ModelReplicator<TensorT>::setNNodeDownAdditions(const int& n_node_additions) { n_node_down_additions_ = n_node_additions; } template<typename TensorT> void ModelReplicator<TensorT>::setNNodeRightCopies(const int& n_node_copies) { n_node_right_copies_ = n_node_copies; } template<typename TensorT> void ModelReplicator<TensorT>::setNNodeRightAdditions(const int& n_node_additions) { n_node_right_additions_ = n_node_additions; } template<typename TensorT> void ModelReplicator<TensorT>::setNLinkAdditions(const int& n_link_additions) { n_link_additions_ = n_link_additions; } template<typename TensorT> void ModelReplicator<TensorT>::setNLinkCopies(const int& n_link_copies) { n_link_copies_ = n_link_copies; } template<typename TensorT> void ModelReplicator<TensorT>::setNNodeDeletions(const int& n_node_deletions) { n_node_deletions_ = n_node_deletions; } template<typename TensorT> void ModelReplicator<TensorT>::setNLinkDeletions(const int& n_link_deletions) { n_link_deletions_ = n_link_deletions; } template<typename TensorT> void ModelReplicator<TensorT>::setNNodeActivationChanges(const int & n_node_activation_changes) { n_node_activation_changes_ = n_node_activation_changes; } template<typename TensorT> void ModelReplicator<TensorT>::setNNodeIntegrationChanges(const int & n_node_integration_changes) { n_node_integration_changes_ = n_node_integration_changes; } template<typename TensorT> void ModelReplicator<TensorT>::setNodeActivations(const std::vector<std::pair<std::shared_ptr<ActivationOp<TensorT>>, std::shared_ptr<ActivationOp<TensorT>>>>& node_activations) { node_activations_ = node_activations; } template<typename TensorT> void ModelReplicator<TensorT>::setNodeIntegrations(const std::vector<std::tuple<std::shared_ptr<IntegrationOp<TensorT>>, std::shared_ptr<IntegrationErrorOp<TensorT>>, std::shared_ptr<IntegrationWeightGradOp<TensorT>>>>& node_integrations) { node_integrations_ = node_integrations; } template<typename TensorT> void ModelReplicator<TensorT>::setNModuleAdditions(const int & n_module_additions) { n_module_additions_ = n_module_additions; } template<typename TensorT> inline void ModelReplicator<TensorT>::setNModuleCopies(const int & n_module_copies) { n_module_copies_ = n_module_copies; } template<typename TensorT> void ModelReplicator<TensorT>::setNModuleDeletions(const int & n_module_deletions) { n_module_deletions_ = n_module_deletions; } template<typename TensorT> void ModelReplicator<TensorT>::setNWeightChanges(const int& n_weight_changes) { n_weight_changes_ = n_weight_changes; } template<typename TensorT> void ModelReplicator<TensorT>::setWeightChangeStDev(const TensorT& weight_change_stdev) { weight_change_stdev_ = weight_change_stdev; } template<typename TensorT> int ModelReplicator<TensorT>::getNNodeDownCopies() const { return n_node_down_copies_; } template<typename TensorT> int ModelReplicator<TensorT>::getNNodeDownAdditions() const { return n_node_down_additions_; } template<typename TensorT> int ModelReplicator<TensorT>::getNNodeRightCopies() const { return n_node_right_copies_; } template<typename TensorT> int ModelReplicator<TensorT>::getNNodeRightAdditions() const { return n_node_right_additions_; } template<typename TensorT> int ModelReplicator<TensorT>::getNLinkAdditions() const { return n_link_additions_; } template<typename TensorT> int ModelReplicator<TensorT>::getNLinkCopies() const { return n_link_copies_; } template<typename TensorT> int ModelReplicator<TensorT>::getNNodeDeletions() const { return n_node_deletions_; } template<typename TensorT> int ModelReplicator<TensorT>::getNLinkDeletions() const { return n_link_deletions_; } template<typename TensorT> int ModelReplicator<TensorT>::getNNodeActivationChanges() const { return n_node_activation_changes_; } template<typename TensorT> int ModelReplicator<TensorT>::getNNodeIntegrationChanges() const { return n_node_integration_changes_; } template<typename TensorT> std::vector<std::pair<std::shared_ptr<ActivationOp<TensorT>>, std::shared_ptr<ActivationOp<TensorT>>>> ModelReplicator<TensorT>::getNodeActivations() const { return node_activations_; } template<typename TensorT> std::vector<std::tuple<std::shared_ptr<IntegrationOp<TensorT>>, std::shared_ptr<IntegrationErrorOp<TensorT>>, std::shared_ptr<IntegrationWeightGradOp<TensorT>>>> ModelReplicator<TensorT>::getNodeIntegrations() const { return node_integrations_; } template<typename TensorT> int ModelReplicator<TensorT>::getNModuleAdditions() const { return n_module_additions_; } template<typename TensorT> inline int ModelReplicator<TensorT>::getNModuleCopies() const { return n_module_copies_; } template<typename TensorT> int ModelReplicator<TensorT>::getNModuleDeletions() const { return n_module_deletions_; } template<typename TensorT> int ModelReplicator<TensorT>::getNWeightChanges() const { return n_weight_changes_; } template<typename TensorT> TensorT ModelReplicator<TensorT>::getWeightChangeStDev() const { return weight_change_stdev_; } template<typename TensorT> std::string ModelReplicator<TensorT>::makeUniqueHash(const std::string& left_str, const std::string& right_str) { std::chrono::time_point<std::chrono::system_clock> time_now = std::chrono::system_clock::now(); std::time_t time_now_t = std::chrono::system_clock::to_time_t(time_now); std::tm now_tm = *std::localtime(&time_now_t); char timestamp[64]; std::strftime(timestamp, 64, "%Y-%m-%d-%H-%M-%S", &now_tm); char hash_char[512]; sprintf(hash_char, "%s_%s_%s", left_str.data(), right_str.data(), timestamp); std::string hash_str(hash_char); return hash_str; } template<typename TensorT> std::vector<std::string> ModelReplicator<TensorT>::selectNodes( const Model<TensorT>& model, const std::vector<NodeType>& node_type_exclude, const std::vector<NodeType>& node_type_include) { // populate our list of nodes to select from std::vector<std::string> node_ids; for (const Node<TensorT>& node : model.getNodes()) { // check the exclusion list bool exclude_node = false; for (const NodeType& node_type : node_type_exclude) { if (node_type == node.getType()) { exclude_node = true; break; } } // check the inclusion list bool include_node = true; if (node_type_include.size() > 0) { include_node = false; for (const NodeType& node_type : node_type_include) { if (node_type == node.getType()) { include_node = true; break; } } } // add the node name to the list if (include_node && !exclude_node) node_ids.push_back(node.getName()); } return node_ids; } template<typename TensorT> std::vector<std::string> ModelReplicator<TensorT>::selectModules(const Model<TensorT>& model, const std::vector<NodeType>& node_type_exclude, const std::vector<NodeType>& node_type_include) { // populate our list of modules to select from std::set<std::string> module_name_set; for (const Node<TensorT>& node : model.getNodes()) { // check the exclusion list bool exclude_node = false; for (const NodeType& node_type : node_type_exclude) { if (node_type == node.getType()) { exclude_node = true; break; } } // check the inclusion list bool include_node = true; if (node_type_include.size() > 0) { include_node = false; for (const NodeType& node_type : node_type_include) { if (node_type == node.getType()) { include_node = true; break; } } } // add the node name to the list if (include_node && !exclude_node && !node.getModuleName().empty()) module_name_set.insert(node.getModuleName()); } std::vector<std::string> module_ids(module_name_set.begin(), module_name_set.end()); return module_ids; } //std::string ModelReplicator<TensorT>::selectRandomNode( // const Model<TensorT>& model, // const std::vector<NodeType>& node_type_exclude, // const std::vector<NodeType>& node_type_include, // const Node<TensorT>& node, // const TensorT& distance_weight, // const std::string& direction) //{ // // [TODO: add method body] //} template<typename TensorT> std::string ModelReplicator<TensorT>::selectRandomNode( const Model<TensorT>& model, const std::vector<NodeType>& node_type_exclude, const std::vector<NodeType>& node_type_include) { std::vector<std::string> node_ids = selectNodes(model, node_type_exclude, node_type_include); if (node_ids.size() > 0) return selectRandomElement<std::string>(node_ids); else { printf("No nodes were found that matched the inclusion/exclusion criteria.\n"); return ""; } } template<typename TensorT> std::string ModelReplicator<TensorT>::selectRandomLink( const Model<TensorT>& model, const std::vector<NodeType>& source_node_type_exclude, const std::vector<NodeType>& source_node_type_include, const std::vector<NodeType>& sink_node_type_exclude, const std::vector<NodeType>& sink_node_type_include) { // select all source and sink nodes that meet the inclusion/exclusion criteria std::vector<std::string> source_node_ids = selectNodes(model, source_node_type_exclude, source_node_type_include); if (source_node_ids.size() == 0) { printf("No source nodes were found that matched the inclusion/exclusion criteria.\n"); return ""; } std::vector<std::string> sink_node_ids = selectNodes(model, sink_node_type_exclude, sink_node_type_include); if (sink_node_ids.size() == 0) { printf("No sink nodes were found that matched the inclusion/exclusion criteria.\n"); return ""; } // find all links that have an existing connection with the source and sink node candidates std::vector<std::string> link_ids; for (const Link& link : model.getLinks()) { if (std::count(source_node_ids.begin(), source_node_ids.end(), link.getSourceNodeName()) != 0) if (std::count(sink_node_ids.begin(), sink_node_ids.end(), link.getSinkNodeName()) != 0) link_ids.push_back(link.getName()); } if (link_ids.size() > 0) return selectRandomElement<std::string>(link_ids); else { printf("No links were found that matched the node inclusion/exclusion criteria.\n"); return ""; } } template<typename TensorT> inline void ModelReplicator<TensorT>::addNodeRight(Model<TensorT>& model, std::string unique_str, bool as_copy) { // pick a random node from the model // that is not an input or bias std::vector<NodeType> node_exclusion_list = { NodeType::bias, NodeType::input, NodeType::output, NodeType::unmodifiable }; std::vector<NodeType> node_inclusion_list = { NodeType::hidden }; std::string random_node_name = selectRandomNode(model, node_exclusion_list, node_inclusion_list); if (random_node_name.empty() || random_node_name == "") { std::cout << "No nodes were added to the model." << std::endl; return; } // copy the node Node<TensorT> new_node = model.getNode(random_node_name); std::string new_node_name, add_node_name; if (as_copy) updateName(random_node_name, "%s@copyNodeRight#", unique_str, add_node_name, new_node_name); else { updateName(random_node_name, "%s@addNodeRight#", unique_str, add_node_name, new_node_name); new_node.setLayerName(""); //reset the layername } new_node.setName(new_node_name); new_node.setType(NodeType::hidden); // [TODO: add test to check for the type! model.addNodes({ new_node }); std::vector<std::string> input_link_names, output_link_names; std::vector<std::string> bias_link_names; for (const Link& link : model.getLinks()) { // find the random_nodes bias if (link.getSinkNodeName() == random_node_name && model.getNode(link.getSourceNodeName()).getType() == NodeType::bias){ bias_link_names.push_back(link.getName()); } if (link.getSinkNodeName() == random_node_name && model.getNode(link.getSourceNodeName()).getType() != NodeType::bias) { input_link_names.push_back(link.getName()); } if (link.getSourceNodeName() == random_node_name) { output_link_names.push_back(link.getName()); } } if (input_link_names.size() == 0) { std::cout << "No nodes were added to the model." << std::endl; return; } if (bias_link_names.size() != 0) { std::string new_bias_name; if (!as_copy) { // create a new bias char new_bias_name_char[512]; sprintf(new_bias_name_char, "Bias_%s@addNodeRight#", add_node_name.data()); new_bias_name = makeUniqueHash(new_bias_name_char, unique_str); Node<TensorT> new_bias(new_bias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); model.addNodes({ new_bias }); } else new_bias_name = model.getLink(bias_link_names[0]).getSourceNodeName(); // create a link from the new bias to the new node std::string weight_bias_name; if (!as_copy) { char weight_bias_name_char[512]; sprintf(weight_bias_name_char, "%s_to_%s@addNodeRight#", new_bias_name.data(), new_node_name.data()); weight_bias_name = makeUniqueHash(weight_bias_name_char, unique_str); std::shared_ptr<WeightInitOp<TensorT>> bias_weight_init; bias_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); Weight<TensorT> weight_bias = model.getWeight(model.getLink(bias_link_names[0]).getWeightName()); // [OPTIMIZATION: use Link.getWeightName() directly] weight_bias.setName(weight_bias_name); weight_bias.setWeightInitOp(bias_weight_init); weight_bias.setInitWeight(true); // re-initalize the new weight model.addWeights({ weight_bias }); } else weight_bias_name = model.getLink(bias_link_names[0]).getWeightName(); char link_bias_name_char[512]; if (as_copy) sprintf(link_bias_name_char, "%s_to_%s@copyNodeRight#", new_bias_name.data(), new_node_name.data()); else sprintf(link_bias_name_char, "%s_to_%s@addNodeRight#", new_bias_name.data(), new_node_name.data()); std::string link_bias_name = makeUniqueHash(link_bias_name_char, unique_str); Link link_bias(link_bias_name, new_bias_name, new_node_name, weight_bias_name); model.addLinks({ link_bias }); } // replicate all input connections for (const std::string& input_link_name : input_link_names) { // change the source to new node weight std::string weight_name; if (!as_copy) { Weight<TensorT> weight = model.getWeight(model.getLink(input_link_name).getWeightName()); // copy assignment char weight_name_char[512]; sprintf(weight_name_char, "Weight_%s_to_%s@addNodeRight#", model.getLink(input_link_name).getSourceNodeName().data(), new_node_name.data()); weight_name = makeUniqueHash(weight_name_char, unique_str); weight.setName(weight_name); weight.setInitWeight(true); // re-initalize the new weight model.addWeights({ weight }); } else weight_name = model.getLink(input_link_name).getWeightName(); // change the source to new node link Link modified_link = model.getLink(input_link_name); modified_link.setSinkNodeName(new_node_name); modified_link.setWeightName(weight_name); char modified_link_name_char[512]; if (as_copy) sprintf(modified_link_name_char, "Link_%s_to_%s@copyNodeRight#", modified_link.getSourceNodeName().data(), new_node_name.data()); else sprintf(modified_link_name_char, "Link_%s_to_%s@addNodeRight#", modified_link.getSourceNodeName().data(), new_node_name.data()); std::string modified_link_name = makeUniqueHash(modified_link_name_char, unique_str); modified_link.setName(modified_link_name); model.addLinks({ modified_link }); } // replicate all output connections for (const std::string& output_link_name : output_link_names) { // change the source to new node weight std::string weight_name; if (!as_copy) { Weight<TensorT> weight = model.getWeight(model.getLink(output_link_name).getWeightName()); // copy assignment char weight_name_char[512]; sprintf(weight_name_char, "Weight_%s_to_%s@addNodeRight#", new_node_name.data(), model.getLink(output_link_name).getSinkNodeName().data()); weight_name = makeUniqueHash(weight_name_char, unique_str); weight.setName(weight_name); weight.setInitWeight(true); // re-initalize the new weight model.addWeights({ weight }); } else weight_name = model.getLink(output_link_name).getWeightName(); // change the source to new node link Link modified_link = model.getLink(output_link_name); modified_link.setSourceNodeName(new_node_name); modified_link.setWeightName(weight_name); char modified_link_name_char[512]; if (as_copy) sprintf(modified_link_name_char, "Link_%s_to_%s@copyNodeRight#", new_node_name.data(), modified_link.getSinkNodeName().data()); else sprintf(modified_link_name_char, "Link_%s_to_%s@addNodeRight#", new_node_name.data(), modified_link.getSinkNodeName().data()); std::string modified_link_name = makeUniqueHash(modified_link_name_char, unique_str); modified_link.setName(modified_link_name); model.addLinks({ modified_link }); } } template<typename TensorT> void ModelReplicator<TensorT>::addLink( Model<TensorT>& model, std::string unique_str, bool as_copy) { // define the inclusion/exclusion nodes const std::vector<NodeType> source_node_type_exclude = { NodeType::bias, NodeType::output }; // no output can be a source const std::vector<NodeType> source_node_type_include = {}; const std::vector<NodeType> sink_node_type_exclude = { NodeType::bias, NodeType::input }; // no input can be a sink const std::vector<NodeType> sink_node_type_include = {}; // select candidate source nodes std::vector<std::string> source_node_ids = selectNodes(model, source_node_type_exclude, source_node_type_include); if (source_node_ids.size() == 0) { printf("No source nodes were found that matched the inclusion/exclusion criteria.\n"); return; } // select a random source node std::string source_node_name = selectRandomElement<std::string>(source_node_ids); // select candidate sink nodes std::vector<std::string> sink_node_ids = selectNodes(model, sink_node_type_exclude, sink_node_type_include); // remove candidate sink nodes for which a link already exists // [TODO: add test coverage] std::vector<std::string> sink_node_ids_noDuplicates; for (std::string sink_node : sink_node_ids) { bool new_link = true; for (const auto& link_map : model.links_) { if (link_map.second->getSourceNodeName() == source_node_name && link_map.second->getSinkNodeName() == sink_node) { new_link = false; break; } } if (new_link) { sink_node_ids_noDuplicates.push_back(sink_node); } } if (sink_node_ids_noDuplicates.size() == 0) { printf("No sink nodes were found that matched the inclusion/exclusion criteria.\n"); return; } // select a random sink node std::string sink_node_name = selectRandomElement<std::string>(sink_node_ids_noDuplicates); // create the new weight based on a random link (this can probably be optimized...) std::string random_link = selectRandomLink(model, source_node_type_exclude, source_node_type_include, sink_node_type_exclude, sink_node_type_include); if (random_link.empty()) { printf("No links were found that could be added to the Model.\n"); return; } std::string weight_name; if (!as_copy) { Weight<TensorT> weight = model.getWeight(model.getLink(random_link).getWeightName()); // copy assignment char weight_name_char[512]; sprintf(weight_name_char, "Weight_%s_to_%s@addLink#", source_node_name.data(), sink_node_name.data()); weight_name = makeUniqueHash(weight_name_char, unique_str); weight.setName(weight_name); weight.setInitWeight(true); // re-initalize the new weight model.addWeights({ weight }); } else weight_name = model.getLink(random_link).getWeightName(); // create the new link char link_name_char[512]; if (as_copy) sprintf(link_name_char, "Link_%s_to_%s@copyLink#", source_node_name.data(), sink_node_name.data()); else sprintf(link_name_char, "Link_%s_to_%s@addLink#", source_node_name.data(), sink_node_name.data()); std::string link_name = makeUniqueHash(link_name_char, unique_str); Link link(link_name, source_node_name, sink_node_name, weight_name); model.addLinks({ link }); } template<typename TensorT> inline void ModelReplicator<TensorT>::copyLink(Model<TensorT>& model, std::string unique_str) { addLink(model, unique_str, true); } template<typename TensorT> void ModelReplicator<TensorT>::addModule(Model<TensorT>& model, std::string unique_str, bool as_copy) { // pick a random module from the model std::vector<NodeType> node_exclusion_list = {}; std::vector<NodeType> node_inclusion_list = {}; std::string random_module_name = selectRandomModule(model, node_exclusion_list, node_inclusion_list); if (random_module_name.empty()) { std::cout << "No modules were added to the model." << std::endl; return; } // update the module name [TODO: update the module ID] std::string new_name_format; if (as_copy) new_name_format = "%s@copyModule#"; else new_name_format = "%s@addModule#"; std::string new_module_name, module_name_prefix; updateName(random_module_name, new_name_format, unique_str, module_name_prefix, new_module_name); std::string new_module_suffix; if (as_copy) new_module_suffix = makeUniqueHash("@copyModule#", unique_str); // time-stamp should be constant! else new_module_suffix = makeUniqueHash("@addModule#", unique_str); // time-stamp should be constant! // copy the module and reconnect the links std::vector<Node<TensorT>> new_nodes; std::vector<Link> new_links; std::vector<Weight<TensorT>> new_weights; std::vector<Link> connecting_links; std::vector<Weight<TensorT>> connecting_weights; for (Link& link : model.getLinks()) { if (link.getModuleName() == random_module_name) { // copy the internal nodes, weights, and links, and give them a new name/id/module_name/module_id Node<TensorT> source_node = model.getNode(link.getSourceNodeName()); std::string new_node_name, node_prefix; updateName(source_node.getName(), new_name_format, unique_str, node_prefix, new_node_name); source_node.setName(node_prefix + new_module_suffix); source_node.setModuleName(new_module_name); if (std::count(new_nodes.begin(), new_nodes.end(), source_node) == 0) new_nodes.push_back(source_node); Node<TensorT> sink_node = model.getNode(link.getSinkNodeName()); updateName(sink_node.getName(), new_name_format, unique_str, node_prefix, new_node_name); sink_node.setName(node_prefix + new_module_suffix); sink_node.setModuleName(new_module_name); if (std::count(new_nodes.begin(), new_nodes.end(), sink_node) == 0) new_nodes.push_back(sink_node); std::string weight_name; if (!as_copy) { Weight<TensorT> weight = model.getWeight(link.getWeightName()); weight.setInitWeight(true); // re-initalize the new weight std::string new_weight_name, weight_prefix; updateName(weight.getName(), new_name_format, unique_str, weight_prefix, new_weight_name); weight_name = weight_prefix + new_module_suffix; weight.setName(weight_name); weight.setModuleName(new_module_name); if (std::count(new_weights.begin(), new_weights.end(), weight) == 0) new_weights.push_back(weight); } else weight_name = link.getWeightName(); std::string new_link_name, link_prefix; updateName(link.getName(), new_name_format, unique_str, link_prefix, new_link_name); link.setName(link_prefix + new_module_suffix); link.setModuleName(new_module_name); link.setSourceNodeName(source_node.getName()); link.setSinkNodeName(sink_node.getName()); link.setWeightName(weight_name); if (std::count(new_links.begin(), new_links.end(), link) == 0) new_links.push_back(link); } else if (model.getNode(link.getSourceNodeName()).getModuleName() == random_module_name) { // copy the connecting links and weights, and give them a new name/id // and update the source node name (i.e., connect to the new module) std::string weight_name; if (!as_copy) { Weight<TensorT> weight = model.getWeight(link.getWeightName()); weight.setInitWeight(true); // re-initalize the new weight std::string new_weight_name, weight_prefix; updateName(weight.getName(), new_name_format, unique_str, weight_prefix, new_weight_name); weight_name = weight_prefix + new_module_suffix; weight.setName(weight_name); if (std::count(connecting_weights.begin(), connecting_weights.end(), weight) == 0) connecting_weights.push_back(weight); } else weight_name = link.getWeightName(); std::string new_link_name, link_prefix; updateName(link.getName(), new_name_format, unique_str, link_prefix, new_link_name); link.setName(link_prefix + new_module_suffix); std::string new_node_name, node_prefix; updateName(link.getSourceNodeName(), new_name_format, unique_str, node_prefix, new_node_name); link.setSourceNodeName(node_prefix + new_module_suffix); link.setWeightName(weight_name); if (std::count(connecting_links.begin(), connecting_links.end(), link) == 0) connecting_links.push_back(link); } else if (model.getNode(link.getSinkNodeName()).getModuleName() == random_module_name) { // copy the connecting links and weights, and give them a new name/id // and update the sink node name (i.e., connect to the new module) std::string weight_name; if (!as_copy) { Weight<TensorT> weight = model.getWeight(link.getWeightName()); weight.setInitWeight(true); // re-initalize the new weight std::string new_weight_name, weight_prefix; updateName(weight.getName(), new_name_format, unique_str, weight_prefix, new_weight_name); weight_name = weight_prefix + new_module_suffix; weight.setName(weight_name); if (std::count(connecting_weights.begin(), connecting_weights.end(), weight) == 0) connecting_weights.push_back(weight); } else weight_name = link.getWeightName(); std::string new_link_name, link_prefix; updateName(link.getName(), new_name_format, unique_str, link_prefix, new_link_name); link.setName(link_prefix + new_module_suffix); std::string new_node_name, node_prefix; updateName(link.getSinkNodeName(), new_name_format, unique_str, node_prefix, new_node_name); link.setSinkNodeName(node_prefix + new_module_suffix); link.setWeightName(weight_name); if (std::count(connecting_links.begin(), connecting_links.end(), link) == 0) connecting_links.push_back(link); } } // add the new nodes/links/weights to the model model.addNodes(new_nodes); model.addWeights(new_weights); model.addLinks(new_links); model.addWeights(connecting_weights); model.addLinks(connecting_links); } template<typename TensorT> inline void ModelReplicator<TensorT>::copyModule(Model<TensorT>& model, std::string unique_str) { addModule(model, unique_str, true); } template<typename TensorT> std::string ModelReplicator<TensorT>::selectRandomModule(const Model<TensorT>& model, const std::vector<NodeType>& node_type_exclude, const std::vector<NodeType>& node_type_include) { std::vector<std::string> module_ids = selectModules(model, node_type_exclude, node_type_include); if (module_ids.size() > 0) return selectRandomElement<std::string>(module_ids); else { printf("No nodes were found that matched the inclusion/exclusion criteria.\n"); return ""; } } template<typename TensorT> void ModelReplicator<TensorT>::copyNodeDown(Model<TensorT>& model, std::string unique_str) { addNodeDown(model, unique_str, true); } template<typename TensorT> void ModelReplicator<TensorT>::copyNodeRight(Model<TensorT>& model, std::string unique_str) { addNodeRight(model, unique_str, true); } template<typename TensorT> void ModelReplicator<TensorT>::addNodeDown(Model<TensorT>& model, std::string unique_str, bool as_copy) { // pick a random node from the model // that is not an input or bias std::vector<NodeType> node_exclusion_list = { NodeType::bias, NodeType::input, NodeType::output, NodeType::unmodifiable }; std::vector<NodeType> node_inclusion_list = { NodeType::hidden }; std::string random_node_name = selectRandomNode(model, node_exclusion_list, node_inclusion_list); if (random_node_name.empty() || random_node_name == "") { std::cout << "No nodes were added to the model." << std::endl; return; } // copy the node Node<TensorT> new_node = model.getNode(random_node_name); // select a random input link // [OPTIMIZATION: refactor to pass back the Link and not just the name] std::vector<std::string> input_link_names, bias_link_names; for (const Link& link : model.getLinks()) { if (link.getSinkNodeName() == random_node_name && model.getNode(link.getSourceNodeName()).getType() != NodeType::bias){ input_link_names.push_back(link.getName()); } if (link.getSinkNodeName() == random_node_name && model.getNode(link.getSourceNodeName()).getType() == NodeType::bias) { bias_link_names.push_back(link.getName()); } } if (input_link_names.size() == 0) { std::cout << "No nodes were added to the model." << std::endl; return; } std::string input_link_name = selectRandomElement<std::string>(input_link_names); std::string new_node_name, add_node_name; if (as_copy) updateName(random_node_name, "%s@copyNodeDown#", unique_str, add_node_name, new_node_name); else { updateName(random_node_name, "%s@addNodeDown#", unique_str, add_node_name, new_node_name); new_node.setLayerName(""); //reset the layername } new_node.setName(new_node_name); new_node.setType(NodeType::hidden); // [TODO: add test to check for the type! model.addNodes({ new_node }); if (bias_link_names.size() != 0) { std::string new_bias_name; if (!as_copy) { // create a new bias char new_bias_name_char[512]; sprintf(new_bias_name_char, "Bias_%s@addNodeDown#", add_node_name.data()); new_bias_name = makeUniqueHash(new_bias_name_char, unique_str); Node<TensorT> new_bias(new_bias_name, NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>())); model.addNodes({ new_bias }); } else new_bias_name = model.getLink(bias_link_names[0]).getSourceNodeName(); // create a link from the new bias to the new node std::string weight_bias_name; if (!as_copy) { char weight_bias_name_char[512]; sprintf(weight_bias_name_char, "%s_to_%s@addNodeDown#", new_bias_name.data(), new_node_name.data()); weight_bias_name = makeUniqueHash(weight_bias_name_char, unique_str); std::shared_ptr<WeightInitOp<TensorT>> bias_weight_init; bias_weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); Weight<TensorT> weight_bias = model.getWeight(model.getLink(bias_link_names[0]).getWeightName()); // [OPTIMIZATION: use Link.getWeightName() directly] weight_bias.setName(weight_bias_name); weight_bias.setWeightInitOp(bias_weight_init); weight_bias.setInitWeight(true); // re-initalize the new weight model.addWeights({ weight_bias }); } else weight_bias_name = model.getLink(bias_link_names[0]).getWeightName(); char link_bias_name_char[512]; if (as_copy) sprintf(link_bias_name_char, "%s_to_%s@copyNodeDown#", new_bias_name.data(), new_node_name.data()); else sprintf(link_bias_name_char, "%s_to_%s@addNodeDown#", new_bias_name.data(), new_node_name.data()); std::string link_bias_name = makeUniqueHash(link_bias_name_char, unique_str); Link link_bias(link_bias_name, new_bias_name, new_node_name, weight_bias_name); model.addLinks({ link_bias }); } // change the output node name of the link to the new copied node name Link modified_link = model.getLink(input_link_name); modified_link.setSinkNodeName(new_node_name); char modified_link_name_char[512]; if (as_copy) sprintf(modified_link_name_char, "Link_%s_to_%s@copyNodeDown#", modified_link.getSourceNodeName().data(), new_node_name.data()); else sprintf(modified_link_name_char, "Link_%s_to_%s@addNodeDown#", modified_link.getSourceNodeName().data(), new_node_name.data()); std::string modified_link_name = makeUniqueHash(modified_link_name_char, unique_str); modified_link.setName(modified_link_name); model.addLinks({ modified_link }); // add a new weight that connects the new copied node // to its original node std::string weight_name; if (!as_copy) { Weight<TensorT> weight = model.getWeight(model.getLink(input_link_name).getWeightName()); // copy assignment char weight_name_char[512]; sprintf(weight_name_char, "Weight_%s_to_%s@addNodeDown#", new_node_name.data(), random_node_name.data()); weight_name = makeUniqueHash(weight_name_char, unique_str); weight.setName(weight_name); weight.setInitWeight(true); // re-initalize the new weight model.addWeights({ weight }); } else weight_name = model.getLink(input_link_name).getWeightName(); // add a new link that connects the new copied node // to its original node char link_name_char[512]; if (as_copy) sprintf(link_name_char, "Link_%s_to_%s@copyNodeDown#", new_node_name.data(), random_node_name.data()); else sprintf(link_name_char, "Link_%s_to_%s@addNodeDown#", new_node_name.data(), random_node_name.data()); std::string link_name = makeUniqueHash(link_name_char, unique_str); Link link(link_name, new_node_name, random_node_name, weight_name); model.addLinks({ link }); // remove the unmodified link // [CHECK: is this needed? identified as a high CPU call due to prune weights] model.removeLinks({ input_link_name }); } template<typename TensorT> void ModelReplicator<TensorT>::deleteNode(Model<TensorT>& model, int prune_iterations) { // pick a random node from the model // that is not an input, bias, nor output std::vector<NodeType> node_exclusion_list = { NodeType::bias, NodeType::input, NodeType::output, NodeType::unmodifiable }; std::vector<NodeType> node_inclusion_list = { NodeType::hidden }; std::string random_node_name = selectRandomNode(model, node_exclusion_list, node_inclusion_list); // delete the node, its bias, and its bias link if (!random_node_name.empty() || random_node_name != "") // isn't this this same thing? { // std::cout<<"Random node name: "<<random_node_name<<std::endl; model.removeNodes({ random_node_name }); model.pruneModel(prune_iterations); // this action can remove additional nodes including inputs, biases, and outputs } } template<typename TensorT> void ModelReplicator<TensorT>::deleteLink(Model<TensorT>& model, int prune_iterations) { // pick a random link from the model // that does not connect from a bias or input // [TODO: need to implement a check that the deletion does not also remove an input/output node] std::vector<NodeType> source_exclusion_list = { NodeType::bias, NodeType::unmodifiable }; std::vector<NodeType> source_inclusion_list = {}; std::vector<NodeType> sink_exclusion_list = { NodeType::bias, NodeType::unmodifiable }; std::vector<NodeType> sink_inclusion_list = {}; std::string random_link_name = selectRandomLink( model, source_exclusion_list, source_inclusion_list, sink_exclusion_list, sink_inclusion_list); // delete the link and weight if required if (!random_link_name.empty() || random_link_name != "") // isn't this this same thing? { model.removeLinks({ random_link_name }); model.pruneModel(prune_iterations); // this action can remove additional nodes including inputs, biases, and outputs } } template<typename TensorT> void ModelReplicator<TensorT>::deleteModule(Model<TensorT>& model, int prune_iterations) { // pick a random module from the model std::vector<NodeType> node_exclusion_list = {}; std::vector<NodeType> node_inclusion_list = {}; std::string random_module_name = selectRandomModule(model, node_exclusion_list, node_inclusion_list); if (random_module_name.empty()) { std::cout << "No modules were deleted from the model." << std::endl; return; } // remove nodes/link/weights from the model std::vector<std::string> delete_nodes; std::vector<std::string> delete_links; std::vector<std::string> delete_weights; for (Link& link : model.getLinks()) { if (link.getModuleName() == random_module_name) { delete_links.push_back(link.getName()); delete_nodes.push_back(link.getSourceNodeName()); delete_nodes.push_back(link.getSinkNodeName()); delete_weights.push_back(link.getWeightName()); } } model.removeNodes(delete_nodes); model.removeLinks(delete_links); model.removeWeights(delete_weights); // prune the model model.pruneModel(prune_iterations); // this action can remove additional nodes including inputs, biases, and outputs } template<typename TensorT> void ModelReplicator<TensorT>::changeNodeActivation(Model<TensorT>& model, std::string unique_str) { // pick a random node from the model // that is not an input or bias or output std::vector<NodeType> node_exclusion_list = { NodeType::bias, NodeType::input, NodeType::output, NodeType::unmodifiable }; std::vector<NodeType> node_inclusion_list = { NodeType::hidden }; std::string random_node_name = selectRandomNode(model, node_exclusion_list, node_inclusion_list); if (random_node_name.empty() || random_node_name == "") { std::cout << "No node activations were changed in the model." << std::endl; return; } Node<TensorT> new_node = model.getNode(random_node_name); // copy the node std::pair<std::shared_ptr<ActivationOp<TensorT>>, std::shared_ptr<ActivationOp<TensorT>>> new_activation = selectRandomElement(node_activations_); // pick a random activation new_node.setActivation(new_activation.first); // change the activation new_node.setActivationGrad(new_activation.second); // change the activation model.removeNodes({ new_node.getName() }); // delete the original node model.addNodes({ new_node }); // add in the new node } template<typename TensorT> void ModelReplicator<TensorT>::changeNodeIntegration(Model<TensorT>& model, std::string unique_str) { // pick a random node from the model // that is not an input or bias or output std::vector<NodeType> node_exclusion_list = { NodeType::bias, NodeType::input, NodeType::output, NodeType::unmodifiable }; std::vector<NodeType> node_inclusion_list = { NodeType::hidden }; std::string random_node_name = selectRandomNode(model, node_exclusion_list, node_inclusion_list); if (random_node_name.empty() || random_node_name == "") { std::cout << "No node activations were changed in the model." << std::endl; return; } Node<TensorT> new_node = model.getNode(random_node_name); // copy the node std::tuple<std::shared_ptr<IntegrationOp<TensorT>>, std::shared_ptr<IntegrationErrorOp<TensorT>>, std::shared_ptr<IntegrationWeightGradOp<TensorT>>> new_integration = selectRandomElement(node_integrations_); // pick a random integration new_node.setIntegration(std::get<0>(new_integration)); // change the integration new_node.setIntegrationError(std::get<1>(new_integration)); // change the integration new_node.setIntegrationWeightGrad(std::get<2>(new_integration)); // change the integration model.removeNodes({ new_node.getName() }); // delete the original node model.addNodes({ new_node }); // add in the new node } template<typename TensorT> void ModelReplicator<TensorT>::modifyWeight(Model<TensorT>& model) { // [TODO: add method body] // select a random link from the model // change the weight // update the link's weight name // add the new weight back into the model // delete the previous weight } template<typename TensorT> void ModelReplicator<TensorT>::updateName(const std::string & name, const std::string & new_name_format, std::string unique_str, std::string& name_prefix, std::string& new_name) { std::regex re("@"); std::vector<std::string> str_tokens; name_prefix = name; std::copy( std::sregex_token_iterator(name.begin(), name.end(), re, -1), std::sregex_token_iterator(), std::back_inserter(str_tokens)); if (str_tokens.size() > 1) name_prefix = str_tokens[0]; // only retain the last timestamp // printf("New node name: %s\n", add_name.data()); char new_name_char[512]; sprintf(new_name_char, new_name_format.data(), name_prefix.data()); new_name = makeUniqueHash(new_name_char, unique_str); } template<typename TensorT> std::vector<std::string> ModelReplicator<TensorT>::makeRandomModificationOrder() { // create the list of modifications std::vector<std::string> modifications; for (int i = 0; i < n_node_activation_changes_; ++i) modifications.push_back("change_node_activation"); for (int i = 0; i < n_node_integration_changes_; ++i) modifications.push_back("change_node_integration"); for (int i = 0; i < n_node_down_additions_; ++i) modifications.push_back("add_node_down"); for (int i = 0; i < n_node_right_additions_; ++i) modifications.push_back("add_node_right"); for (int i = 0; i < n_node_down_copies_; ++i) modifications.push_back("copy_node_down"); for (int i = 0; i < n_node_right_copies_; ++i) modifications.push_back("copy_node_right"); for (int i = 0; i < n_link_additions_; ++i) modifications.push_back("add_link"); for (int i = 0; i < n_link_copies_; ++i) modifications.push_back("copy_link"); for (int i = 0; i < n_module_additions_; ++i) modifications.push_back("add_module"); for (int i = 0; i < n_module_copies_; ++i) modifications.push_back("copy_module"); for (int i = 0; i < n_node_deletions_; ++i) modifications.push_back("delete_node"); for (int i = 0; i < n_link_deletions_; ++i) modifications.push_back("delete_link"); for (int i = 0; i < n_module_deletions_; ++i) modifications.push_back("delete_module"); // // randomize // std::random_device seed; // std::mt19937 engine(seed()); // std::shuffle(modifications.begin(), modifications.end(), engine); return modifications; } template<typename TensorT> void ModelReplicator<TensorT>::setRandomModifications( const std::pair<int, int>& node_down_additions, const std::pair<int, int>& node_right_additions, const std::pair<int, int>& node_down_copies, const std::pair<int, int>& node_right_copies, const std::pair<int, int>& link_additions, const std::pair<int, int>& link_copies, const std::pair<int, int>& node_deletions, const std::pair<int, int>& link_deletions, const std::pair<int, int>& node_activation_changes, const std::pair<int, int>& node_integration_changes, const std::pair<int, int>& module_additions, const std::pair<int, int>& module_copies, const std::pair<int, int>& module_deletions) { node_down_additions_ = node_down_additions; node_right_additions_ = node_right_additions; node_down_copies_ = node_down_copies; node_right_copies_ = node_right_copies; link_additions_ = link_additions; link_copies_ = link_copies; node_deletions_ = node_deletions; link_deletions_ = link_deletions; node_activation_changes_ = node_activation_changes; node_integration_changes_ = node_integration_changes; module_additions_ = module_additions; module_copies_ = module_copies; module_deletions_ = module_deletions; } template<typename TensorT> inline std::vector<std::pair<int, int>> ModelReplicator<TensorT>::getRandomModifications() const { std::vector<std::pair<int, int>> random_modifications; random_modifications.push_back(node_down_additions_); random_modifications.push_back(node_right_additions_); random_modifications.push_back(node_down_copies_); random_modifications.push_back(node_right_copies_); random_modifications.push_back(link_additions_); random_modifications.push_back(link_copies_); random_modifications.push_back(node_deletions_); random_modifications.push_back(link_deletions_); random_modifications.push_back(node_activation_changes_); random_modifications.push_back(node_integration_changes_); random_modifications.push_back(module_additions_); random_modifications.push_back(module_copies_); random_modifications.push_back(module_deletions_); return random_modifications; } template<typename TensorT> void ModelReplicator<TensorT>::makeRandomModifications() { // random generator for model modifications std::random_device rd; std::mt19937 gen(rd()); // set std::uniform_int_distribution<> node_down_addition_gen(node_down_additions_.first, node_down_additions_.second); setNNodeDownAdditions(node_down_addition_gen(gen)); std::uniform_int_distribution<> node_right_addition_gen(node_right_additions_.first, node_right_additions_.second); setNNodeRightAdditions(node_right_addition_gen(gen)); std::uniform_int_distribution<> node_down_copy_gen(node_down_copies_.first, node_down_copies_.second); setNNodeDownCopies(node_down_copy_gen(gen)); std::uniform_int_distribution<> node_right_copy_gen(node_right_copies_.first, node_right_copies_.second); setNNodeRightCopies(node_right_copy_gen(gen)); std::uniform_int_distribution<> link_addition_gen(link_additions_.first, link_additions_.second); setNLinkAdditions(link_addition_gen(gen)); std::uniform_int_distribution<> link_copy_gen(link_copies_.first, link_copies_.second); setNLinkCopies(link_copy_gen(gen)); std::uniform_int_distribution<> node_deletion_gen(node_deletions_.first, node_deletions_.second); setNNodeDeletions(node_deletion_gen(gen)); std::uniform_int_distribution<> link_deletion_gen(link_deletions_.first, link_deletions_.second); setNLinkDeletions(link_deletion_gen(gen)); std::uniform_int_distribution<> node_activation_changes_gen(node_activation_changes_.first, node_activation_changes_.second); setNNodeActivationChanges(node_activation_changes_gen(gen)); std::uniform_int_distribution<> node_integration_changes_gen(node_integration_changes_.first, node_integration_changes_.second); setNNodeIntegrationChanges(node_integration_changes_gen(gen)); std::uniform_int_distribution<> module_addition_gen(module_additions_.first, module_additions_.second); setNModuleAdditions(module_addition_gen(gen)); std::uniform_int_distribution<> module_copy_gen(module_copies_.first, module_copies_.second); setNModuleCopies(module_copy_gen(gen)); std::uniform_int_distribution<> module_deletion_gen(module_deletions_.first, module_deletions_.second); setNModuleDeletions(module_deletion_gen(gen)); } template<typename TensorT> inline void ModelReplicator<TensorT>::adaptiveReplicatorScheduler(const int & n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) { //TODO } template<typename TensorT> void ModelReplicator<TensorT>::modifyModel(Model<TensorT>& model, std::string unique_str, int prune_iterations) { // randomly order the modifications std::vector<std::string> modifications = makeRandomModificationOrder(); // implement each modification one at a time // and track the counts that each modification is called std::map<std::string, int> modifications_counts; for (const std::string& modification : modifications) modifications_counts.emplace(modification, 0); for (const std::string& modification : modifications) { // [TODO: copyNodeRight] if (modification == "add_node_down") { addNodeDown(model, unique_str + "-" + std::to_string(modifications_counts.at(modification))); modifications_counts[modification] += 1; } else if (modification == "add_node_right") { addNodeRight(model, unique_str + "-" + std::to_string(modifications_counts.at(modification))); modifications_counts[modification] += 1; } else if (modification == "copy_node_down") { copyNodeDown(model, unique_str + "-" + std::to_string(modifications_counts.at(modification))); modifications_counts[modification] += 1; } else if (modification == "copy_node_right") { copyNodeRight(model, unique_str + "-" + std::to_string(modifications_counts.at(modification))); modifications_counts[modification] += 1; } else if (modification == "add_link") { addLink(model, unique_str + "-" + std::to_string(modifications_counts.at(modification))); modifications_counts[modification] += 1; } else if (modification == "copy_link") { copyLink(model, unique_str + "-" + std::to_string(modifications_counts.at(modification))); modifications_counts[modification] += 1; } else if (modification == "delete_node") { deleteNode(model, prune_iterations); modifications_counts[modification] += 1; } else if (modification == "delete_link") { deleteLink(model, prune_iterations); modifications_counts[modification] += 1; } else if (modification == "change_node_activation") { changeNodeActivation(model); modifications_counts[modification] += 1; } else if (modification == "change_node_integration") { changeNodeIntegration(model); modifications_counts[modification] += 1; } else if (modification == "add_module") { addModule(model, unique_str + "-" + std::to_string(modifications_counts.at(modification))); modifications_counts[modification] += 1; } else if (modification == "copy_module") { copyModule(model, unique_str + "-" + std::to_string(modifications_counts.at(modification))); modifications_counts[modification] += 1; } else if (modification == "delete_module") { deleteModule(model, prune_iterations); modifications_counts[modification] += 1; } // [TODO: modifyWeight] } } } #endif //EVONET_MODELREPLICATOR_H<file_sep>set(core_executables_list Helloworld_test #OperationsManagerGpu_test Preprocessing_test Statistics_test StringParsing_test ) set(io_executables_list CSVWriter_test DataFile_test LinkFile_test ModelFile_test ModelInterpreterFile_test ModelInterpreterFileGpu_test NodeFile_test Parameters_test PopulationTrainerFile_test WeightFile_test ) set(graph_executables_list CircuitFinder_test ) set(ml_executables_list ActivationFunction_test ActivationFunctionTensor_test ActivationFunctionTensorGpu_test IntegrationFunction_test IntegrationFunctionTensor_test IntegrationFunctionTensorGpu_test Link_test LossFunction_test LossFunctionTensor_test LossFunctionTensorGpu_test MetricFunction_test MetricFunctionTensor_test MetricFunctionTensorGpu_test ModelBuilder_test ModelBuilderCpu_test ModelBuilderExperimental_test ModelErrorTensorData_test ModelInterpreter_DAG_test ModelInterpreter_DCG_test ModelInterpreter_IG_test ModelInterpreterCpu_test ModelInterpreterGpu_test ModelKernal_test ModelKernalGpu_test ModelLogger_test ModelReplicator_test ModelResources_test ModelTrainer_test ModelTrainerGpu_test Model_test Node_test NodeTensorData_test OpToTensorOp_test PopulationLogger_test PopulationTrainer_test PopulationTrainerGpu_test Solver_test SolverTensor_test Weight_test WeightInit_test WeightTensorData_test ) set(models_executables_list CVAEFullyConn_test ) set(simulator_executables_list BiochemicalDataSimulator_test BiochemicalReaction_test ChromatogramSimulator_test DataSimulator_test EMGModel_test PeakSimulator_test MetabolomicsClassificationDataSimulator_test MetabolomicsLatentTraversalDataSimulator_test MetabolomicsLatentUnsClassDataSimulator_test MetabolomicsReconstructionDataSimulator_test MNISTSimulator_test ) ### collect test executables set(TEST_executables ${core_executables_list} ${io_executables_list} ${ml_executables_list} ${models_executables_list} ${graph_executables_list} ${simulator_executables_list} ) <file_sep> cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) project("SmartPeak_tests") option(ENABLE_STYLE_TESTING "Enables checking of code convention violations (cpplint) and static code analysis (cppchecker). Note that this will disable the regular test system." OFF) option(ENABLE_CLASS_TESTING "Enables tests for library classes. Should be disabled only on time constraints (e.g. chunking during continuous integration)." ON) # why is this not called below? add_subdirectory(class_tests) #------------------------------------------------------------------------------ # we only test if we have no package target if("${PACKAGE_TYPE}" STREQUAL "none") if(ENABLE_STYLE_TESTING) add_subdirectory(coding) else() ## configure the regular class .. if(ENABLE_CLASS_TESTING) add_subdirectory(class_tests) endif() endif(ENABLE_STYLE_TESTING) endif("${PACKAGE_TYPE}" STREQUAL "none")<file_sep>cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) project("EvoNet") #------------------------------------------------------------------------------ # naming conventions: # # prefix a variable with 'CF_' if it is used to configure a file! # e.g., CF_LibEvoNetExport set(CF_EVONET_PACKAGE_VERSION "${EVONET_PACKAGE_VERSION_MAJOR}.${EVONET_PACKAGE_VERSION_MINOR}.${EVONET_PACKAGE_VERSION_PATCH}" CACHE INTERNAL "EvoNet VERSION" FORCE) #------------------------------------------------------------------------------ # En/disable assertions if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") set(CF_EVONET_ASSERTIONS 1) else() set(CF_EVONET_ASSERTIONS 0) endif() set(CF_EVONET_ASSERTIONS ${CF_EVONET_ASSERTIONS} CACHE INTERNAL "Enables debug messages (precondition and postconditions are enabled, a bit slower) - this is NOT changing any compiler flags!" FORCE) #------------------------------------------------------------------------------ # external libs (contrib or system) #------------------------------------------------------------------------------ include(${PROJECT_SOURCE_DIR}/cmake_findExternalLibs.cmake) #------------------------------------------------------------------------------ # At this point make a summary of where data and doc will be located: message(STATUS "Info: CF_EVONET_DATA_PATH: ${CF_EVONET_DATA_PATH}") message(STATUS "Info: CF_EVONET_DOC_PATH: ${CF_EVONET_DOC_PATH}") #------------------------------------------------------------------------------ # configure config.h #------------------------------------------------------------------------------ include(${PROJECT_SOURCE_DIR}/configh.cmake) #------------------------------------------------------------------------------ # big include file for headers and cpp files, that fills the EvoNet_sources variable include (${PROJECT_SOURCE_DIR}/includes.cmake) #------------------------------------------------------------------------------ # all the dependency libraries are linked into libEvoNet.so set(EVONET_DEP_LIBRARIES ${Boost_LIBRARIES}) # xerces requires linking against CoreFoundation&CoreServices if(APPLE) find_library(CoreFoundation_LIBRARY CoreFoundation ) find_library(CoreServices_LIBRARY CoreServices ) set(EVONET_DEP_LIBRARIES ${EVONET_DEP_LIBRARIES} ${CoreFoundation_LIBRARY} ${CoreServices_LIBRARY}) endif() if (TBB_FOUND) list(APPEND EVONET_DEP_LIBRARIES ${TBB_LIBRARIES}) endif() if (MSVC) list(APPEND EVONET_DEP_LIBRARIES opengl32.lib) endif() evonet_add_library(TARGET_NAME EvoNet SOURCE_FILES ${EvoNet_sources} HEADER_FILES ${EvoNet_sources_h} ${EvoNet_configured_headers} INTERNAL_INCLUDES ${CMAKE_CURRENT_SOURCE_DIR}/include ${CMAKE_CURRENT_BINARY_DIR}/include EXTERNAL_INCLUDES ${EIGEN3_INCLUDE_DIR} ${CEREAL_INCLUDE_DIRS} LINK_LIBRARIES ${APPLE_EXTRA_LIBS} ${EVONET_DEP_LIBRARIES} DLL_EXPORT_PATH "EvoNet/") #------------------------------------------------------------------------------ # since the share basically belongs to EvoNet core we control its installation # here # Note: that slash(/) is important here, otherwise the whole directory # (not its content) will be copied! install_directory(${EVONET_HOST_DIRECTORY}/share/EvoNet/ ${INSTALL_SHARE_DIR} share) #------------------------------------------------------------------------------ # register relevant paths for the doxygen doc generation evonet_doc_path("${PROJECT_SOURCE_DIR}/include") <file_sep>/**TODO: Add copyright*/ #ifndef EVONET_ACTIVATIONTENSORFUNCTION_H #define EVONET_ACTIVATIONTENSORFUNCTION_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> #endif #include <EvoNet/ml/ActivationFunction.h> #include <unsupported/Eigen/CXX11/Tensor> #include <unsupported/Eigen/MatrixFunctions> //#include <cereal/access.hpp> // serialiation of private members //#undef min // clashes with std::limit on windows in polymorphic.hpp //#undef max // clashes with std::limit on windows in polymorphic.hpp //#include <cereal/types/polymorphic.hpp> namespace EvoNet { /** @brief Base class for all activation function wrappers. */ template<typename TensorT, typename DeviceT> class ActivationTensorOp { public: ActivationTensorOp() = default; ActivationTensorOp(const TensorT& eps, const TensorT& min, const TensorT& max) : eps_(eps), min_(min), max_(max) {}; virtual ~ActivationTensorOp() = default; virtual std::string getName() const = 0; virtual void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const = 0; void setEps(const TensorT& eps) { eps_ = eps; } void setMin(const TensorT& min) { min_ = min; } void setMax(const TensorT& max) { max_ = max; } TensorT getEps() const { return eps_; } TensorT getMin() const { return min_; } TensorT getMax() const { return max_; } protected: TensorT eps_ = TensorT(1e-24); ///< threshold to clip between min and max TensorT min_ = TensorT(-1e9); TensorT max_ = TensorT(1e9); //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) {} }; /** @brief Rectified Linear Unit (ReLU) activation function References: <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2000). Digital selection and analogue amplification coexist in a cortex-inspired silicon circuit. Nature. 405. pp. 947–951. */ template<typename TensorT, typename DeviceT> class ReLUTensorOp: public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); auto result = (x.chip(time_step, 1) >= x.chip(time_step, 1).constant(TensorT(0))).select(x.chip(time_step, 1), x.chip(time_step, 1).constant(TensorT(0))); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); //std::cout << "[ReLUTensorOp] Time step " << time_step << " : " << out.chip(time_step, 1) << std::endl; // DEBUGGING... }; std::string getName() const{return "ReLUTensorOp";}; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Rectified Linear Unit (ReLU) gradient References: <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2000). Digital selection and analogue amplification coexist in a cortex-inspired silicon circuit. Nature. 405. pp. 947–951. */ template<typename TensorT, typename DeviceT> class ReLUGradTensorOp: public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); auto result = (x.chip(time_step, 1) >= x.chip(time_step, 1).constant(TensorT(0))).select(x.chip(time_step, 1).constant(TensorT(1)), x.chip(time_step, 1).constant(TensorT(0))); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; std::string getName() const{return "ReLUGradTensorOp";}; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Exponential Linear Unit (ELU) activation function References: <NAME>; <NAME>; <NAME> (2015). "Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)". arXiv:1511.07289 */ template<typename TensorT, typename DeviceT> class ELUTensorOp: public ActivationTensorOp<TensorT, DeviceT> { public: ELUTensorOp() = default; ~ELUTensorOp() = default; ELUTensorOp(const TensorT& eps, const TensorT& min, const TensorT& max, const TensorT& alpha) : ActivationTensorOp<TensorT, DeviceT>(eps, min, max), alpha_(alpha) {}; ELUTensorOp(const TensorT& alpha): alpha_(alpha){}; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); auto result = (x.chip(time_step, 1) > x.chip(time_step, 1).constant(TensorT(0))).select( x.chip(time_step, 1), x.chip(time_step, 1).constant(alpha_) * (x.chip(time_step, 1).exp() - x.chip(time_step, 1).constant(TensorT(1)))); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; void setAlpha(const TensorT& alpha) { alpha_ = alpha; }; TensorT getAlpha() const { return alpha_; }; std::string getName() const{return "ELUTensorOp";}; private: //friend class cereal::access; //template<class Archive> //void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this), alpha_); //} TensorT alpha_ = 1; }; /** @brief Exponential Linear Unit (ELU) gradient References: <NAME>; <NAME>; <NAME> (2015). "Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)". arXiv:1511.07289 */ template<typename TensorT, typename DeviceT> class ELUGradTensorOp: public ActivationTensorOp<TensorT, DeviceT> { public: ELUGradTensorOp() = default; ~ELUGradTensorOp() = default; ELUGradTensorOp(const TensorT& eps, const TensorT& min, const TensorT& max, const TensorT& alpha) : ActivationTensorOp<TensorT, DeviceT>(eps, min, max), alpha_(alpha) {}; ELUGradTensorOp(const TensorT& alpha): alpha_(alpha){}; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); auto result = (x.chip(time_step, 1) > x.chip(time_step, 1).constant(TensorT(0))).select( x.chip(time_step, 1).constant(TensorT(1)), (x.chip(time_step, 1) > x.chip(time_step, 1).constant(TensorT(0))).select(x.chip(time_step, 1), x.chip(time_step, 1).constant(alpha_) * (x.chip(time_step, 1).exp() - x.chip(time_step, 1).constant(TensorT(1)))) + x.chip(time_step, 1).constant(alpha_)); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; void setAlpha(const TensorT& alpha) { alpha_ = alpha; }; TensorT getAlpha() const { return alpha_; }; std::string getName() const{return "ELUGradTensorOp";}; private: //friend class cereal::access; //template<class Archive> //void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this), alpha_); //} TensorT alpha_ = 1; }; /** @brief Sigmoid activation function */ template<typename TensorT, typename DeviceT> class SigmoidTensorOp: public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); auto result = x.chip(time_step, 1).sigmoid(); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; std::string getName() const{return "SigmoidTensorOp";}; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Sigmoid gradient */ template<typename TensorT, typename DeviceT> class SigmoidGradTensorOp: public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); auto result = x.chip(time_step, 1).sigmoid() * (x.chip(time_step, 1).constant(TensorT(1)) - x.chip(time_step, 1).sigmoid()); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; std::string getName() const{return "SigmoidGradTensorOp";}; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Hyperbolic Tangent activation function */ template<typename TensorT, typename DeviceT> class TanHTensorOp: public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); auto result = x.chip(time_step, 1).tanh(); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; std::string getName() const{return "TanHTensorOp";}; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Hyperbolic Tangent gradient */ template<typename TensorT, typename DeviceT> class TanHGradTensorOp: public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); auto result = x.chip(time_step, 1).constant(TensorT(1)) - (x.chip(time_step, 1).tanh()).pow((TensorT)2); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; std::string getName() const{return "TanHGradTensorOp";}; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Rectified Hyperbolic Tangent activation function */ template<typename TensorT, typename DeviceT> class ReTanHTensorOp: public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); //out.chip(time_step, 1).device(device) = x.chip(time_step, 1).unaryExpr(ReTanHOp<TensorT>()); //out.chip(time_step, 1).device(device) = [TODO] }; std::string getName() const{return "ReTanHTensorOp";}; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Rectified Hyperbolic Tangent gradient */ template<typename TensorT, typename DeviceT> class ReTanHGradTensorOp: public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); //out.chip(time_step, 1).device(device) = x.chip(time_step, 1).unaryExpr(ReTanHGradOp<TensorT>()); //out.chip(time_step, 1).device(device) = [TODO] }; std::string getName() const{return "ReTanHGradTensorOp";}; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Linear activation function */ template<typename TensorT, typename DeviceT> class LinearTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); auto result = x.chip(time_step, 1); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; std::string getName() const { return "LinearTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Linear gradient */ template<typename TensorT, typename DeviceT> class LinearGradTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); out.chip(time_step, 1).device(device) = x.chip(time_step, 1).constant(TensorT(1)); }; std::string getName() const { return "LinearGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Inverse activation function */ template<typename TensorT, typename DeviceT> class InverseTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); // Temporary memory for computation TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size * layer_size]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * layer_size * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif // Cap small values by selection auto x_clipped_neg = (x.chip(time_step, 1) > x.chip(time_step, 1).constant(1 / this->getMin()) && x.chip(time_step, 1) < x.chip(time_step, 1).constant(TensorT(0))).select( x.chip(time_step, 1).constant(1 / this->getMin()), x.chip(time_step, 1)); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> x_clipped_pos(tmp_data, batch_size, layer_size); x_clipped_pos.device(device) = (x_clipped_neg <= x_clipped_neg.constant(1 / this->getMax()) && x_clipped_neg > x_clipped_neg.constant(TensorT(0))).select( x_clipped_neg.constant(1 / this->getMax()), x_clipped_neg); // Remove 0 by selection auto result = (x_clipped_pos != x_clipped_pos.constant(TensorT(0))).select( x_clipped_pos.constant(TensorT(1)) / x_clipped_pos, x_clipped_pos.constant(TensorT(0))); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); // Deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; std::string getName() const { return "InverseTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Inverse gradient */ template<typename TensorT, typename DeviceT> class InverseGradTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); // Temporary memory for computation TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size * layer_size]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * layer_size * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif // Cap small values by selection auto x_clipped_neg = (x.chip(time_step, 1) > x.chip(time_step, 1).constant(1 / this->getMin()) && x.chip(time_step, 1) < x.chip(time_step, 1).constant(TensorT(0))).select( x.chip(time_step, 1).constant(1 / this->getMin()), x.chip(time_step, 1)); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> x_clipped_pos(tmp_data, batch_size, layer_size); x_clipped_pos.device(device) = (x_clipped_neg <= x_clipped_neg.constant(1 / this->getMax()) && x_clipped_neg > x_clipped_neg.constant(TensorT(0))).select( x_clipped_neg.constant(1 / this->getMax()), x_clipped_neg); // Remove 0 by selection auto result = (x_clipped_pos != x_clipped_pos.constant(TensorT(0))).select( x_clipped_pos.constant(TensorT(-1)) / x_clipped_pos.pow(2), x_clipped_pos.constant(TensorT(0))); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); // Deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; std::string getName() const { return "InverseGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Exponential activation function */ template<typename TensorT, typename DeviceT> class ExponentialTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); TensorT maxT = log(this->getMax()); auto result = x.chip(time_step, 1).clip(this->getMin(), maxT).exp(); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; std::string getName() const { return "ExponentialTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Exponential gradient */ template<typename TensorT, typename DeviceT> class ExponentialGradTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); TensorT maxT = log(this->getMax()); auto result = x.chip(time_step, 1).clip(this->getMin(), maxT).exp(); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; std::string getName() const { return "ExponentialGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Log activation function */ template<typename TensorT, typename DeviceT> class LogTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); auto result = x.chip(time_step, 1).clip(this->getEps(), this->getMax()).log(); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; std::string getName() const { return "LogTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Log gradient */ template<typename TensorT, typename DeviceT> class LogGradTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); // Temporary memory for computation TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size* layer_size]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * layer_size * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif // Cap small values by selection auto x_clipped_neg = (x.chip(time_step, 1) > x.chip(time_step, 1).constant(1/this->getMin()) && x.chip(time_step, 1) < x.chip(time_step, 1).constant(TensorT(0))).select( x.chip(time_step, 1).constant(1/this->getMin()), x.chip(time_step, 1)); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> x_clipped_pos(tmp_data, batch_size, layer_size); x_clipped_pos.device(device) = (x_clipped_neg <= x_clipped_neg.constant(1/this->getMax()) && x_clipped_neg > x_clipped_neg.constant(TensorT(0))).select( x_clipped_neg.constant(1/this->getMax()), x_clipped_neg); // Remove 0 by selection auto result = (x_clipped_pos != x_clipped_pos.constant(TensorT(0))).select( x_clipped_pos.constant(TensorT(1)) / x_clipped_pos, x_clipped_pos.constant(TensorT(0))); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); // Deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; std::string getName() const { return "LogGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Pow activation function */ template<typename TensorT, typename DeviceT> class PowTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: PowTensorOp() = default; ~PowTensorOp() = default; PowTensorOp(const TensorT& eps, const TensorT& min, const TensorT& max, const TensorT& base) : ActivationTensorOp<TensorT, DeviceT>(eps, min, max), base_(base) {}; PowTensorOp(const TensorT& base): base_(base){}; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); TensorT maxT = (base_ >= TensorT(1))? pow(this->getMax(), 1 / base_): this->getMax(); TensorT minT = ((base_ < TensorT(1) && base_ > TensorT(0)) || (base_ > TensorT(-1) && base_ < TensorT(0))) ? TensorT(0): this->getMin(); auto result = x.chip(time_step, 1).clip(minT, maxT).pow(base_); // NOTE there is still the case where base_ < 0 and x == 0 to deal with out.chip(time_step, 1).device(device) = (result == result).select(result.clip(this->getMin(), this->getMax()), result.constant(TensorT(0))); }; std::string getName() const { return "PowTensorOp"; }; private: //friend class cereal::access; //template<class Archive> //void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this), base_); //} TensorT base_; }; /** @brief Pow gradient */ template<typename TensorT, typename DeviceT> class PowGradTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: PowGradTensorOp() = default; ~PowGradTensorOp() = default; PowGradTensorOp(const TensorT& eps, const TensorT& min, const TensorT& max, const TensorT& base) : ActivationTensorOp<TensorT, DeviceT>(eps, min, max), base_(base) {}; PowGradTensorOp(const TensorT& base) : base_(base) {}; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); TensorT maxT = (base_ >= TensorT(2)) ? pow(this->getMax(), 1 / (base_ - TensorT(1))) : this->getMax(); TensorT minT = ((base_ < TensorT(2) && base_ > TensorT(1)) || (base_ > TensorT(0) && base_ < TensorT(1))) ? TensorT(0) : this->getMin(); auto result = x.chip(time_step, 1).constant(base_) * x.chip(time_step, 1).clip(minT, maxT).pow(base_ - TensorT(1)); // NOTE there is still the case where base_ < 0 and x == 0 to deal with out.chip(time_step, 1).device(device) = (result == result).select(result.clip(this->getMin(), this->getMax()), result.constant(TensorT(0))); }; std::string getName() const { return "PowGradTensorOp"; }; private: //friend class cereal::access; //template<class Archive> //void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this), base_); //} TensorT base_; }; /** @brief LeakyReLU activation function default alpha = 1e-2 */ template<typename TensorT, typename DeviceT> class LeakyReLUTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: LeakyReLUTensorOp() = default; ~LeakyReLUTensorOp() = default; LeakyReLUTensorOp(const TensorT& eps, const TensorT& min, const TensorT& max, const TensorT& alpha) : ActivationTensorOp<TensorT, DeviceT>(eps, min, max), alpha_(alpha) {}; LeakyReLUTensorOp(const TensorT& alpha) : alpha_(alpha) {}; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); auto result = (x.chip(time_step, 1) >= x.chip(time_step, 1).constant(TensorT(0))).select( x.chip(time_step, 1), x.chip(time_step, 1) * x.chip(time_step, 1).constant(alpha_)); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; void setAlpha(const TensorT& alpha) { alpha_ = alpha; }; TensorT getAlpha() const { return alpha_; }; std::string getName() const { return "LeakyReLUTensorOp"; }; private: //friend class cereal::access; //template<class Archive> //void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this), alpha_); //} TensorT alpha_ = 1e-2; }; /** @brief LeakyReLU gradient */ template<typename TensorT, typename DeviceT> class LeakyReLUGradTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: LeakyReLUGradTensorOp() = default; ~LeakyReLUGradTensorOp() = default; LeakyReLUGradTensorOp(const TensorT& eps, const TensorT& min, const TensorT& max, const TensorT& alpha) : ActivationTensorOp<TensorT, DeviceT>(eps, min, max), alpha_(alpha) {}; LeakyReLUGradTensorOp(const TensorT& alpha) : alpha_(alpha) {}; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); auto result = (x.chip(time_step, 1) >= x.chip(time_step, 1).constant(TensorT(0))).select( x.chip(time_step, 1).constant(TensorT(1)), x.chip(time_step, 1).constant(alpha_)); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; void setAlpha(const TensorT& alpha) { alpha_ = alpha; }; TensorT getAlpha() const { return alpha_; }; std::string getName() const { return "LeakyReLUGradTensorOp"; }; private: //friend class cereal::access; //template<class Archive> //void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this), alpha_); //} TensorT alpha_ = (TensorT)1e-2; }; /** @brief Sin activation function */ template<typename TensorT, typename DeviceT> class SinTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); //auto result = x.chip(time_step, 1).sin(); //out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; std::string getName() const { return "SinTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Sin gradient */ template<typename TensorT, typename DeviceT> class SinGradTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); //auto result = x.chip(time_step, 1).cos(); //out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; std::string getName() const { return "SinGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Cos activation function */ template<typename TensorT, typename DeviceT> class CosTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); //auto result = x.chip(time_step, 1).cos(); //out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; std::string getName() const { return "CosTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Cos gradient */ template<typename TensorT, typename DeviceT> class CosGradTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); //auto result = -x.chip(time_step, 1).sin(); //out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()); }; std::string getName() const { return "CosGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief BatchNorm activation function */ template<typename TensorT, typename DeviceT> class BatchNormTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> x(x_I, batch_size, 1, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); auto mean = x.chip(time_step, 2).mean(Eigen::array<Eigen::Index, 1>({ 0 })).broadcast(Eigen::array<Eigen::Index, 2>({batch_size, 1})); // 2 dims auto var = (x.chip(time_step, 2).chip(0, 1) - mean).pow(TensorT(2)) / mean.constant(TensorT(batch_size)); auto result = (var <= var.constant(TensorT(0))).select(var.constant(TensorT(0)), //x.chip(time_step, 2).chip(0, 1) (x.chip(time_step, 2).chip(0, 1) - mean) / var.sqrt()); out.chip(time_step, 1).device(device) = result.clip(this->getMin(), this->getMax()).eval(); }; std::string getName() const { return "BatchNormTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief BatchNorm gradient ddx((xi-mu)/var.sqrt()) = ddx(xi-mu)/var.sqrt() + (xi-mu)*ddx(var.pow(-1/2)) = ddx(xi-mu)/var.sqrt() + (xi-mu)*(-0.5*var.pow(-3/2))*ddx(var) = ddx(xi-mu)/var.sqrt() + (xi-mu)*(-0.5*var.pow(-3/2))*ddx(SUM(xi-mu)/N) = (N-1)/N/var.sqrt() - (xi-mu)*(2/N)*var.pow(-3/2) where ddx(SUM(xi-mu)/N) = 2(xi-mu)/N and ddx(xi-mu) = (N-1)/N and ddx(xi) = 1 and ddx(mu) = 1/N References: see https://math.stackexchange.com/questions/2836083/derivative-of-the-variance-wrt-x-i for the derivative of the variance ddx(xi/var.sqrt()) = ddx(xi)/var.sqrt() + xi/ddx(var.sqrt()) = 1/var.sqrt() - xi * var.pow(-3/2) * (xi-mu)/N */ template<typename TensorT, typename DeviceT> class BatchNormGradTensorOp : public ActivationTensorOp<TensorT, DeviceT> { public: using ActivationTensorOp<TensorT, DeviceT>::ActivationTensorOp; void operator()(TensorT* x_I, TensorT* x_O, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> x(x_I, batch_size, 1, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> out(x_O, batch_size, memory_size, layer_size); auto mean = x.chip(time_step, 2).mean(Eigen::array<Eigen::Index, 1>({ 0 })).broadcast(Eigen::array<Eigen::Index, 2>({ batch_size, 1 })); // 2 dims auto var = (x.chip(time_step, 2).chip(0, 1) - mean).pow(TensorT(2)) / mean.constant(TensorT(batch_size)); // Temporary memory for computation TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size* layer_size]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * layer_size * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> result(tmp_data, batch_size, layer_size); result.device(device) = var.constant(TensorT(batch_size - 1)/TensorT(batch_size)) * var.pow(-1 / 2) - var.constant(TensorT(2) / TensorT(batch_size)) * (x.chip(time_step, 2).chip(0, 1) - mean) * var.pow(-3/2); //auto result = var.pow(-1 / 2) - // var.constant(1 / TensorT(batch_size)) * x.chip(time_step, 2).chip(0, 1) * (x.chip(time_step, 2).chip(0, 1) - mean) * var.pow(-3 / 2); out.chip(time_step, 1).device(device) = (result == result).select(result.clip(this->getMin(), this->getMax()), result.constant(TensorT(0))).eval(); // Deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; std::string getName() const { return "BatchNormGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<ActivationTensorOp<TensorT, DeviceT>>(this)); // } }; template<typename TensorT, typename DeviceT> struct GradientCheckTensorOp { void operator()(TensorT* x_I, TensorT* x_f_plus, TensorT* x_f_neg, TensorT* x_b, TensorT* diff, const int& batch_size, const int& memory_size, const int& layer_size, const int& time_step, DeviceT& device) const { // create the forward propogation offsets Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x_I_values(x_I, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x_f_plus_values(x_f_plus, batch_size, memory_size, layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x_f_neg_values(x_f_neg, batch_size, memory_size, layer_size); x_f_plus_values.device(device) = x_I_values + x_I_values.constant(eps_); x_f_neg_values.device(device) = x_I_values - x_I_values.constant(eps_); // calculate the approximate gradient forward_->operator()(x_f_plus, x_f_plus, batch_size, memory_size, layer_size, time_step, device); forward_->operator()(x_f_neg, x_f_neg, batch_size, memory_size, layer_size, time_step, device); auto gradapprox = (x_f_plus_values.chip(time_step, 1) - x_f_neg_values.chip(time_step, 1)) / x_f_plus_values.chip(time_step, 1).constant(TensorT(2) * eps_); std::cout << "gradapprox\n" << gradapprox << std::endl; // calculate the true gradient reverse_->operator()(x_I, x_b, batch_size, memory_size, layer_size, time_step, device); // calculate the normalized difference across each batch Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> x_b_values(x_b, batch_size, memory_size, layer_size); std::cout << "x_b_values\n" << x_b_values.chip(time_step, 1) << std::endl; auto numerator = (x_b_values.chip(time_step, 1) - gradapprox).pow(2).sum().sqrt(); auto denominator = x_b_values.chip(time_step, 1).pow(2).sum().sqrt() + gradapprox .pow(2).sum().sqrt(); Eigen::TensorMap<Eigen::Tensor<TensorT, 0>> diff_value(diff); auto result = (denominator == denominator.constant(0)).select(denominator.constant(0), numerator / denominator); diff_value.device(device) = result; } TensorT eps_ = TensorT(1e-7); std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> forward_ = nullptr; std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> reverse_ = nullptr; }; } //CEREAL_REGISTER_TYPE(EvoNet::ReLUTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ReLUGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ELUTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ELUGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::SigmoidTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::SigmoidGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::TanHTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::TanHGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ReTanHTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ReTanHGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::LinearTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::LinearGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::InverseTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::InverseGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ExponentialTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ExponentialGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::LogTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::LogGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::PowTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::PowGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::LeakyReLUTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::LeakyReLUGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::SinTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::SinGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::CosTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::CosGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::BatchNormTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::BatchNormGradTensorOp<float, Eigen::DefaultDevice>); // //#if COMPILE_WITH_CUDA //CEREAL_REGISTER_TYPE(EvoNet::ReLUTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ReLUGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ELUTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ELUGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::SigmoidTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::SigmoidGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::TanHTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::TanHGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ReTanHTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ReTanHGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::LinearTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::LinearGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::InverseTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::InverseGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ExponentialTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ExponentialGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::LogTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::LogGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::PowTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::PowGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::LeakyReLUTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::LeakyReLUGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::SinTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::SinGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::CosTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::CosGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::BatchNormTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::BatchNormGradTensorOp<float, Eigen::GpuDevice>); //#endif // //// TODO: double, int, etc., #endif //EVONET_ACTIVATIONTENSORFUNCTION_H<file_sep>#------------------------------------------------------------------------------ # This cmake files bundles all the multithreading related stuff of the SmartPeak # build system. #------------------------------------------------------------------------------ # TBB #------------------------------------------------------------------------------ set(MT_TBB_INCLUDE_DIR CACHE PATH "Intel Threading Building Blocks 'include' directory.") set(MT_TBB_LIBRARY_DIR CACHE PATH "Intel Threading Building Blocks libraries directory.") message(STATUS "Intel TBB: ${MT_ENABLE_TBB}") if (MT_ENABLE_TBB) find_package(TBB) if (NOT TBB_FOUND) message(FATAL_ERROR "TBB not found but requested.") endif() endif() if (TBB_FOUND) INCLUDE_DIRECTORIES(${TBB_INCLUDE_DIRS}) add_definitions(/DSMARTPEAK_HAS_TBB) endif() #------------------------------------------------------------------------------ # OpenMP #------------------------------------------------------------------------------ if (MT_ENABLE_OPENMP) find_package(OpenMP) endif() message(STATUS "OpenMP: ${MT_ENABLE_OPENMP}") if (OPENMP_FOUND) # do NOT use add_definitions() here, because RC.exe on windows will fail set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") if (NOT MSVC) set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${OpenMP_CXX_FLAGS}") endif() endif() <file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE NodeTensorData test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/NodeTensorData.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(nodeTensorData) BOOST_AUTO_TEST_CASE(constructor) { NodeTensorDataCpu<float>* ptr = nullptr; NodeTensorDataCpu<float>* nullPointer = nullptr; ptr = new NodeTensorDataCpu<float>(); BOOST_CHECK_NE(ptr, nullPointer); delete ptr; } BOOST_AUTO_TEST_CASE(destructor) { NodeTensorDataCpu<float>* ptr = nullptr; ptr = new NodeTensorDataCpu<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(comparison) { NodeTensorDataCpu<float> node, node_test; BOOST_CHECK(node == node_test); } #if COMPILE_WITH_CUDA BOOST_AUTO_TEST_CASE(gettersAndSetters2) { NodeTensorDataGpu<float> node; node.setBatchSize(2); node.setMemorySize(3); node.setLayerSize(4); Eigen::Tensor<float, 3> input(2, 3, 4), output(2, 3, 4), derivative(2, 3, 4), error(2, 3, 4), dt(2, 3, 4); input.setConstant(0.5); output.setConstant(1); derivative.setConstant(2); error.setConstant(3); dt.setConstant(4); node.setInput(input); node.setOutput(output); node.setDerivative(derivative); node.setError(error); node.setDt(dt); BOOST_CHECK_EQUAL(node.getBatchSize(), 2); BOOST_CHECK_EQUAL(node.getMemorySize(), 3); BOOST_CHECK_EQUAL(node.getLayerSize(), 4); BOOST_CHECK_EQUAL(node.getInput()(1, 2, 3), 0.5); BOOST_CHECK(node.getInputStatus().first); BOOST_CHECK(!node.getInputStatus().second); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 1); BOOST_CHECK(node.getOutputStatus().first); BOOST_CHECK(!node.getOutputStatus().second); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 2); BOOST_CHECK(node.getDerivativeStatus().first); BOOST_CHECK(!node.getDerivativeStatus().second); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 3); BOOST_CHECK(node.getErrorStatus().first); BOOST_CHECK(!node.getErrorStatus().second); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 4); BOOST_CHECK(node.getDtStatus().first); BOOST_CHECK(!node.getDtStatus().second); // Test mutability node.getInput()(0, 0, 0) = 5; node.getOutput()(0, 0, 0) = 6; node.getDerivative()(0, 0, 0) = 7; node.getError()(0, 0, 0) = 8; node.getDt()(0, 0, 0) = 9; BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 5); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 6); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 7); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 8); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 9); } BOOST_AUTO_TEST_CASE(syncHAndD2) { NodeTensorDataGpu<float> node; node.setBatchSize(2); node.setMemorySize(3); node.setLayerSize(4); Eigen::Tensor<float, 3> input(2, 3, 4), output(2, 3, 4), derivative(2, 3, 4), error(2, 3, 4), dt(2, 3, 4); input.setConstant(0.5); output.setConstant(1); derivative.setConstant(2); error.setConstant(3); dt.setConstant(4); node.setInput(input); node.setOutput(output); node.setDerivative(derivative); node.setError(error); node.setDt(dt); Eigen::GpuStreamDevice stream_device; Eigen::GpuDevice device(&stream_device); node.syncHAndDInput(device); node.syncHAndDOutput(device); node.syncHAndDDerivative(device); node.syncHAndDError(device); node.syncHAndDDt(device); BOOST_CHECK(!node.getInputStatus().first); BOOST_CHECK(node.getInputStatus().second); BOOST_CHECK(!node.getOutputStatus().first); BOOST_CHECK(node.getOutputStatus().second); BOOST_CHECK(!node.getDerivativeStatus().first); BOOST_CHECK(node.getDerivativeStatus().second); BOOST_CHECK(!node.getErrorStatus().first); BOOST_CHECK(node.getErrorStatus().second); BOOST_CHECK(!node.getDtStatus().first); BOOST_CHECK(node.getDtStatus().second); node.syncHAndDInput(device); node.syncHAndDOutput(device); node.syncHAndDDerivative(device); node.syncHAndDError(device); node.syncHAndDDt(device); BOOST_CHECK(node.getInputStatus().first); BOOST_CHECK(!node.getInputStatus().second); BOOST_CHECK(node.getOutputStatus().first); BOOST_CHECK(!node.getOutputStatus().second); BOOST_CHECK(node.getDerivativeStatus().first); BOOST_CHECK(!node.getDerivativeStatus().second); BOOST_CHECK(node.getErrorStatus().first); BOOST_CHECK(!node.getErrorStatus().second); BOOST_CHECK(node.getDtStatus().first); BOOST_CHECK(!node.getDtStatus().second); } #endif BOOST_AUTO_TEST_CASE(gettersAndSetters) { NodeTensorDataCpu<float> node; node.setBatchSize(2); node.setMemorySize(3); node.setLayerSize(4); size_t test = 2 * 3 * 4 * sizeof(float); BOOST_CHECK_EQUAL(node.getTensorSize(), test); node.setLayerIntegration("SumOp"); BOOST_CHECK_EQUAL(node.getLayerIntegration(), "SumOp"); } BOOST_AUTO_TEST_CASE(gettersAndSetters1) { NodeTensorDataCpu<float> node; node.setBatchSize(2); node.setMemorySize(3); node.setLayerSize(4); Eigen::Tensor<float, 3> input(2, 3, 4), output(2, 3, 4), derivative(2, 3, 4), error(2, 3, 4), dt(2, 3, 4); input.setConstant(0); output.setConstant(1); derivative.setConstant(2); error.setConstant(3); dt.setConstant(4); node.setInput(input); node.setOutput(output); node.setDerivative(derivative); node.setError(error); node.setDt(dt); BOOST_CHECK_EQUAL(node.getBatchSize(), 2); BOOST_CHECK_EQUAL(node.getMemorySize(), 3); BOOST_CHECK_EQUAL(node.getLayerSize(), 4); BOOST_CHECK_EQUAL(node.getInput()(1, 2, 3), 0); BOOST_CHECK(node.getInputStatus().first); BOOST_CHECK(node.getInputStatus().second); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 1); BOOST_CHECK(node.getOutputStatus().first); BOOST_CHECK(node.getOutputStatus().second); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 2); BOOST_CHECK(node.getDerivativeStatus().first); BOOST_CHECK(node.getDerivativeStatus().second); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 3); BOOST_CHECK(node.getErrorStatus().first); BOOST_CHECK(node.getErrorStatus().second); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 4); BOOST_CHECK(node.getDtStatus().first); BOOST_CHECK(node.getDtStatus().second); // Test mutability node.getInput()(0, 0, 0) = 5; node.getOutput()(0, 0, 0) = 6; node.getDerivative()(0, 0, 0) = 7; node.getError()(0, 0, 0) = 8; node.getDt()(0, 0, 0) = 9; BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 5); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 6); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 7); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 8); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 9); } BOOST_AUTO_TEST_CASE(syncHAndD) { NodeTensorDataCpu<float> node; node.setBatchSize(2); node.setMemorySize(3); node.setLayerSize(4); Eigen::Tensor<float, 3> input(2, 3, 4), output(2, 3, 4), derivative(2, 3, 4), error(2, 3, 4), dt(2, 3, 4); input.setConstant(0.5); output.setConstant(1); derivative.setConstant(2); error.setConstant(3); dt.setConstant(4); node.setInput(input); node.setOutput(output); node.setDerivative(derivative); node.setError(error); node.setDt(dt); Eigen::DefaultDevice device; node.syncHAndDInput(device); node.syncHAndDOutput(device); node.syncHAndDDerivative(device); node.syncHAndDError(device); node.syncHAndDDt(device); BOOST_CHECK(node.getInputStatus().first); BOOST_CHECK(node.getInputStatus().second); BOOST_CHECK(node.getOutputStatus().first); BOOST_CHECK(node.getOutputStatus().second); BOOST_CHECK(node.getDerivativeStatus().first); BOOST_CHECK(node.getDerivativeStatus().second); BOOST_CHECK(node.getErrorStatus().first); BOOST_CHECK(node.getErrorStatus().second); BOOST_CHECK(node.getDtStatus().first); BOOST_CHECK(node.getDtStatus().second); node.syncHAndDInput(device); node.syncHAndDOutput(device); node.syncHAndDDerivative(device); node.syncHAndDError(device); node.syncHAndDDt(device); BOOST_CHECK(node.getInputStatus().first); BOOST_CHECK(node.getInputStatus().second); BOOST_CHECK(node.getOutputStatus().first); BOOST_CHECK(node.getOutputStatus().second); BOOST_CHECK(node.getDerivativeStatus().first); BOOST_CHECK(node.getDerivativeStatus().second); BOOST_CHECK(node.getErrorStatus().first); BOOST_CHECK(node.getErrorStatus().second); BOOST_CHECK(node.getDtStatus().first); BOOST_CHECK(node.getDtStatus().second); } BOOST_AUTO_TEST_CASE(initNodeTensorData) { NodeTensorDataCpu<float> node; node.initNodeTensorData(2, 5, 4, NodeType::hidden, "SumOp", true); // Test the batch and memory sizes BOOST_CHECK_EQUAL(node.getBatchSize(), 2); BOOST_CHECK_EQUAL(node.getMemorySize(), 5); BOOST_CHECK_EQUAL(node.getLayerSize(), 4); BOOST_CHECK_EQUAL(node.getLayerIntegration(), "SumOp"); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); node.initNodeTensorData(2, 5, 4, NodeType::bias, "SumOp", true); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 1.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); node.initNodeTensorData(2, 5, 4, NodeType::input, "SumOp", true); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); node.initNodeTensorData(2, 5, 4, NodeType::unmodifiable, "SumOp", true); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); node.initNodeTensorData(2, 5, 4, NodeType::recursive, "SumOp", true); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); node.initNodeTensorData(2, 5, 4, NodeType::hidden, "ProdOp", true); BOOST_CHECK_EQUAL(node.getLayerIntegration(), "ProdOp"); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 1.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); node.initNodeTensorData(2, 5, 4, NodeType::bias, "ProdOp", true); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 1.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); node.initNodeTensorData(2, 5, 4, NodeType::input, "ProdOp", true); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); node.initNodeTensorData(2, 5, 4, NodeType::unmodifiable, "ProdOp", true); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 1.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); node.initNodeTensorData(2, 5, 4, NodeType::recursive, "ProdOp", true); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 1.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); node.initNodeTensorData(2, 5, 4, NodeType::hidden, "ProdSCOp", true); BOOST_CHECK_EQUAL(node.getLayerIntegration(), "ProdSCOp"); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 1.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); node.initNodeTensorData(2, 5, 4, NodeType::bias, "ProdSCOp", true); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 1.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); node.initNodeTensorData(2, 5, 4, NodeType::input, "ProdSCOp", true); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); node.initNodeTensorData(2, 5, 4, NodeType::unmodifiable, "ProdSCOp", true); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 1.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); node.initNodeTensorData(2, 5, 4, NodeType::recursive, "ProdSCOp", true); BOOST_CHECK_EQUAL(node.getInput()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4, 3), 1.0); BOOST_CHECK_EQUAL(node.getOutput()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getError()(0, 0, 0), 0.0); BOOST_CHECK_EQUAL(node.getError()(1, 4, 3), 0.0); BOOST_CHECK_EQUAL(node.getDt()(0, 0, 0), 1.0); BOOST_CHECK_EQUAL(node.getDt()(1, 4, 3), 1.0); } BOOST_AUTO_TEST_SUITE_END()<file_sep>### the directory name set(directory include/EvoNet/io) ### list all header files of the directory here set(sources_list_h csv.h CSVWriter.h DataFile.h LinkFile.h ModelFile.h ModelInterpreterFile.h ModelInterpreterFileDefaultDevice.h ModelInterpreterFileGpu.h NodeFile.h Parameters.h PopulationTrainerFile.h WeightFile.h ) ### add path to the filenames set(sources_h) foreach(i ${sources_list_h}) list(APPEND sources_h ${directory}/${i}) endforeach(i) ### source group definition source_group("Header Files\\EvoNet\\io" FILES ${sources_h}) set(EvoNet_sources_h ${EvoNet_sources_h} ${sources_h}) <file_sep>/**TODO: Add copyright*/ #ifndef EVONET_SOLVERTENSOR_H #define EVONET_SOLVERTENSOR_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> #endif #include <unsupported/Eigen/CXX11/Tensor> #include <random> #include <iostream> namespace EvoNet { /** @brief Base class for all solvers. Clipping reference: <NAME>, <NAME>, <NAME> (2013) On the difficulty of training Recurrent Neural Networks arXiv:1211.5063 [cs.LG] Gradient Noise with annealed variance reference: <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2015). Adding Gradient Noise Improves Learning for Very Deep Networks, 1–11. Retrieved from http://arxiv.org/abs/1511.06807 <NAME> and <NAME>. 2011. Bayesian learning via stochastic gradient langevin dynamics. In Proceedings of the 28th International Conference on International Conference on Machine Learning (ICML'11), Lise Getoor and <NAME> (Eds.). Omnipress, USA, 681-688. */ template<typename TensorT, typename DeviceT> class SolverTensorOp { public: SolverTensorOp() = default; SolverTensorOp(const TensorT& gradient_threshold) : gradient_threshold_(gradient_threshold) {}; SolverTensorOp(const TensorT& gradient_threshold, const TensorT& gradient_noise_sigma, const TensorT& gradient_noise_gamma) : gradient_threshold_(gradient_threshold), gradient_noise_sigma_(gradient_noise_sigma), gradient_noise_gamma_(gradient_noise_gamma) {}; virtual ~SolverTensorOp() = default; virtual std::string getName() const = 0; void setLearningRate(const TensorT& learning_rate) { learning_rate_ = learning_rate; }; TensorT getLearningRate() const { return learning_rate_; }; void setGradientThreshold(const TensorT& gradient_threshold){gradient_threshold_ = gradient_threshold;}; TensorT getGradientThreshold() const{return gradient_threshold_;}; virtual void operator()(TensorT* weights, TensorT* errors, TensorT* solver_params, const int& source_layer_size, const int& sink_layer_size, const int& iter, DeviceT& device) = 0; void setGradientNoiseSigma(const TensorT& gradient_noise_sigma){gradient_noise_sigma_ = gradient_noise_sigma;}; TensorT getGradientNoiseSigma() const{return gradient_noise_sigma_;}; void setGradientNoiseGamma(const TensorT& gradient_noise_gamma){gradient_noise_gamma_ = gradient_noise_gamma;}; TensorT getGradientNoiseGamma() const{return gradient_noise_gamma_;}; void setEps(const TensorT& eps) { eps_ = eps; }; TensorT getEps() const { return eps_; } //virtual std::string getParameters() const = 0; TensorT annealGradientNoiseSigma(const TensorT& iter) { const TensorT sigma_annealed = gradient_noise_sigma_ / std::pow((1 + iter), gradient_noise_gamma_); // annealed variance return sigma_annealed; } private: TensorT gradient_threshold_ = TensorT(1e6); ///< maximum gradient magnitude for gradient clipping TensorT learning_rate_ = TensorT(1e-3); ///< the learning rate TensorT gradient_noise_sigma_ = TensorT(0.0); ///< variance before annealing (0.0 = none, 1.0 = normal distribution with mean = 0 and var = 1.0) for gradient noise TensorT gradient_noise_gamma_ = TensorT(0.55); ///< iter-dependend annealing factor for gradient noise TensorT eps_ = TensorT(1e-24); }; /** @brief Stochastic Gradient Descent (SGD) with momentum Solver. */ template<typename TensorT, typename DeviceT> class SGDTensorOp: public SolverTensorOp<TensorT, DeviceT> { public: using SolverTensorOp<TensorT, DeviceT>::SolverTensorOp; /* @brief SGD solver operator @params weights Data for the weight tensor @params error Data for the weight tensor errors @params solver_params Data for the solver params (Dim 2, size 3: learning rate, momentum, momentum_prev) @param source_layer_size Dim 0 @param sink_layer_size Dim 1 */ void operator()(TensorT* weights, TensorT* errors, TensorT* solver_params, const int& source_layer_size, const int& sink_layer_size, const int& iter, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> weights_tensor(weights, source_layer_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> errors_tensor(errors, source_layer_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> solver_params_tensor(solver_params, source_layer_size, sink_layer_size, 3); // Remove Nans auto errors_no_nans = (errors_tensor == errors_tensor).select(errors_tensor, errors_tensor.constant(TensorT(0))); // Gradient clipping auto clip = errors_no_nans.abs() > errors_no_nans.constant(this->getGradientThreshold()); auto errors_clipped = clip.select(errors_no_nans * errors_no_nans.constant(this->getGradientThreshold()) / errors_no_nans.abs(), errors_no_nans); // Gradient noise auto noise = weights_tensor.random()*weights_tensor.constant(this->annealGradientNoiseSigma(iter + 1)); auto errors_noise = errors_clipped + noise; // Weight updates solver_params_tensor.chip(2, 2).device(device) = solver_params_tensor.chip(1, 2) * solver_params_tensor.chip(2,2) + (errors_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2)) * errors_noise; auto velocity_unbiased = solver_params_tensor.chip(2, 2) / (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2).pow(iter + 1)); weights_tensor.device(device) -= solver_params_tensor.chip(0, 2) * velocity_unbiased.eval(); }; void setMomentum(const TensorT& momentum) { momentum_ = momentum; }; TensorT getMomentum() const { return momentum_; }; std::string getName() const{return "SGDTensorOp";}; private: TensorT momentum_ = TensorT(0.9); ///< Momentum }; /** @brief SSD Stochastic Gradient Descent Solver. References: <NAME>, <NAME>. Dissecting Adam: The Sign, Magnitude and Variance of Stochastic Gradients. arXiv:1705.07774, 2017. */ template<typename TensorT, typename DeviceT> class SSDTensorOp : public SolverTensorOp<TensorT, DeviceT> { public: using SolverTensorOp<TensorT, DeviceT>::SolverTensorOp; /* @brief Stochastic sign descent (SSD) solver operator @params weights Data for the weight tensor @params error Data for the weight tensor errors @params solver_params Data for the solver params (Dim 2, size 3: learning rate, momentum, momentum_prev) @param source_layer_size Dim 0 @param sink_layer_size Dim 1 */ void operator()(TensorT* weights, TensorT* errors, TensorT* solver_params, const int& source_layer_size, const int& sink_layer_size, const int& iter, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> weights_tensor(weights, source_layer_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> errors_tensor(errors, source_layer_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> solver_params_tensor(solver_params, source_layer_size, sink_layer_size, 3); // Gradient noise auto noise = weights_tensor.random()*weights_tensor.constant(this->annealGradientNoiseSigma(iter + 1)); auto errors_noise = errors_tensor + noise; // Remove Nans and return the sign of the gradient auto errors_sign = (errors_noise == errors_noise).select(errors_noise / errors_noise.abs(), errors_noise.constant(TensorT(0))); // Weight updates (omitting the bias correction step) solver_params_tensor.chip(2, 2).device(device) = solver_params_tensor.chip(1, 2) * solver_params_tensor.chip(2, 2) + (errors_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2)) * errors_sign; weights_tensor.device(device) -= solver_params_tensor.chip(0, 2) * solver_params_tensor.chip(2, 2); }; void setMomentum(const TensorT& momentum) { momentum_ = momentum; }; TensorT getMomentum() const { return momentum_; }; std::string getName() const { return "SSDTensorOp"; }; private: TensorT momentum_ = TensorT(0.9); ///< Momentum }; /** @brief Adam Solver. References: <NAME>, <NAME>. Adam: A Method for Stochastic TensorOptimization. International Conference for Learning Representations, 2015. */ template<typename TensorT, typename DeviceT> class AdamTensorOp: public SolverTensorOp<TensorT, DeviceT> { public: using SolverTensorOp<TensorT, DeviceT>::SolverTensorOp; /* @brief ADAM solver operator @params weights Data for the weight tensor @params errorr Data for the weight tensor errors @params solver_params Data for the solver params (Dim 2, size 6: learning rate, momentum, mementum2, delta, momentum_prev, momentum2_prev) @param source_layer_size Dim 0 @param sink_layer_size Dim 1 */ void operator()(TensorT* weights, TensorT* errors, TensorT* solver_params, const int& source_layer_size, const int& sink_layer_size, const int& iter, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> weights_tensor(weights, source_layer_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> errors_tensor(errors, source_layer_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> solver_params_tensor(solver_params, source_layer_size, sink_layer_size, 6); // Remove Nans auto errors_no_nans = (errors_tensor == errors_tensor).select(errors_tensor, errors_tensor.constant(TensorT(0))); // Gradient clipping auto clip = errors_no_nans.abs() > errors_no_nans.constant(this->getGradientThreshold()); auto errors_clipped = clip.select(errors_no_nans * errors_no_nans.constant(this->getGradientThreshold()) / errors_no_nans.abs(), errors_no_nans); // Gradient noise auto noise = weights_tensor.random()*weights_tensor.constant(this->annealGradientNoiseSigma(iter + 1)); auto errors_noise = errors_clipped + noise; // Weight updates (omitting the bias correction step) solver_params_tensor.chip(4, 2).device(device) = solver_params_tensor.chip(1, 2) * solver_params_tensor.chip(4, 2) + (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2)) * errors_noise; solver_params_tensor.chip(5, 2).device(device) = solver_params_tensor.chip(2, 2) * solver_params_tensor.chip(5, 2) + (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(2, 2)) * errors_noise.pow(2); auto unbiased_adam1 = solver_params_tensor.chip(4, 2) / (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2));// .pow(iter + 1)); auto unbiased_adam2 = solver_params_tensor.chip(5, 2) / (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(2, 2));// .pow(iter + 1)); auto result = solver_params_tensor.chip(0, 2) * unbiased_adam1.eval() / (unbiased_adam2.eval().sqrt() + solver_params_tensor.chip(3, 2)); weights_tensor.device(device) -= (result == result).select(result, result.constant(TensorT(0))); }; void setMomentum(const TensorT& momentum) { momentum_ = momentum; }; TensorT getMomentum() const { return momentum_; }; void setMomentum2(const TensorT& momentum2) { momentum2_ = momentum2; }; TensorT getMomentum2() const { return momentum2_; }; void setDelta(const TensorT& delta) { delta_ = delta; }; TensorT getDelta() const { return delta_; }; std::string getName() const{return "AdamTensorOp";}; private: TensorT momentum_ = TensorT(0.9); ///< Momentum TensorT momentum2_ = TensorT(0.999); ///< Momentum2 TensorT delta_ = TensorT(1e-8); ///< Delta }; /** @brief Stochastic Variance-Adapted Gradient (SVAG) Solver. References: <NAME>, <NAME>. Dissecting Adam: The Sign, Magnitude and Variance of Stochastic Gradients. arXiv:1705.07774, 2017. */ template<typename TensorT, typename DeviceT> class SVAGTensorOp : public SolverTensorOp<TensorT, DeviceT> { public: using SolverTensorOp<TensorT, DeviceT>::SolverTensorOp; /* @brief SVAG solver operator @params weights Data for the weight tensor @params errorr Data for the weight tensor errors @params solver_params Data for the solver params (Dim 2, size 4: learning rate, momentum, momentum_prev, variance_prev) @param source_layer_size Dim 0 @param sink_layer_size Dim 1 */ void operator()(TensorT* weights, TensorT* errors, TensorT* solver_params, const int& source_layer_size, const int& sink_layer_size, const int& iter, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> weights_tensor(weights, source_layer_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> errors_tensor(errors, source_layer_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> solver_params_tensor(solver_params, source_layer_size, sink_layer_size, 4); // Remove Nans auto errors_no_nans = (errors_tensor == errors_tensor).select(errors_tensor, errors_tensor.constant(TensorT(0))); // Gradient clipping auto clip = errors_no_nans.abs() > errors_no_nans.constant(this->getGradientThreshold()); auto errors_clipped = clip.select(errors_no_nans * errors_no_nans.constant(this->getGradientThreshold()) / errors_no_nans.abs(), errors_no_nans); // Gradient noise auto noise = weights_tensor.random()*weights_tensor.constant(this->annealGradientNoiseSigma(iter + 1)); auto errors_noise = errors_clipped + noise; // Calculate Rho TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[source_layer_size*sink_layer_size]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = source_layer_size*sink_layer_size * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> rho(tmp_data, source_layer_size, sink_layer_size); //rho.device(device) = (( // (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2)) * (weights_tensor.constant(TensorT(1)) + solver_params_tensor.chip(1, 2).pow(iter + 1))) / ( // (weights_tensor.constant(TensorT(1)) + solver_params_tensor.chip(1, 2)) * (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2)).pow(iter + 1)) // ).clip(TensorT(0), TensorT(1)); rho.device(device) = (( (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2)).sqrt() * (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2).pow(iter + 1).sqrt())) / ( (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2).pow(iter + 1)).sqrt() * (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2).sqrt())) ).clip(TensorT(0), TensorT(1)); // Calculate momentum and variance estimates solver_params_tensor.chip(2, 2).device(device) = solver_params_tensor.chip(1, 2) * solver_params_tensor.chip(2, 2) + (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2)) * errors_noise; solver_params_tensor.chip(3, 2).device(device) = solver_params_tensor.chip(1, 2) * solver_params_tensor.chip(3, 2) + (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2)) * errors_noise.pow(2); auto unbiased_mean = solver_params_tensor.chip(2, 2) / (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2).pow(iter + 1)); auto unbiased_var = solver_params_tensor.chip(3, 2) / (weights_tensor.constant(TensorT(1)) - solver_params_tensor.chip(1, 2).pow(iter + 1)); auto var_estimate = (rho > rho.constant(TensorT(1) - this->getEps())).select( (unbiased_var - unbiased_mean.pow(2)) / (weights_tensor.constant(TensorT(1)) - rho), rho.constant(TensorT(0))); auto gamma = unbiased_mean.pow(2) / (unbiased_mean.pow(2) + rho * var_estimate); // Weight updates weights_tensor.device(device) -= solver_params_tensor.chip(0, 2) * gamma.eval() * unbiased_mean.eval(); // Deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; std::string getName() const { return "SVAGTensorOp"; }; }; /** @brief Dummy solver that prevents weight update. */ template<typename TensorT, typename DeviceT> class DummySolverTensorOp : public SolverTensorOp<TensorT, DeviceT> { public: using SolverTensorOp<TensorT, DeviceT>::SolverTensorOp; void operator()(TensorT* weights, TensorT* errors, TensorT* solver_params, const int& source_layer_size, const int& sink_layer_size, const int& iter, DeviceT& device) { }; std::string getName() const { return "DummySolverTensorOp"; }; }; /** @brief Random Solver. [TODO: add method body and tests] */ /** @brief Hebian Solver. [TODO: add method body and tests] */ /** @brief SM-G-ABS (Safe mutation gradient) Solver. [TODO: add method body and tests] References: <NAME>, <NAME>, <NAME>, <NAME> (2018). Safe Mutations for Deep and Recurrent Neural Networks through Output Gradients. arXiv:1712.06563 */ } #endif //EVONET_SOLVERTENSOR_H<file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/PopulationTrainerDefaultDevice.h> #include <EvoNet/io/ModelInterpreterFileDefaultDevice.h> #include <EvoNet/models/CVAEFullyConnDefaultDevice.h> #include <EvoNet/simulator/MNISTSimulator.h> #include <EvoNet/simulator/DataSimulator.h> #include <unsupported/Eigen/CXX11/Tensor> using namespace EvoNet; using namespace EvoNetParameters; // Extended template<typename TensorT> class DataSimulatorExt : public MNISTSimulator<TensorT> { public: int n_encodings_; int n_categorical_; int encodings_traversal_iter_ = 0; int categorical_traversal_iter_ = 0; void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); const int n_input_pixels = this->validation_data.dimension(1); assert(n_output_nodes == n_input_pixels + 2 * n_encodings_ + n_categorical_); // mu, logvar, logalpha assert(n_metric_output_nodes == n_input_pixels); assert(n_input_nodes == n_input_pixels + n_encodings_ + 2 * n_categorical_); // Guassian sampler, Gumbel sampler, inverse tau // make the start and end sample indices Eigen::Tensor<int, 1> sample_indices = this->getTrainingIndices(batch_size, 1); // Reformat the input data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { // Gaussian Sampler Eigen::Tensor<TensorT, 2> gaussian_samples = GaussianSampler<TensorT>(1, n_encodings_); // Concrete Sampler Eigen::Tensor<TensorT, 2> categorical_samples = GumbelSampler<TensorT>(1, n_categorical_); TensorT inverse_tau = 3.0 / 2.0; //1.0 / 0.5; // Madison 2017 recommended 2/3 for tau // Assign the input/output values for (int nodes_iter = 0; nodes_iter < n_input_pixels; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = this->training_data(sample_indices(batch_iter), nodes_iter); loss_output_data(batch_iter, memory_iter, nodes_iter) = this->training_data(sample_indices(batch_iter), nodes_iter); metric_output_data(batch_iter, memory_iter, nodes_iter) = this->training_data(sample_indices(batch_iter), nodes_iter); if (nodes_iter < n_encodings_) { input_data(batch_iter, memory_iter, nodes_iter + n_input_pixels) = gaussian_samples(0, nodes_iter); // sample from a normal distribution loss_output_data(batch_iter, memory_iter, nodes_iter + n_input_pixels) = 0; // Dummy data for KL divergence mu loss_output_data(batch_iter, memory_iter, nodes_iter + n_input_pixels + n_encodings_) = 0; // Dummy data for KL divergence logvar } if (nodes_iter < n_categorical_) { input_data(batch_iter, memory_iter, nodes_iter + n_input_pixels + n_encodings_) = categorical_samples(0, nodes_iter); // sample from gumbel distribution input_data(batch_iter, memory_iter, nodes_iter + n_input_pixels + n_encodings_ + n_categorical_) = inverse_tau; // inverse tau loss_output_data(batch_iter, memory_iter, nodes_iter + n_input_pixels + 2 * n_encodings_) = 0; // Dummy data for the KL divergence cat } } } } } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); const int n_input_pixels = this->validation_data.dimension(1); assert(n_output_nodes == n_input_pixels + 2 * n_encodings_ + n_categorical_); // mu, logvar, logalpha assert(n_metric_output_nodes == n_input_pixels); assert(n_input_nodes == n_input_pixels + n_encodings_ + 2 * n_categorical_); // Guassian sampler, Gumbel sampler, inverse tau // make the start and end sample indices Eigen::Tensor<int, 1> sample_indices = this->getValidationIndices(batch_size, 1); // Reformat the input data for validation for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { // Gaussian Sampler Eigen::Tensor<TensorT, 2> gaussian_samples = GaussianSampler<TensorT>(1, n_encodings_); // Concrete Sampler Eigen::Tensor<TensorT, 2> categorical_samples = GumbelSampler<TensorT>(1, n_categorical_); TensorT inverse_tau = 3.0 / 2.0; //1.0 / 0.5; // Madison 2017 recommended 2/3 for tau // Assign the input/output values for (int nodes_iter = 0; nodes_iter < n_input_pixels; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = this->validation_data(sample_indices(batch_iter), nodes_iter); loss_output_data(batch_iter, memory_iter, nodes_iter) = this->validation_data(sample_indices(batch_iter), nodes_iter); metric_output_data(batch_iter, memory_iter, nodes_iter) = this->validation_data(sample_indices(batch_iter), nodes_iter); if (nodes_iter < n_encodings_) { input_data(batch_iter, memory_iter, nodes_iter + n_input_pixels) = gaussian_samples(0, nodes_iter); // sample from a normal distribution loss_output_data(batch_iter, memory_iter, nodes_iter + n_input_pixels) = 0; // Dummy data for KL divergence mu loss_output_data(batch_iter, memory_iter, nodes_iter + n_input_pixels + n_encodings_) = 0; // Dummy data for KL divergence logvar } if (nodes_iter < n_categorical_) { input_data(batch_iter, memory_iter, nodes_iter + n_input_pixels + n_encodings_) = categorical_samples(0, nodes_iter); // sample from gumbel distribution input_data(batch_iter, memory_iter, nodes_iter + n_input_pixels + n_encodings_ + n_categorical_) = inverse_tau; // inverse tau loss_output_data(batch_iter, memory_iter, nodes_iter + n_input_pixels + 2 * n_encodings_) = 0; // Dummy data for KL divergence cat } } } } } void simulateEvaluationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) override { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); assert(n_input_nodes == n_encodings_ + n_categorical_); // Guassian encoding, Gumbel categorical // Initialize the gaussian encodings to random and all categorical encodings to 0 input_data = input_data.constant(TensorT(0)); // initialize the input to 0; Eigen::array<Eigen::Index, 3> offsets = { 0, 0, 0 }; Eigen::array<Eigen::Index, 3> extents = { batch_size, memory_size, n_encodings_ }; input_data.slice(offsets, extents) = input_data.slice(offsets, extents).random(); // Assign the encoding values by sampling the 95% confidence limits of the inverse normal distribution const TensorT step_size = (0.95 - 0.05) / batch_size; input_data.chip(encodings_traversal_iter_, 2) = (input_data.chip(encodings_traversal_iter_, 2).constant(step_size).cumsum(0) + input_data.chip(encodings_traversal_iter_, 2).constant(TensorT(0.05))).ndtri(); // Assign the categorical values input_data.chip(n_encodings_ + categorical_traversal_iter_, 2) = input_data.chip(n_encodings_ + categorical_traversal_iter_, 2).constant(TensorT(1)); // Increase the traversal iterators encodings_traversal_iter_ += 1; if (encodings_traversal_iter_ >= n_encodings_) { encodings_traversal_iter_ = 0; categorical_traversal_iter_ += 1; } if (categorical_traversal_iter_ >= n_categorical_) { categorical_traversal_iter_ = 0; } } }; /** @brief Pixel reconstruction MNIST example whereby all pixels are linearized and read into the model. The model then attempts to reconstruction the pixels using an Auto Encoder network where the labels of the images are disentangled from the style of the images using a concrete distribution and gaussian distribution, respectively Data processing: - whole image pixels (linearized) 28x28 normalized to 0 to 1 */ template<class ...ParameterTypes> void main_(const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); // define the data simulator const std::size_t n_pixels = 784; const std::size_t training_data_size = 60000; //60000; const std::size_t validation_data_size = 10000; //10000; DataSimulatorExt<float> data_simulator; // read in the training data std::string training_data_filename = std::get<EvoNetParameters::General::DataDir>(parameters).get() + "train-images.idx3-ubyte"; std::string training_labels_filename = std::get<EvoNetParameters::General::DataDir>(parameters).get() + "train-labels.idx1-ubyte"; data_simulator.readData(training_data_filename, training_labels_filename, true, training_data_size, n_pixels); // read in the validation data std::string validation_data_filename = std::get<EvoNetParameters::General::DataDir>(parameters).get() + "t10k-images.idx3-ubyte"; std::string validation_labels_filename = std::get<EvoNetParameters::General::DataDir>(parameters).get() + "t10k-labels.idx1-ubyte"; data_simulator.readData(validation_data_filename, validation_labels_filename, false, validation_data_size, n_pixels); data_simulator.unitScaleData(); data_simulator.n_encodings_ = std::get<EvoNetParameters::ModelTrainer::NEncodingsContinuous>(parameters).get(); data_simulator.n_categorical_ = std::get<EvoNetParameters::ModelTrainer::NEncodingsCategorical>(parameters).get(); // Make the input nodes std::vector<std::string> input_nodes; makeInputNodes(input_nodes, n_pixels); // Make the encoding nodes and add them to the input makeGaussianEncodingSamplerNodes(input_nodes, args...); makeCategoricalEncodingSamplerNodes(input_nodes, args...); makeCategoricalEncodingTauNodes(input_nodes, args...); // Make the output nodes std::vector<std::string> output_nodes = makeOutputNodes(n_pixels); std::vector<std::string> encoding_nodes_mu = makeMuEncodingNodes(args...); std::vector<std::string> encoding_nodes_logvar = makeLogVarEncodingNodes(args...); std::vector<std::string> encoding_nodes_logalpha = makeAlphaEncodingNodes(args...); std::vector<std::string> categorical_softmax_nodes = makeCategoricalSoftmaxNodes(args...); // define the model trainers and resources for the trainers CVAEFullyConnDefaultDevice<float> model_trainer; setModelTrainerParameters(model_trainer, args...); model_trainer.KL_divergence_warmup_ = std::get<EvoNetParameters::ModelTrainer::KLDivergenceWarmup>(parameters).get(); model_trainer.beta_c_ = std::get<EvoNetParameters::ModelTrainer::Beta>(parameters).get(); model_trainer.capacity_c_ = std::get<EvoNetParameters::ModelTrainer::CapacityC>(parameters).get(); model_trainer.capacity_d_ = std::get<EvoNetParameters::ModelTrainer::CapacityD>(parameters).get(); model_trainer.learning_rate_ = std::get<EvoNetParameters::ModelTrainer::LearningRate>(parameters).get(); model_trainer.gradient_clipping_ = std::get<EvoNetParameters::ModelTrainer::GradientClipping>(parameters).get(); model_trainer.classification_loss_weight_ = std::get<EvoNetParameters::ModelTrainer::LossFncWeight0>(parameters).get(); model_trainer.supervision_warmup_ = std::get<EvoNetParameters::Examples::SupervisionWarmup>(parameters).get(); model_trainer.supervision_percent_ = std::get<EvoNetParameters::Examples::SupervisionPercent>(parameters).get(); std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper1, loss_function_helper2, loss_function_helper3, loss_function_helper4; loss_function_helper1.output_nodes_ = output_nodes; loss_function_helper1.loss_functions_ = { std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-6, 1.0)) }; loss_function_helper1.loss_function_grads_ = { std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-6, 1.0)) }; loss_function_helpers.push_back(loss_function_helper1); loss_function_helper2.output_nodes_ = encoding_nodes_mu; loss_function_helper2.loss_functions_ = { std::make_shared<KLDivergenceMuLossOp<float>>(KLDivergenceMuLossOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helper2.loss_function_grads_ = { std::make_shared<KLDivergenceMuLossGradOp<float>>(KLDivergenceMuLossGradOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helpers.push_back(loss_function_helper2); loss_function_helper3.output_nodes_ = encoding_nodes_logvar; loss_function_helper3.loss_functions_ = { std::make_shared<KLDivergenceLogVarLossOp<float>>(KLDivergenceLogVarLossOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helper3.loss_function_grads_ = { std::make_shared<KLDivergenceLogVarLossGradOp<float>>(KLDivergenceLogVarLossGradOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helpers.push_back(loss_function_helper3); loss_function_helper4.output_nodes_ = encoding_nodes_logalpha; loss_function_helper4.loss_functions_ = { std::make_shared<KLDivergenceCatLossOp<float>>(KLDivergenceCatLossOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helper4.loss_function_grads_ = { std::make_shared<KLDivergenceCatLossGradOp<float>>(KLDivergenceCatLossGradOp<float>(1e-6, 0.0, 0.0)) }; loss_function_helpers.push_back(loss_function_helper4); model_trainer.setLossFunctionHelpers(loss_function_helpers); std::vector<MetricFunctionHelper<float>> metric_function_helpers; MetricFunctionHelper<float> metric_function_helper1; metric_function_helper1.output_nodes_ = output_nodes; metric_function_helper1.metric_functions_ = { std::make_shared<MAEOp<float>>(MAEOp<float>()) }; metric_function_helper1.metric_names_ = { "MAE" }; metric_function_helpers.push_back(metric_function_helper1); model_trainer.setMetricFunctionHelpers(metric_function_helpers); // define the model and resources Model<float> model; std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; ModelInterpreterFileDefaultDevice<float> model_interpreter_file; makeModelAndInterpreters(model, model_trainer, model_interpreters, model_interpreter_file, n_pixels, args...); // define the model logger ModelLogger<float> model_logger(true, true, true, false, false, true, false, true); // Train the model std::pair<std::vector<float>, std::vector<float>> model_errors = model_trainer.trainModel(model, data_simulator, input_nodes, model_logger, model_interpreters.front()); } //void traverseLatentSpace(const std::string& data_dir, const bool& make_model) { // // // define the model logger // ModelLogger<float> model_logger(true, true, false, false, false, true, false, true); // // // define the data simulator // const std::size_t n_pixels = 784; // const std::size_t encoding_size = 8; // const std::size_t categorical_size = 10; // const std::size_t n_hidden = 512; // DataSimulatorExt<float> data_simulator; // data_simulator.n_encodings_ = encoding_size; // data_simulator.n_categorical_ = categorical_size; // // // Make the input nodes // std::vector<std::string> input_nodes; // // // Make the encoding nodes and add them to the input // for (int i = 0; i < encoding_size; ++i) { // char name_char[512]; // sprintf(name_char, "Gaussian_encoding_%012d", i); // std::string name(name_char); // input_nodes.push_back(name); // } // for (int i = 0; i < categorical_size; ++i) { // char name_char[512]; // sprintf(name_char, "Categorical_encoding-SoftMax-Out_%012d", i); // std::string name(name_char); // input_nodes.push_back(name); // } // // // Make the output nodes // std::vector<std::string> output_nodes; // for (int i = 0; i < n_pixels; ++i) { // char name_char[512]; // sprintf(name_char, "Output_%012d", i); // std::string name(name_char); // output_nodes.push_back(name); // } // // // define the model trainers and resources for the trainers // ModelResources model_resources = { ModelDevice(0, 1) }; // ModelInterpreterDefaultDevice<float> model_interpreter(model_resources); // // ModelTrainerExt<float> model_trainer; // model_trainer.setBatchSize(8); // determines the number of samples across the latent dimension // model_trainer.setNEpochsEvaluation(encoding_size * categorical_size); // determined by the number of latent dimensions to traverse // model_trainer.setMemorySize(1); // model_trainer.setVerbosityLevel(1); // model_trainer.setLogging(false, false, true); // model_trainer.setFindCycles(false); // model_trainer.setFastInterpreter(true); // // std::vector<LossFunctionHelper<float>> loss_function_helpers; // LossFunctionHelper<float> loss_function_helper1; // loss_function_helper1.output_nodes_ = output_nodes; // loss_function_helper1.loss_functions_ = { std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-6, 1.0)) }; // loss_function_helper1.loss_function_grads_ = { std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-6, 1.0 )) }; // loss_function_helpers.push_back(loss_function_helper1); // model_trainer.setLossFunctionHelpers(loss_function_helpers); // // std::vector<MetricFunctionHelper<float>> metric_function_helpers; // MetricFunctionHelper<float> metric_function_helper1; // metric_function_helper1.output_nodes_ = output_nodes; // metric_function_helper1.metric_functions_ = { std::make_shared<MAEOp<float>>(MAEOp<float>()) }; // metric_function_helper1.metric_names_ = { "MAE" }; // metric_function_helpers.push_back(metric_function_helper1); // model_trainer.setMetricFunctionHelpers(metric_function_helpers); // // // build the decoder and update the weights from the trained model // Model<float> model; // if (make_model) { // std::cout << "Making the model..." << std::endl; // model_trainer.makeCVAEDecoder(model, n_pixels, categorical_size, encoding_size, n_hidden); // std::cout << "Reading in the trained model weights..." << std::endl; // const std::string model_filename = data_dir + "CVAE_model.binary"; // ModelFile<float> model_file; // model_file.loadWeightValuesBinary(model_filename, model.weights_); // // // check that all weights were read in correctly // for (auto& weight_map : model.getWeightsMap()) { // if (weight_map.second->getInitWeight()) { // std::cout << "Model " << model.getName() << " Weight " << weight_map.first << " has not be initialized." << std::endl;; // } // } // } // else { // // read in the trained model // std::cout << "Reading in the model..." << std::endl; // const std::string model_filename = data_dir + "CVAEDecoder_model.binary"; // const std::string interpreter_filename = data_dir + "CVAEDecoder_interpreter.binary"; // ModelFile<float> model_file; // model_file.loadModelBinary(model_filename, model); // model.setId(1); // model.setName("CVAEDecoder1"); // ModelInterpreterFileDefaultDevice<float> model_interpreter_file; // model_interpreter_file.loadModelInterpreterBinary(interpreter_filename, model_interpreter); // } // // // traverse the latent space (evaluation) // Eigen::Tensor<float, 4> values = model_trainer.evaluateModel(model, data_simulator, input_nodes, model_logger, model_interpreter); //} int main(int argc, char** argv) { // Parse the user commands int id_int = -1; std::string parameters_filename = ""; parseCommandLineArguments(argc, argv, id_int, parameters_filename); // Set the parameter names and defaults EvoNetParameters::General::ID id("id", -1); EvoNetParameters::General::DataDir data_dir("data_dir", std::string("")); EvoNetParameters::General::OutputDir output_dir("output_dir", std::string("")); EvoNetParameters::Main::DeviceId device_id("device_id", 0); EvoNetParameters::Main::ModelName model_name("model_name", ""); EvoNetParameters::Main::MakeModel make_model("make_model", true); EvoNetParameters::Main::LoadModelCsv load_model_csv("load_model_csv", false); EvoNetParameters::Main::LoadModelBinary load_model_binary("load_model_binary", false); EvoNetParameters::Main::TrainModel train_model("train_model", true); EvoNetParameters::Main::EvolveModel evolve_model("evolve_model", false); EvoNetParameters::Main::EvaluateModel evaluate_model("evaluate_model", false); EvoNetParameters::Main::EvaluateModels evaluate_models("evaluate_models", false); EvoNetParameters::Examples::ModelType model_type("model_type", "EncDec"); // Options include EncDec, Enc, Dec EvoNetParameters::Examples::SimulationType simulation_type("simulation_type", ""); EvoNetParameters::Examples::SupervisionWarmup supervision_warmup("supervision_warmup", false); EvoNetParameters::Examples::SupervisionPercent supervision_percent("supervision_percent", 0); EvoNetParameters::PopulationTrainer::PopulationName population_name("population_name", ""); EvoNetParameters::PopulationTrainer::NGenerations n_generations("n_generations", 1); EvoNetParameters::PopulationTrainer::NInterpreters n_interpreters("n_interpreters", 1); EvoNetParameters::PopulationTrainer::PruneModelNum prune_model_num("prune_model_num", 10); EvoNetParameters::PopulationTrainer::RemoveIsolatedNodes remove_isolated_nodes("remove_isolated_nodes", true); EvoNetParameters::PopulationTrainer::CheckCompleteModelInputToOutput check_complete_model_input_to_output("check_complete_model_input_to_output", true); EvoNetParameters::PopulationTrainer::PopulationSize population_size("population_size", 128); EvoNetParameters::PopulationTrainer::NTop n_top("n_top", 8); EvoNetParameters::PopulationTrainer::NRandom n_random("n_random", 8); EvoNetParameters::PopulationTrainer::NReplicatesPerModel n_replicates_per_model("n_replicates_per_model", 1); EvoNetParameters::PopulationTrainer::ResetModelCopyWeights reset_model_copy_weights("reset_model_copy_weights", true); EvoNetParameters::PopulationTrainer::ResetModelTemplateWeights reset_model_template_weights("reset_model_template_weights", true); EvoNetParameters::PopulationTrainer::Logging population_logging("population_logging", true); EvoNetParameters::PopulationTrainer::SetPopulationSizeFixed set_population_size_fixed("set_population_size_fixed", false); EvoNetParameters::PopulationTrainer::SetPopulationSizeDoubling set_population_size_doubling("set_population_size_doubling", true); EvoNetParameters::PopulationTrainer::SetTrainingStepsByModelSize set_training_steps_by_model_size("set_training_steps_by_model_size", false); EvoNetParameters::ModelTrainer::BatchSize batch_size("batch_size", 32); EvoNetParameters::ModelTrainer::MemorySize memory_size("memory_size", 64); EvoNetParameters::ModelTrainer::NEpochsTraining n_epochs_training("n_epochs_training", 1000); EvoNetParameters::ModelTrainer::NEpochsValidation n_epochs_validation("n_epochs_validation", 25); EvoNetParameters::ModelTrainer::NEpochsEvaluation n_epochs_evaluation("n_epochs_evaluation", 10); EvoNetParameters::ModelTrainer::NTBTTSteps n_tbtt_steps("n_tbtt_steps", 64); EvoNetParameters::ModelTrainer::NTETTSteps n_tett_steps("n_tett_steps", 64); EvoNetParameters::ModelTrainer::Verbosity verbosity("verbosity", 1); EvoNetParameters::ModelTrainer::LoggingTraining logging_training("logging_training", true); EvoNetParameters::ModelTrainer::LoggingValidation logging_validation("logging_validation", false); EvoNetParameters::ModelTrainer::LoggingEvaluation logging_evaluation("logging_evaluation", true); EvoNetParameters::ModelTrainer::FindCycles find_cycles("find_cycles", true); EvoNetParameters::ModelTrainer::FastInterpreter fast_interpreter("fast_interpreter", true); EvoNetParameters::ModelTrainer::PreserveOoO preserve_ooo("preserve_ooo", true); EvoNetParameters::ModelTrainer::InterpretModel interpret_model("interpret_model", true); EvoNetParameters::ModelTrainer::ResetModel reset_model("reset_model", false); EvoNetParameters::ModelTrainer::NHidden0 n_hidden_0("n_hidden_0", 16); EvoNetParameters::ModelTrainer::NHidden1 n_hidden_1("n_hidden_1", 0); EvoNetParameters::ModelTrainer::NHidden2 n_hidden_2("n_hidden_2", 0); EvoNetParameters::ModelTrainer::LossFncWeight0 loss_fnc_weight_0("loss_fnc_weight_0", 1); // Classification loss EvoNetParameters::ModelTrainer::LossFncWeight1 loss_fnc_weight_1("loss_fnc_weight_1", 1); // Reconstruction loss EvoNetParameters::ModelTrainer::LossFncWeight2 loss_fnc_weight_2("loss_fnc_weight_2", 0); EvoNetParameters::ModelTrainer::LearningRate learning_rate("learning_rate", 1e-5); EvoNetParameters::ModelTrainer::GradientClipping gradient_clipping("gradient_clipping", 10); EvoNetParameters::ModelTrainer::ResetInterpreter reset_interpreter("reset_interpreter", true); EvoNetParameters::ModelTrainer::KLDivergenceWarmup KL_divergence_warmup("KL_divergence_warmup", true); EvoNetParameters::ModelTrainer::NEncodingsContinuous n_encodings_continuous("n_encodings_continuous", 8); EvoNetParameters::ModelTrainer::NEncodingsCategorical n_encodings_categorical("n_encodings_categorical", 8); EvoNetParameters::ModelTrainer::Beta beta("beta", 30); EvoNetParameters::ModelTrainer::CapacityC capacity_c("capacity_c", 5); EvoNetParameters::ModelTrainer::CapacityD capacity_d("capacity_d", 5); EvoNetParameters::ModelReplicator::NNodeDownAdditionsLB n_node_down_additions_lb("n_node_down_additions_lb", 0); EvoNetParameters::ModelReplicator::NNodeRightAdditionsLB n_node_right_additions_lb("n_node_right_additions_lb", 0); EvoNetParameters::ModelReplicator::NNodeDownCopiesLB n_node_down_copies_lb("n_node_down_copies_lb", 0); EvoNetParameters::ModelReplicator::NNodeRightCopiesLB n_node_right_copies_lb("n_node_right_copies_lb", 0); EvoNetParameters::ModelReplicator::NLinkAdditionsLB n_link_additons_lb("n_link_additons_lb", 0); EvoNetParameters::ModelReplicator::NLinkCopiesLB n_link_copies_lb("n_link_copies_lb", 0); EvoNetParameters::ModelReplicator::NNodeDeletionsLB n_node_deletions_lb("n_node_deletions_lb", 0); EvoNetParameters::ModelReplicator::NLinkDeletionsLB n_link_deletions_lb("n_link_deletions_lb", 0); EvoNetParameters::ModelReplicator::NNodeActivationChangesLB n_node_activation_changes_lb("n_node_activation_changes_lb", 0); EvoNetParameters::ModelReplicator::NNodeIntegrationChangesLB n_node_integration_changes_lb("n_node_integration_changes_lb", 0); EvoNetParameters::ModelReplicator::NModuleAdditionsLB n_module_additions_lb("n_module_additions_lb", 0); EvoNetParameters::ModelReplicator::NModuleCopiesLB n_module_copies_lb("n_module_copies_lb", 0); EvoNetParameters::ModelReplicator::NModuleDeletionsLB n_module_deletions_lb("n_module_deletions_lb", 0); EvoNetParameters::ModelReplicator::NNodeDownAdditionsUB n_node_down_additions_ub("n_node_down_additions_ub", 0); EvoNetParameters::ModelReplicator::NNodeRightAdditionsUB n_node_right_additions_ub("n_node_right_additions_ub", 0); EvoNetParameters::ModelReplicator::NNodeDownCopiesUB n_node_down_copies_ub("n_node_down_copies_ub", 0); EvoNetParameters::ModelReplicator::NNodeRightCopiesUB n_node_right_copies_ub("n_node_right_copies_ub", 0); EvoNetParameters::ModelReplicator::NLinkAdditionsUB n_link_additons_ub("n_link_additons_ub", 0); EvoNetParameters::ModelReplicator::NLinkCopiesUB n_link_copies_ub("n_link_copies_ub", 0); EvoNetParameters::ModelReplicator::NNodeDeletionsUB n_node_deletions_ub("n_node_deletions_ub", 0); EvoNetParameters::ModelReplicator::NLinkDeletionsUB n_link_deletions_ub("n_link_deletions_ub", 0); EvoNetParameters::ModelReplicator::NNodeActivationChangesUB n_node_activation_changes_ub("n_node_activation_changes_ub", 0); EvoNetParameters::ModelReplicator::NNodeIntegrationChangesUB n_node_integration_changes_ub("n_node_integration_changes_ub", 0); EvoNetParameters::ModelReplicator::NModuleAdditionsUB n_module_additions_ub("n_module_additions_ub", 0); EvoNetParameters::ModelReplicator::NModuleCopiesUB n_module_copies_ub("n_module_copies_ub", 0); EvoNetParameters::ModelReplicator::NModuleDeletionsUB n_module_deletions_ub("n_module_deletions_ub", 0); EvoNetParameters::ModelReplicator::SetModificationRateFixed set_modification_rate_fixed("set_modification_rate_fixed", false); EvoNetParameters::ModelReplicator::SetModificationRateByPrevError set_modification_rate_by_prev_error("set_modification_rate_by_prev_error", false); auto parameters = std::make_tuple(id, data_dir, output_dir, device_id, model_name, make_model, load_model_csv, load_model_binary, train_model, evolve_model, evaluate_model, evaluate_models, model_type, simulation_type, supervision_warmup, supervision_percent, population_name, n_generations, n_interpreters, /*prune_model_num, remove_isolated_nodes, check_complete_model_input_to_output, population_size, n_top, n_random, n_replicates_per_model, reset_model_copy_weights, reset_model_template_weights, population_logging, set_population_size_fixed, set_population_size_doubling, set_training_steps_by_model_size,*/ batch_size, memory_size, n_epochs_training, n_epochs_validation, n_epochs_evaluation, n_tbtt_steps, n_tett_steps, verbosity, logging_training, logging_validation, logging_evaluation, find_cycles, fast_interpreter, preserve_ooo, interpret_model, reset_model, n_hidden_0, n_hidden_1, n_hidden_2, loss_fnc_weight_0, loss_fnc_weight_1, loss_fnc_weight_2, learning_rate, gradient_clipping, reset_interpreter, KL_divergence_warmup, n_encodings_continuous, n_encodings_categorical, beta, capacity_c, capacity_d/*, n_node_down_additions_lb, n_node_right_additions_lb, n_node_down_copies_lb, n_node_right_copies_lb, n_link_additons_lb, n_link_copies_lb, n_node_deletions_lb, n_link_deletions_lb, n_node_activation_changes_lb, n_node_integration_changes_lb, n_module_additions_lb, n_module_copies_lb, n_module_deletions_lb, n_node_down_additions_ub, n_node_right_additions_ub, n_node_down_copies_ub, n_node_right_copies_ub, n_link_additons_ub, n_link_copies_ub, n_node_deletions_ub, n_link_deletions_ub, n_node_activation_changes_ub, n_node_integration_changes_ub, n_module_additions_ub, n_module_copies_ub, n_module_deletions_ub, set_modification_rate_fixed, set_modification_rate_by_prev_error*/); // Read in the parameters LoadParametersFromCsv loadParametersFromCsv(id_int, parameters_filename); parameters = EvoNet::apply([&loadParametersFromCsv](auto&& ...args) { return loadParametersFromCsv(args...); }, parameters); // Run the application EvoNet::apply([](auto&& ...args) { main_(args ...); }, parameters); return 0; }<file_sep> cmake_minimum_required(VERSION 3.8.2 FATAL_ERROR) project("EvoNet_class_examples_evonet") message(STATUS "building examples...") #------------------------------------------------------------------------------ # get the test executables include(executables.cmake) #------------------------------------------------------------------------------ # Include directories for examples set(EVONET_EXAMPLES_INTERNAL_INCLUDE_DIRECTORIES "${PROJECT_BINARY_DIR}/include/") # add EvoNet directories set(EVONET_EXAMPLES_EXTERNAL_INCLUDE_DIRECTORIES "${EvoNet_INCLUDE_DIRECTORIES}") include_directories(${EVONET_EXAMPLES_INTERNAL_INCLUDE_DIRECTORIES}) include_directories(SYSTEM ${EVONET_EXAMPLES_EXTERNAL_INCLUDE_DIRECTORIES}) #------------------------------------------------------------------------------ # disable optimization for examples for gcc like compilers if (CMAKE_COMPILER_IS_INTELCXX OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG) set(_TMP_CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE}) set(CMAKE_CXX_FLAGS_RELEASE "-O0") endif() #------------------------------------------------------------------------------ # ml examples add_custom_target(ML_EXAMPLES) add_dependencies(ML_EXAMPLES ${ml_executables_list}) # cuda examples if (EVONET_CUDA) add_custom_target(CUDA_EXAMPLES) add_dependencies(CUDA_EXAMPLES ${cuda_executables_list}) endif() #------------------------------------------------------------------------------ # Add the actual examples foreach(_examples ${ml_executables_list}) add_executable(${_examples} source/${_examples}) target_link_libraries(${_examples} ${EvoNet_LIBRARIES}) # only add OPENMP flags to gcc linker (execpt Mac OS X, due to compiler bug # see https://sourceforge.net/apps/trac/open-ms/ticket/280 for details) if (OPENMP_FOUND AND NOT MSVC AND NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin") set_target_properties(${_examples} PROPERTIES LINK_FLAGS ${OpenMP_CXX_FLAGS}) endif() if (EVONET_CUDA) target_link_libraries(${_examples} ${CUDA_LIBRARIES}) set_property(TARGET ${_examples} PROPERTY CUDA_STANDARD 11) endif() endforeach(_examples) if (EVONET_CUDA) foreach(_examples ${cuda_executables_list}) add_executable(${_examples} source/${_examples}) target_link_libraries(${_examples} ${EvoNet_LIBRARIES}) # only add OPENMP flags to gcc linker (execpt Mac OS X, due to compiler bug # see https://sourceforge.net/apps/trac/open-ms/ticket/280 for details) if (OPENMP_FOUND AND NOT MSVC AND NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin") set_target_properties(${_examples} PROPERTIES LINK_FLAGS ${OpenMP_CXX_FLAGS}) endif() target_link_libraries(${_examples} ${CUDA_LIBRARIES}) set_property(TARGET ${_examples} PROPERTY CUDA_STANDARD 11) endforeach(_examples) endif() #------------------------------------------------------------------------------ # restore old compiler flags if (CMAKE_COMPILER_IS_INTELCXX OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG) set(${CMAKE_CXX_FLAGS_RELEASE} ${_TMP_CMAKE_CXX_FLAGS_RELEASE}) endif() #------------------------------------------------------------------------------ # add filenames to Visual Studio solution tree set(sources_VS) foreach(i ${EXAMPLE_executables}) list(APPEND sources_VS "${i}") endforeach(i) source_group("" FILES ${sources_VS})<file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/PopulationTrainerExperimentalDefaultDevice.h> #include <EvoNet/ml/ModelTrainerExperimentalDefaultDevice.h> #include <EvoNet/ml/ModelReplicatorExperimental.h> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/ml/Model.h> #include <EvoNet/io/Parameters.h> #include <EvoNet/simulator/HarmonicOscillatorSimulator.h> #include <unsupported/Eigen/CXX11/Tensor> using namespace EvoNet; template<typename TensorT> class DataSimulatorExt : public HarmonicOscillatorSimulator<TensorT> { public: std::string simulation_name_ = std::string("WeightSpring1W1S1DwDamping"); void simulateDataWeightSpring3W2S1D(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); HarmonicOscillatorSimulator<float> WeightSpring; std::random_device rd{}; std::mt19937 gen{ rd() }; std::normal_distribution<> dist{ 0.0f, 1.0f }; // Generate the input and output data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int epochs_iter = 0; epochs_iter < n_epochs; ++epochs_iter) { // Simulate a 3 weight and 2 spring 1D harmonic system // where the middle weight has been displaced by a random amount Eigen::Tensor<float, 1> time_steps(memory_size); Eigen::Tensor<float, 2> displacements(memory_size, 3); WeightSpring.WeightSpring3W2S1D(time_steps, displacements, memory_size, 0.1, 1, 1, 1, //A 1, 1, 1, //m 0, dist(gen), 0, //xo 1); for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { if (memory_iter >= memory_size - 1) input_data(batch_iter, memory_iter, 0, epochs_iter) = displacements(memory_size - 1 - memory_iter, 1); // m2 else input_data(batch_iter, memory_iter, 0, epochs_iter) = TensorT(0); output_data(batch_iter, memory_iter, 0, epochs_iter) = displacements(memory_size - 1 - memory_iter, 0); // m1 output_data(batch_iter, memory_iter, 1, epochs_iter) = displacements(memory_size - 1 - memory_iter, 2); // m3 } } } time_steps.setConstant(1.0f); } void simulateDataWeightSpring3W2S1D(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); HarmonicOscillatorSimulator<float> WeightSpring; std::random_device rd{}; std::mt19937 gen{ rd() }; std::normal_distribution<> dist{ 0.0f, 1.0f }; // Generate the input and output data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { // Simulate a 3 weight and 2 spring 1D harmonic system // where the middle weight has been displaced by a random amount Eigen::Tensor<float, 1> time_steps(memory_size); Eigen::Tensor<float, 2> displacements(memory_size, 3); WeightSpring.WeightSpring3W2S1D(time_steps, displacements, memory_size, 0.1, 1, 1, 1, //A 1, 1, 1, //m 0, dist(gen), 0, //xo 1); for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { if (memory_iter >= memory_size - 1) input_data(batch_iter, memory_iter, 0) = displacements(memory_size - 1 - memory_iter, 1); // m2 else input_data(batch_iter, memory_iter, 0) = TensorT(0); output_data(batch_iter, memory_iter, 0) = displacements(memory_size - 1 - memory_iter, 0); // m1 output_data(batch_iter, memory_iter, 1) = displacements(memory_size - 1 - memory_iter, 2); // m3 } } time_steps.setConstant(1.0f); } void simulateDataWeightSpring1W1S1D(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); HarmonicOscillatorSimulator<float> WeightSpring; std::random_device rd{}; std::mt19937 gen{ rd() }; std::normal_distribution<> dist{ 0.0f, 1.0f }; const int time_course_multiplier = 2; // How long to make the time course based on the memory size const int n_batches_per_time_course = 4; // The number of chunks each simulation time course is chopped into const int time_steps_size = ((memory_size > n_batches_per_time_course) ? memory_size : n_batches_per_time_course)* time_course_multiplier + 1; // The total number of time_steps per simulation time course Eigen::Tensor<float, 1> time_steps_displacements(time_steps_size); Eigen::Tensor<float, 2> displacements_all(time_steps_size, 1); // Generate the input and output data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int epochs_iter = 0; epochs_iter < n_epochs; ++epochs_iter) { // Simulate a 1 weight and 1 spring 1D harmonic system where the weight has been displaced by a random amount const int remainder = batch_iter % n_batches_per_time_course; const int increment = (time_course_multiplier * memory_size - memory_size) / (n_batches_per_time_course - 1); if (remainder == 0) { WeightSpring.WeightSpring1W1S1D(time_steps_displacements, displacements_all, time_steps_size, 0.1, 1, 1, dist(gen), 0); } Eigen::array<Eigen::Index, 2> offset = { increment * remainder, 0 }; Eigen::array<Eigen::Index, 2> span = { memory_size + 1, 1 }; Eigen::Tensor<float, 2> displacements = displacements_all.slice(offset, span); for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { if (memory_iter < 1) input_data(batch_iter, memory_size - 1 - memory_iter, 0, epochs_iter) = displacements(memory_iter, 0); else input_data(batch_iter, memory_size - 1 - memory_iter, 0, epochs_iter) = TensorT(0); output_data(batch_iter, memory_size - 1 - memory_iter, 0, epochs_iter) = displacements(memory_iter + 1, 0); } } } time_steps.setConstant(1.0f); } void simulateDataWeightSpring1W1S1D(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); HarmonicOscillatorSimulator<float> WeightSpring; std::random_device rd{}; std::mt19937 gen{ rd() }; std::normal_distribution<> dist{ 0.0f, 1.0f }; const int time_course_multiplier = 2; // How long to make the time course based on the memory size const int n_batches_per_time_course = 4; // The number of chunks each simulation time course is chopped into const int time_steps_size = ((memory_size > n_batches_per_time_course) ? memory_size : n_batches_per_time_course)* time_course_multiplier + 1; // The total number of time_steps per simulation time course Eigen::Tensor<float, 1> time_steps_displacements(time_steps_size); Eigen::Tensor<float, 2> displacements_all(time_steps_size, 1); // Generate the input and output data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { // Simulate a 1 weight and 1 spring 1D harmonic system where the weight has been displaced by a random amount const int remainder = batch_iter % n_batches_per_time_course; const int increment = (time_course_multiplier * memory_size - memory_size) / (n_batches_per_time_course - 1); if (remainder == 0) { WeightSpring.WeightSpring1W1S1D(time_steps_displacements, displacements_all, time_steps_size, 0.1, 1, 1, dist(gen), 0); } Eigen::array<Eigen::Index, 2> offset = { increment * remainder, 0 }; Eigen::array<Eigen::Index, 2> span = { memory_size + 1, 1 }; Eigen::Tensor<float, 2> displacements = displacements_all.slice(offset, span); for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { if (memory_iter < 1) input_data(batch_iter, memory_size - 1 - memory_iter, 0) = displacements(memory_iter, 0); else input_data(batch_iter, memory_size - 1 - memory_iter, 0) = TensorT(0); output_data(batch_iter, memory_size - 1 - memory_iter, 0) = displacements(memory_iter + 1, 0); // The next time point metric_output_data(batch_iter, memory_size - 1 - memory_iter, 0) = displacements(memory_iter + 1, 0); // The next time point } } time_steps.setConstant(1.0f); } void simulateDataWeightSpring1W1S1DwDamping(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); HarmonicOscillatorSimulator<float> WeightSpring; std::random_device rd{}; std::mt19937 gen{ rd() }; std::normal_distribution<> dist{ 0.0f, 1.0f }; // Generate the input and output data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int epochs_iter = 0; epochs_iter < n_epochs; ++epochs_iter) { // Simulate a 1 weight and 1 spring 1D harmonic system // where the weight has been displaced by a random amount Eigen::Tensor<float, 1> time_steps(memory_size + 1); Eigen::Tensor<float, 2> displacements(memory_size + 1, 1); WeightSpring.WeightSpring1W1S1DwDamping(time_steps, displacements, memory_size + 1, 0.1, 1, 1, 0.5, dist(gen), 0); for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { if (memory_iter < 1) input_data(batch_iter, memory_size - 1 - memory_iter, 0, epochs_iter) = displacements(memory_iter, 0); else input_data(batch_iter, memory_size - 1 - memory_iter, 0, epochs_iter) = TensorT(0); output_data(batch_iter, memory_size - 1 - memory_iter, 0, epochs_iter) = displacements(memory_iter + 1, 0); } } } time_steps.setConstant(1.0f); } void simulateDataWeightSpring1W1S1DwDamping(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); HarmonicOscillatorSimulator<float> WeightSpring; std::random_device rd{}; std::mt19937 gen{ rd() }; std::normal_distribution<> dist{ 0.0f, 1.0f }; const int time_course_multiplier = 2; // How long to make the time course based on the memory size const int n_batches_per_time_course = 4; // The number of chunks each simulation time course is chopped into const int time_steps_size = ((memory_size > n_batches_per_time_course) ? memory_size : n_batches_per_time_course)* time_course_multiplier + 1; // The total number of time_steps per simulation time course Eigen::Tensor<float, 1> time_steps_displacements(time_steps_size); Eigen::Tensor<float, 2> displacements_all(time_steps_size, 1); // Generate the input and output data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { // Simulate a 1 weight and 1 spring 1D harmonic system where the weight has been displaced by a random amount const int remainder = batch_iter % n_batches_per_time_course; const int increment = (time_steps_size - 1 - memory_size) / (n_batches_per_time_course - 1); if (remainder == 0) { WeightSpring.WeightSpring1W1S1DwDamping(time_steps_displacements, displacements_all, time_steps_size, 0.1, 1, 1, 0.5, dist(gen), 0); } Eigen::array<Eigen::Index, 2> offset = { increment * remainder, 0 }; Eigen::array<Eigen::Index, 2> span = { memory_size + 1, 1 }; Eigen::Tensor<float, 2> displacements = displacements_all.slice(offset, span); for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { if (memory_iter < 1) input_data(batch_iter, memory_size - 1 - memory_iter, 0) = displacements(memory_iter, 0); else input_data(batch_iter, memory_size - 1 - memory_iter, 0) = TensorT(0); output_data(batch_iter, memory_size - 1 - memory_iter, 0) = displacements(memory_iter + 1, 0); // The next time point } } time_steps.setConstant(1.0f); } void simulateTrainingData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { if (simulation_name_ == "WeightSpring1W1S1D") simulateDataWeightSpring1W1S1D(input_data, output_data, time_steps); else if (simulation_name_ == "WeightSpring1W1S1DwDamping") simulateDataWeightSpring1W1S1DwDamping(input_data, output_data, time_steps); else if (simulation_name_ == "WeightSpring3W2S1D") simulateDataWeightSpring3W2S1D(input_data, output_data, time_steps); } void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { if (simulation_name_ == "WeightSpring1W1S1D") simulateDataWeightSpring1W1S1D(input_data, output_data, metric_output_data, time_steps); else if (simulation_name_ == "WeightSpring1W1S1DwDamping") simulateDataWeightSpring1W1S1DwDamping(input_data, output_data, metric_output_data, time_steps); else if (simulation_name_ == "WeightSpring3W2S1D") simulateDataWeightSpring3W2S1D(input_data, output_data, metric_output_data, time_steps); } void simulateValidationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { if (simulation_name_ == "WeightSpring1W1S1D") simulateDataWeightSpring1W1S1D(input_data, output_data, time_steps); else if (simulation_name_ == "WeightSpring1W1S1DwDamping") simulateDataWeightSpring1W1S1DwDamping(input_data, output_data, time_steps); else if (simulation_name_ == "WeightSpring3W2S1D") simulateDataWeightSpring3W2S1D(input_data, output_data, time_steps); } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { if (simulation_name_ == "WeightSpring1W1S1D") simulateDataWeightSpring1W1S1D(input_data, output_data, metric_output_data, time_steps); else if (simulation_name_ == "WeightSpring1W1S1DwDamping") simulateDataWeightSpring1W1S1DwDamping(input_data, output_data, metric_output_data, time_steps); else if (simulation_name_ == "WeightSpring3W2S1D") simulateDataWeightSpring3W2S1D(input_data, output_data, metric_output_data, time_steps); } void simulateEvaluationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 3>& time_steps) {}; void simulateEvaluationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { // HACK: using output_data as metric_output_data if (simulation_name_ == "WeightSpring1W1S1D") simulateDataWeightSpring1W1S1D(input_data, metric_output_data, metric_output_data, time_steps); else if (simulation_name_ == "WeightSpring1W1S1DwDamping") simulateDataWeightSpring1W1S1DwDamping(input_data, metric_output_data, Eigen::Tensor<TensorT, 3>(), time_steps); else if (simulation_name_ == "WeightSpring3W2S1D") simulateDataWeightSpring3W2S1D(input_data, metric_output_data, Eigen::Tensor<TensorT, 3>(), time_steps); }; }; // Extended classes template<typename TensorT> class ModelTrainerExt : public ModelTrainerDefaultDevice<TensorT> { public: /* @brief Interaction graph network for linear harmonic oscillator systems consisting of springs, masses, and a fixed wall tethered to one of the springs with or without damping each mass will get its own input and output @param[in] model @param[in] n_masses The number of masses @param[in] n_springs The number of springs @param[in] n_fc_0 (Optional) The number of layers in the first fully connected layer @param[in] n_fc_1 (Optional) The number of layers in the second fully connected layer */ void makeHarmonicOscillator1D(Model<TensorT>& model, const int& n_masses, const int& n_fc_1, const int& n_fc_2, const bool& add_biases, const bool& specify_layers) { model.setId(0); model.setName("HarmonicOscillator1D"); ModelBuilder<TensorT> model_builder; // Define the node activation auto activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); auto activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); auto activation_masses = std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()); auto activation_masses_grad = std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver and weight init auto weight_init = std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1.0)); auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(1e-5, 0.9, 0.999, 1e-8, 10)); // Make the input nodes std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", n_masses, specify_layers); // Connect the input nodes to the masses std::vector<std::string> node_names_masses_t0 = model_builder.addSinglyConnected(model, "Mass(t)", "Mass(t)", node_names_input, n_masses, activation_masses, activation_masses_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, add_biases, specify_layers); for (const std::string& node_name : node_names_masses_t0) model.getNodesMap().at(node_name)->setType(NodeType::unmodifiable); // Make the mass(t+1) nodes std::vector<std::string> node_names_masses_t1 = model_builder.addHiddenNodes(model, "Mass(t+1)", "Mass(t+1)", n_masses, activation_masses, activation_masses_grad, integration_op, integration_error_op, integration_weight_grad_op, solver_op, 0.0f, 0.0f, add_biases, specify_layers); //// Connect the mass(t) nodes to the mass(t+1) nodes //std::vector<std::string> node_names_masses_t1 = model_builder.addSinglyConnected(model, "Mass(t+1)", "Mass(t+1)", node_names_masses_t0, n_masses, // activation_masses, activation_masses_grad, // integration_op, integration_error_op, integration_weight_grad_op, // std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), // std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, add_biases, specify_layers); for (const std::string& node_name : node_names_masses_t1) model.getNodesMap().at(node_name)->setType(NodeType::unmodifiable); // Connect the mass(t+1) nodes to the mass(t) nodes model_builder.addSinglyConnected(model, "Mass", node_names_masses_t1, node_names_masses_t0, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, specify_layers); for (int i = 0; i < n_masses; ++i) model.addCyclicPairs(std::make_pair(node_names_masses_t1.at(i), node_names_masses_t0.at(i))); // Connect the mass to the output nodes std::vector<std::string> node_names_output = model_builder.addSinglyConnected(model, "Output", "Output", node_names_masses_t1, n_masses, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, add_biases, specify_layers); for (const std::string& node_name : node_names_output) model.getNodesMap().at(node_name)->setType(NodeType::output); // Make the gravity and wall nodes std::vector<std::string> node_names_wall = model_builder.addInputNodes(model, "Wall", "Input", 1, specify_layers); model.getNodesMap().at(node_names_wall.front())->setType(NodeType::bias); // Make the deep learning layers between each of the masses for (int mass_iter = 0; mass_iter < n_masses; ++mass_iter) { std::vector<std::string> node_names; // determine the input nodes if (mass_iter == 0 && mass_iter == n_masses - 1) { node_names = { node_names_wall.front(), node_names_masses_t0.at(mass_iter) }; } else if (mass_iter == 0) { node_names = { node_names_wall.front(), node_names_masses_t0.at(mass_iter), node_names_masses_t0.at(mass_iter + 1) }; } else if (mass_iter == n_masses - 1) { node_names = { node_names_masses_t0.at(mass_iter - 1), node_names_masses_t0.at(mass_iter) }; } else { node_names = { node_names_masses_t0.at(mass_iter - 1), node_names_masses_t0.at(mass_iter), node_names_masses_t0.at(mass_iter + 1) }; } // make the FC layers between input nodes if (n_fc_1 > 0) { node_names = model_builder.addFullyConnected(model, "FC1Forward", "FC1Forward", node_names, n_fc_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size() + n_fc_1, 2)), //weight_init, solver_op, 0.0f, 0.0f, add_biases, specify_layers); } if (n_fc_2 > 0) { node_names = model_builder.addFullyConnected(model, "FC2Forward", "FC2Forward", node_names, n_fc_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size() + n_fc_2, 2)), //weight_init, solver_op, 0.0f, 0.0f, add_biases, specify_layers); } model_builder.addFullyConnected(model, "FC0Forward", node_names, std::vector<std::string>({ node_names_masses_t1.at(mass_iter) }), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size() + 1, 2)), //weight_init, solver_op, 0.0f, specify_layers); } model.setInputAndOutputNodes(); } }; template<typename TensorT> class ModelReplicatorExt : public ModelReplicatorExperimental<TensorT> { public: /* @brief Implementation of the `adaptiveReplicatorScheduler` */ void adaptiveReplicatorScheduler( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations)override { // Adjust the models modifications rates //this->setModificationRateByPrevError(n_generations, models, models_errors_per_generations); this->setModificationRateFixed(n_generations, models, models_errors_per_generations); } }; template<typename TensorT> class PopulationTrainerExt : public PopulationTrainerExperimentalDefaultDevice<TensorT> { public: /* @brief Implementation of the `adaptivePopulationScheduler` */ void adaptivePopulationScheduler( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations)override { // Adjust the population size //this->setPopulationSizeFixed(n_generations, models, models_errors_per_generations); // [TODO: single model training requires the line below to be commented] this->setPopulationSizeDoubling(n_generations, models, models_errors_per_generations); } void trainingPopulationLogger( const int& n_generations, std::vector<Model<TensorT>>& models, PopulationLogger<TensorT>& population_logger, const std::vector<std::tuple<int, std::string, TensorT>>& models_validation_errors_per_generation)override { // Export the selected models for (auto& model : models) { ModelFile<TensorT> data; data.storeModelCsv(model.getName() + "_" + std::to_string(n_generations) + "_nodes.csv", model.getName() + "_" + std::to_string(n_generations) + "_links.csv", model.getName() + "_" + std::to_string(n_generations) + "_weights.csv", model); } // Log the population statistics population_logger.writeLogs(n_generations, models_validation_errors_per_generation); } }; template<class ...ParameterTypes> void main_HarmonicOscillator1D(const ParameterTypes& ...args) { auto parameters = std::make_tuple(args...); // define the population trainer parameters PopulationTrainerExt<float> population_trainer; population_trainer.setNGenerations(std::get<EvoNetParameters::PopulationTrainer::NGenerations>(parameters).get()); // population training population_trainer.setLogging(true); population_trainer.setResetModelCopyWeights(true); // define the population logger PopulationLogger<float> population_logger(true, true); // define the multithreading parameters const int n_hard_threads = std::thread::hardware_concurrency(); const int n_threads = (std::get<EvoNetParameters::PopulationTrainer::NInterpreters>(parameters).get() > n_hard_threads) ? n_hard_threads : std::get<EvoNetParameters::PopulationTrainer::NInterpreters>(parameters).get(); // the number of threads // Make the input nodes const int n_masses = 1; std::vector<std::string> input_nodes; for (int i = 0; i < n_masses; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } // Make the output nodes std::vector<std::string> output_nodes; for (int i = 0; i < n_masses; ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes.push_back(name); } // define the data simulator DataSimulatorExt<float> data_simulator; data_simulator.simulation_name_ = std::get<EvoNetParameters::Main::SimulationType>(parameters).get(); // define the model trainers and resources for the trainers std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; for (size_t i = 0; i < n_threads; ++i) { ModelResources model_resources = { ModelDevice(std::get<EvoNetParameters::Main::DeviceId>(parameters).get(), 0) }; ModelInterpreterDefaultDevice<float> model_interpreter(model_resources); model_interpreters.push_back(model_interpreter); } ModelTrainerExt<float> model_trainer; model_trainer.setBatchSize(std::get<EvoNetParameters::ModelTrainer::BatchSize>(parameters).get()); model_trainer.setMemorySize(std::get<EvoNetParameters::ModelTrainer::MemorySize>(parameters).get()); model_trainer.setNEpochsTraining(std::get<EvoNetParameters::ModelTrainer::NEpochsTraining>(parameters).get()); model_trainer.setNEpochsValidation(std::get<EvoNetParameters::ModelTrainer::NEpochsValidation>(parameters).get()); model_trainer.setNEpochsEvaluation(std::get<EvoNetParameters::ModelTrainer::NEpochsEvaluation>(parameters).get()); model_trainer.setNTBPTTSteps(std::get<EvoNetParameters::ModelTrainer::NTBTTSteps>(parameters).get()); model_trainer.setNTETTSteps(std::get<EvoNetParameters::ModelTrainer::NTBTTSteps>(parameters).get()); model_trainer.setVerbosityLevel(1); model_trainer.setLogging(true, false, true); model_trainer.setFindCycles(std::get<EvoNetParameters::ModelTrainer::FindCycles>(parameters).get()); // Specified in the model model_trainer.setFastInterpreter(std::get<EvoNetParameters::ModelTrainer::FastInterpreter>(parameters).get()); // IG default model_trainer.setPreserveOoO(true); std::vector<LossFunctionHelper<float>> loss_function_helpers; LossFunctionHelper<float> loss_function_helper2; loss_function_helper2.output_nodes_ = output_nodes; loss_function_helper2.loss_functions_ = { std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-24, 1.0)) }; loss_function_helper2.loss_function_grads_ = { std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-24, 1.0)) }; loss_function_helpers.push_back(loss_function_helper2); model_trainer.setLossFunctionHelpers(loss_function_helpers); std::vector<MetricFunctionHelper<float>> metric_function_helpers; MetricFunctionHelper<float> metric_function_helper1; metric_function_helper1.output_nodes_ = output_nodes; metric_function_helper1.metric_functions_ = { std::make_shared<PearsonROp<float>>(PearsonROp<float>("Mean")), std::make_shared<PearsonROp<float>>(PearsonROp<float>("Var")), std::make_shared<EuclideanDistOp<float>>(EuclideanDistOp<float>("Mean")), std::make_shared<EuclideanDistOp<float>>(EuclideanDistOp<float>("Var")) }; metric_function_helper1.metric_names_ = { "PearsonR-Mean", "PearsonR-Var", "EuclideanDist-Mean", "EuclideanDist-Var" }; metric_function_helpers.push_back(metric_function_helper1); model_trainer.setMetricFunctionHelpers(metric_function_helpers); // define the model logger ModelLogger<float> model_logger(true, true, true, false, false, true, false, true); // define the model replicator for growth mode ModelReplicatorExt<float> model_replicator; model_replicator.setNodeActivations({ std::make_pair(std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>())), std::make_pair(std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>())), std::make_pair(std::make_shared<ELUOp<float>>(ELUOp<float>()), std::make_shared<ELUGradOp<float>>(ELUGradOp<float>())), std::make_pair(std::make_shared<SigmoidOp<float>>(SigmoidOp<float>()), std::make_shared<SigmoidGradOp<float>>(SigmoidGradOp<float>())), std::make_pair(std::make_shared<TanHOp<float>>(TanHOp<float>()), std::make_shared<TanHGradOp<float>>(TanHGradOp<float>())), //std::make_pair(std::make_shared<ExponentialOp<float>>(ExponentialOp<float>()), std::make_shared<ExponentialGradOp<float>>(ExponentialGradOp<float>())), //std::make_pair(std::make_shared<LogOp<float>>(LogOp<float>()), std::make_shared<LogGradOp<float>>(LogGradOp<float>())), //std::make_pair(std::shared_ptr<ActivationOp<float>>(new InverseOp<float>()), std::shared_ptr<ActivationOp<float>>(new InverseGradOp<float>())) }); model_replicator.setNodeIntegrations({ std::make_tuple(std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())), std::make_tuple(std::make_shared<ProdOp<float>>(ProdOp<float>()), std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()), std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>())), //std::make_tuple(std::make_shared<MeanOp<float>>(MeanOp<float>()), std::make_shared<MeanErrorOp<float>>(MeanErrorOp<float>()), std::make_shared<MeanWeightGradO<float>>(MeanWeightGradOp<float>())), //std::make_tuple(std::make_shared<VarModOp<float>>(VarModOp<float>()), std::make_shared<VarModErrorOp<float>>(VarModErrorOp<float>()), std::make_shared<VarModWeightGradOp<float>>(VarModWeightGradOp<float>())), //std::make_tuple(std::make_shared<CountOp<float>>(CountOp<float>()), std::make_shared<CountErrorOp<float>>(CountErrorOp<float>()), std::make_shared<CountWeightGradOp<float>>(CountWeightGradOp<float>())) }); // define the initial population Model<float> model; if (std::get<EvoNetParameters::Main::MakeModel>(parameters).get()) { std::cout << "Making the model..." << std::endl; if (std::get<EvoNetParameters::Main::TrainModel>(parameters).get()) ModelTrainerExt<float>().makeHarmonicOscillator1D(model, 1, 32, 0, false, true); else if (std::get<EvoNetParameters::Main::EvolveModel>(parameters).get()) ModelTrainerExt<float>().makeHarmonicOscillator1D(model, 1, 8, 0, false, false); } else { // read in the trained model std::cout << "Reading in the model..." << std::endl; ModelFile<float> model_file; //model_file.loadModelBinary(std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_model.binary", model); model_file.loadModelCsv(std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_nodes.csv", std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_links.csv", std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_weights.csv", model, true, true, true); model.addCyclicPairs(std::make_pair("Output_000000000000", "Mass(t)_000000000000")); //model.addCyclicPairs(std::make_pair("Mass(t+1)_000000000000", "Mass(t)_000000000000")); model.setId(1); //ModelInterpreterFileDefaultDevice<float> model_interpreter_file; //model_interpreter_file.loadModelInterpreterBinary(std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get() + "_interpreter.binary", model_interpreters[0]); // FIX ME! } model.setName(std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::Main::ModelName>(parameters).get()); //So that all output will be written to a specific directory if (std::get<EvoNetParameters::Main::TrainModel>(parameters).get()) { // Train the model model.setName(model.getName() + "_train"); std::pair<std::vector<float>, std::vector<float>> model_errors = model_trainer.trainModel(model, data_simulator, input_nodes, model_logger, model_interpreters.front()); } else if (std::get<EvoNetParameters::Main::EvolveModel>(parameters).get()) { // Evolve the population std::vector<Model<float>> population = { model }; std::vector<std::vector<std::tuple<int, std::string, float>>> models_validation_errors_per_generation = population_trainer.evolveModels( population, std::get<EvoNetParameters::General::DataDir>(parameters).get() + std::get<EvoNetParameters::PopulationTrainer::PopulationName>(parameters).get(), //So that all output will be written to a specific directory model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, population_logger, input_nodes); PopulationTrainerFile<float> population_trainer_file; population_trainer_file.storeModels(population, "HarmonicOscillator"); population_trainer_file.storeModelValidations("HarmonicOscillatorErrors.csv", models_validation_errors_per_generation); } else if (std::get<EvoNetParameters::Main::EvaluateModel>(parameters).get()) { //// Evaluate the population //std::vector<Model<float>> population = { model }; //population_trainer.evaluateModels( // population, model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, input_nodes); // Evaluate the model model.setName(model.getName() + "_evaluation"); Eigen::Tensor<float, 4> model_output = model_trainer.evaluateModel(model, data_simulator, input_nodes, model_logger, model_interpreters.front()); } } /* @brief Run the training/evolution/evaluation from the command line Example: ./HarmonicOscillator_DefaultDevice_example 0 "C:/Users/dmccloskey/Documents/GitHub/EvoNetData/MNIST_examples/HarmonicOscillator/Parameters.csv" @param id ID of the parameters @param parameters_filename the name and path of the parameters_file */ int main(int argc, char** argv) { // Parse the user commands int id_int = -1; std::string parameters_filename = ""; parseCommandLineArguments(argc, argv, id_int, parameters_filename); // Set the parameter names and defaults EvoNetParameters::General::ID id("id", -1); EvoNetParameters::General::DataDir data_dir("data_dir", std::string("")); EvoNetParameters::Main::DeviceId device_id("device_id", 0); EvoNetParameters::Main::ModelName model_name("model_name", ""); EvoNetParameters::Main::MakeModel make_model("make_model", true); EvoNetParameters::Main::LoadModelCsv load_model_csv("load_model_csv", false); EvoNetParameters::Main::LoadModelBinary load_model_binary("load_model_binary", false); EvoNetParameters::Main::TrainModel train_model("train_model", true); EvoNetParameters::Main::EvolveModel evolve_model("evolve_model", false); EvoNetParameters::Main::EvaluateModel evaluate_model("evaluate_model", false); EvoNetParameters::Examples::NMask n_mask("n_mask", 2); EvoNetParameters::Examples::SequenceLength sequence_length("sequence_length", 25); EvoNetParameters::Examples::ModelType model_type("model_type", "Solution"); EvoNetParameters::Examples::SimulationType simulation_type("simulation_type", ""); EvoNetParameters::Examples::BiochemicalRxnsFilename biochemical_rxns_filename("biochemical_rxns_filename", "iJO1366.csv"); EvoNetParameters::PopulationTrainer::PopulationName population_name("population_name", ""); EvoNetParameters::PopulationTrainer::NGenerations n_generations("n_generations", 1); EvoNetParameters::PopulationTrainer::NInterpreters n_interpreters("n_interpreters", 1); EvoNetParameters::PopulationTrainer::PruneModelNum prune_model_num("prune_model_num", 10); EvoNetParameters::PopulationTrainer::RemoveIsolatedNodes remove_isolated_nodes("remove_isolated_nodes", true); EvoNetParameters::PopulationTrainer::CheckCompleteModelInputToOutput check_complete_model_input_to_output("check_complete_model_input_to_output", true); EvoNetParameters::PopulationTrainer::PopulationSize population_size("population_size", 128); EvoNetParameters::PopulationTrainer::NTop n_top("n_top", 8); EvoNetParameters::PopulationTrainer::NRandom n_random("n_random", 8); EvoNetParameters::PopulationTrainer::NReplicatesPerModel n_replicates_per_model("n_replicates_per_model", 1); EvoNetParameters::PopulationTrainer::ResetModelCopyWeights reset_model_copy_weights("reset_model_copy_weights", true); EvoNetParameters::PopulationTrainer::ResetModelTemplateWeights reset_model_template_weights("reset_model_template_weights", true); EvoNetParameters::PopulationTrainer::Logging population_logging("population_logging", true); EvoNetParameters::PopulationTrainer::SetPopulationSizeFixed set_population_size_fixed("set_population_size_fixed", false); EvoNetParameters::PopulationTrainer::SetPopulationSizeDoubling set_population_size_doubling("set_population_size_doubling", true); EvoNetParameters::ModelTrainer::BatchSize batch_size("batch_size", 32); EvoNetParameters::ModelTrainer::MemorySize memory_size("memory_size", 64); EvoNetParameters::ModelTrainer::NEpochsTraining n_epochs_training("n_epochs_training", 1000); EvoNetParameters::ModelTrainer::NEpochsValidation n_epochs_validation("n_epochs_validation", 25); EvoNetParameters::ModelTrainer::NEpochsEvaluation n_epochs_evaluation("n_epochs_evaluation", 10); EvoNetParameters::ModelTrainer::NTBTTSteps n_tbtt_steps("n_tbtt_steps", 64); EvoNetParameters::ModelTrainer::NTETTSteps n_tett_steps("n_tett_steps", 64); EvoNetParameters::ModelTrainer::Verbosity verbosity("verbosity", 1); EvoNetParameters::ModelTrainer::LoggingTraining logging_training("logging_training", true); EvoNetParameters::ModelTrainer::LoggingValidation logging_validation("logging_validation", false); EvoNetParameters::ModelTrainer::LoggingEvaluation logging_evaluation("logging_evaluation", true); EvoNetParameters::ModelTrainer::FindCycles find_cycles("find_cycles", true); EvoNetParameters::ModelTrainer::FastInterpreter fast_interpreter("fast_interpreter", true); EvoNetParameters::ModelTrainer::PreserveOoO preserve_ooo("preserve_ooo", true); EvoNetParameters::ModelTrainer::InterpretModel interpret_model("interpret_model", true); EvoNetParameters::ModelTrainer::ResetModel reset_model("reset_model", false); EvoNetParameters::ModelTrainer::ResetInterpreter reset_interpreter("reset_interpreter", true); EvoNetParameters::ModelReplicator::NNodeDownAdditionsLB n_node_down_additions_lb("n_node_down_additions_lb", 0); EvoNetParameters::ModelReplicator::NNodeRightAdditionsLB n_node_right_additions_lb("n_node_right_additions_lb", 0); EvoNetParameters::ModelReplicator::NNodeDownCopiesLB n_node_down_copies_lb("n_node_down_copies_lb", 0); EvoNetParameters::ModelReplicator::NNodeRightCopiesLB n_node_right_copies_lb("n_node_right_copies_lb", 0); EvoNetParameters::ModelReplicator::NLinkAdditionsLB n_link_additons_lb("n_link_additons_lb", 0); EvoNetParameters::ModelReplicator::NLinkCopiesLB n_link_copies_lb("n_link_copies_lb", 0); EvoNetParameters::ModelReplicator::NNodeDeletionsLB n_node_deletions_lb("n_node_deletions_lb", 0); EvoNetParameters::ModelReplicator::NLinkDeletionsLB n_link_deletions_lb("n_link_deletions_lb", 0); EvoNetParameters::ModelReplicator::NNodeActivationChangesLB n_node_activation_changes_lb("n_node_activation_changes_lb", 0); EvoNetParameters::ModelReplicator::NNodeIntegrationChangesLB n_node_integration_changes_lb("n_node_integration_changes_lb", 0); EvoNetParameters::ModelReplicator::NModuleAdditionsLB n_module_additions_lb("n_module_additions_lb", 0); EvoNetParameters::ModelReplicator::NModuleCopiesLB n_module_copies_lb("n_module_copies_lb", 0); EvoNetParameters::ModelReplicator::NModuleDeletionsLB n_module_deletions_lb("n_module_deletions_lb", 0); EvoNetParameters::ModelReplicator::NNodeDownAdditionsUB n_node_down_additions_ub("n_node_down_additions_ub", 0); EvoNetParameters::ModelReplicator::NNodeRightAdditionsUB n_node_right_additions_ub("n_node_right_additions_ub", 0); EvoNetParameters::ModelReplicator::NNodeDownCopiesUB n_node_down_copies_ub("n_node_down_copies_ub", 0); EvoNetParameters::ModelReplicator::NNodeRightCopiesUB n_node_right_copies_ub("n_node_right_copies_ub", 0); EvoNetParameters::ModelReplicator::NLinkAdditionsUB n_link_additons_ub("n_link_additons_ub", 0); EvoNetParameters::ModelReplicator::NLinkCopiesUB n_link_copies_ub("n_link_copies_ub", 0); EvoNetParameters::ModelReplicator::NNodeDeletionsUB n_node_deletions_ub("n_node_deletions_ub", 0); EvoNetParameters::ModelReplicator::NLinkDeletionsUB n_link_deletions_ub("n_link_deletions_ub", 0); EvoNetParameters::ModelReplicator::NNodeActivationChangesUB n_node_activation_changes_ub("n_node_activation_changes_ub", 0); EvoNetParameters::ModelReplicator::NNodeIntegrationChangesUB n_node_integration_changes_ub("n_node_integration_changes_ub", 0); EvoNetParameters::ModelReplicator::NModuleAdditionsUB n_module_additions_ub("n_module_additions_ub", 0); EvoNetParameters::ModelReplicator::NModuleCopiesUB n_module_copies_ub("n_module_copies_ub", 0); EvoNetParameters::ModelReplicator::NModuleDeletionsUB n_module_deletions_ub("n_module_deletions_ub", 0); EvoNetParameters::ModelReplicator::SetModificationRateFixed set_modification_rate_fixed("set_modification_rate_fixed", false); EvoNetParameters::ModelReplicator::SetModificationRateByPrevError set_modification_rate_by_prev_error("set_modification_rate_by_prev_error", false); auto parameters = std::make_tuple(id, data_dir, device_id, model_name, make_model, load_model_csv, load_model_binary, train_model, evolve_model, evaluate_model, n_mask, sequence_length, model_type, simulation_type, biochemical_rxns_filename, population_name, n_generations, n_interpreters, prune_model_num, remove_isolated_nodes, check_complete_model_input_to_output, population_size, n_top, n_random, n_replicates_per_model, reset_model_copy_weights, reset_model_template_weights, population_logging, set_population_size_fixed, set_population_size_doubling, batch_size, memory_size, n_epochs_training, n_epochs_validation, n_epochs_evaluation, n_tbtt_steps, n_tett_steps, verbosity, logging_training, logging_validation, logging_evaluation, find_cycles, fast_interpreter, preserve_ooo, interpret_model, reset_model, reset_interpreter, n_node_down_additions_lb, n_node_right_additions_lb, n_node_down_copies_lb, n_node_right_copies_lb, n_link_additons_lb, n_link_copies_lb, n_node_deletions_lb, n_link_deletions_lb, n_node_activation_changes_lb, n_node_integration_changes_lb, n_module_additions_lb, n_module_copies_lb, n_module_deletions_lb, n_node_down_additions_ub, n_node_right_additions_ub, n_node_down_copies_ub, n_node_right_copies_ub, n_link_additons_ub, n_link_copies_ub, n_node_deletions_ub, n_link_deletions_ub, n_node_activation_changes_ub, n_node_integration_changes_ub, n_module_additions_ub, n_module_copies_ub, n_module_deletions_ub, set_modification_rate_fixed, set_modification_rate_by_prev_error); // Read in the parameters LoadParametersFromCsv loadParametersFromCsv(id_int, parameters_filename); parameters = EvoNet::apply([&loadParametersFromCsv](auto&& ...args) { return loadParametersFromCsv(args...); }, parameters); // Run the application EvoNet::apply([](auto&& ...args) { main_HarmonicOscillator1D(args ...); }, parameters); return 0; }<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE Statistics test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/core/Statistics.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(statistics) BOOST_AUTO_TEST_CASE(S_getConfidenceIntervals) { std::vector<float> data = { 0, 2, 9, 8, 5, 3, 1, 7, 6, 4 }; std::pair<float,float> result = confidence(data, 0.1f); BOOST_CHECK_CLOSE(result.first, 0, 1e-3); BOOST_CHECK_CLOSE(result.second, 9, 1e-3); } BOOST_AUTO_TEST_CASE(S_moment) { float data[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; int n = 10; float ave, adev, sdev, var, skew, curt; moment(data, n, ave, adev, sdev, var, skew, curt); BOOST_CHECK_CLOSE(ave, 4.5, 1e-3); BOOST_CHECK_CLOSE(adev, 2.5, 1e-3); BOOST_CHECK_CLOSE(sdev, 3.02765, 1e-3); BOOST_CHECK_CLOSE(var, 9.1666667, 1e-3); BOOST_CHECK_CLOSE(skew, 0.0, 1e-3); BOOST_CHECK_CLOSE(curt, -1.56163645, 1e-3); } BOOST_AUTO_TEST_CASE(S_kstwo) { float data1[] = { 0.55370819,-1.45963199,-1.29458514,-1.50967395,1.5718749,-0.97569619,0.48069879,0.62561431,0.72235302,0.91032644 }; float data2[] = { 0.57870121,-1.60018641,0.25349027,-0.5041274,1.56796895,1.78298162,0.76469507,2.10362939,1.25984919,1.57030662,1.50733272,2.0732344 }; float d, prob; kstwo(data1, 10, data2, 12, d, prob); BOOST_CHECK_CLOSE(d, 0.466666669, 1e-5); BOOST_CHECK_CLOSE(prob, 0.130679056, 1e-5); //BOOST_CHECK_CLOSE(d, 0.48333333333333334, 1e-5); //python3.6 //BOOST_CHECK_CLOSE(prob, 0.10718344778577717, 1e-5); //python3.6 } BOOST_AUTO_TEST_CASE(S_fisherExactTest) { double prob = fisherExactTest<double>(1982, 3018, 2056, 2944); BOOST_CHECK_CLOSE(prob, 0.1367998254147284, 1e-5); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELTRAINERDEFAULTDEVICE_H #define EVONET_MODELTRAINERDEFAULTDEVICE_H // .h #include <EvoNet/ml/ModelTrainer.h> #include <EvoNet/ml/ModelInterpreterDefaultDevice.h> // .cpp namespace EvoNet { /** @brief Class to train a network model */ template<typename TensorT> class ModelTrainerDefaultDevice : public ModelTrainer<TensorT, ModelInterpreterDefaultDevice<TensorT>> { public: ModelTrainerDefaultDevice() = default; ///< Default constructor ~ModelTrainerDefaultDevice() = default; ///< Default destructor }; } #endif //EVONET_MODELTRAINERDEFAULTDEVICE_H<file_sep> # -------------------------------------------------------------------------- set(IGNORE_FILES_IN_BUILD_DIRECTORY "^${PROJECT_BINARY_DIR}") # -------------------------------------------------------------------------- # add_cpplint_tests : This macro generates cpplint tests for files in the # given directory. # # The function searches for all sources files in the given directory and # and generates a cpplint tests for each individual file. macro(add_cpplint_tests _directory) # find files in _directory file(GLOB_RECURSE _source_files RELATIVE ${SMARTPEAK_HOST_DIRECTORY}/src/${_directory}/ ${SMARTPEAK_HOST_DIRECTORY}/src/${_directory}/*.cpp) # add tests foreach(_file_to_test ${_source_files}) string( REGEX MATCH ${DO_NOT_TEST_THESE_FILES_REGEX} _do_not_test ${_file_to_test} ) string( REGEX MATCH ${IGNORE_FILES_IN_BUILD_DIRECTORY} _is_in_bin_dir ${_file_to_test}) if(NOT _do_not_test AND NOT _is_in_bin_dir) set(_test_name "src/${_directory}/${_file_to_test}_cpplint_test") add_test(${_test_name} "${PYTHON_EXECUTABLE}" "${PROJECT_SOURCE_DIR}/cpplint.py" "--verbose=5" "${SMARTPEAK_HOST_DIRECTORY}/src/${_directory}/${_file_to_test}") set_tests_properties( ${_test_name} PROPERTIES FAIL_REGULAR_EXPRESSION "${CPPLINT_FAIL_REGULAR_EXPRESSION}") endif() endforeach() endmacro() # -------------------------------------------------------------------------- # create tests for all files in the individual file groups add_cpplint_tests("smartpeak") <file_sep>## define some source directories set(CF_EVONET_DATA_PATH ${EVONET_HOST_DIRECTORY}/share/EvoNet CACHE INTERNAL "Path to the shared documents of EvoNet.") set(CF_EVONET_DOC_PATH ${EVONET_HOST_DIRECTORY}/doc CACHE INTERNAL "Path to the documentation of EvoNet.") ## and the corresponding ones when installed set(CF_EVONET_INSTALL_DATA_PATH ${INSTALL_SHARE_DIR} CACHE INTERNAL "Path to the installed shared documents of EvoNet.") set(CF_EVONET_INSTALL_DOC_PATH ${INSTALL_DOC_DIR} CACHE INTERNAL "Path to the installed documentation of EvoNet." ) ## check for Microsoft Visual Studio compiler if (MSVC) set(EVONET_COMPILER_MSVC "1" CACHE INTERNAL "Do we use Microsoft Compiler?") endif() ## check for G++ if (CMAKE_COMPILER_IS_GNUCXX) set(EVONET_COMPILER_GXX "1" CACHE INTERNAL "Do we use G++ Compiler?") endif() INCLUDE(TestBigEndian) TEST_BIG_ENDIAN(EVONET_BIG_ENDIAN) ## check 32/64 bit architecture (defined above!) if (NOT DEFINED EVONET_64BIT_ARCHITECTURE) message(FATAL_ERROR "Cmake script was re-ordered and is now invalid! Please make sure that EVONET_64BIT_ARCHITECTURE is defined when config.h.in is configured!") endif() include(CheckTypeSize) ## Check sizeof a type CHECK_TYPE_SIZE("unsigned char" SIZE_UCHAR) CHECK_TYPE_SIZE("unsigned short" SIZE_USHORT) CHECK_TYPE_SIZE("unsigned int" SIZE_UINT) CHECK_TYPE_SIZE("unsigned long" SIZE_ULONG) CHECK_TYPE_SIZE("unsigned long long" SIZE_ULONGLONG) CHECK_TYPE_SIZE("short" SIZE_SHORT) CHECK_TYPE_SIZE("int" SIZE_INT) CHECK_TYPE_SIZE("long" SIZE_LONG) CHECK_TYPE_SIZE("long long" SIZE_LONGLONG) CHECK_TYPE_SIZE("int32_t" SIZE_INT32) if (HAVE_SIZE_INT32) set(CF_EVONET_INT32_TYPE int32_t) else() ## search for another Int32 type if (SIZE_INT MATCHES "4") set(CF_EVONET_INT32_TYPE int) elseif (SIZE_SHORT MATCHES "4") set(CF_EVONET_INT32_TYPE short) elseif (SIZE_LONG MATCHES "4") set(CF_EVONET_INT32_TYPE long) else() Message(FATAL_ERROR "Cannot find signed 32bit integer type. Please contact the developers!") endif() endif() CHECK_TYPE_SIZE("int64_t" SIZE_INT64) if (HAVE_SIZE_INT64) set(CF_EVONET_INT64_TYPE int64_t) else() ## search for another Int64 type if (SIZE_INT MATCHES "8") set(CF_EVONET_INT64_TYPE int) elseif (SIZE_LONG MATCHES "8") set(CF_EVONET_INT64_TYPE long) elseif (SIZE_LONGLONG MATCHES "8") set(CF_EVONET_INT64_TYPE "long long") else() Message(FATAL_ERROR "Cannot find signed 64bit integer type. Please contact the developers!") endif() endif() CHECK_TYPE_SIZE("uint8_t" SIZE_UINT8) if (HAVE_SIZE_UINT8) set(CF_EVONET_BYTE_TYPE uint8_t) else() ## search for another uint8 type if (SIZE_UCHAR MATCHES "1") set(CF_EVONET_BYTE_TYPE "unsigned char") elseif (SIZE_USHORT MATCHES "1") set(CF_EVONET_BYTE_TYPE "unsigned short") else() Message(FATAL_ERROR "Cannot find unsigned 8bit integer (byte) type. Please contact the developers!") endif() endif() CHECK_TYPE_SIZE("uint32_t" SIZE_UINT32) if (HAVE_SIZE_UINT32) set(CF_EVONET_UINT32_TYPE uint32_t) else() ## search for another UInt32 type if (SIZE_UINT MATCHES "4") set(CF_EVONET_UINT32_TYPE "unsigned int") elseif (SIZE_USHORT MATCHES "4") set(CF_EVONET_UINT32_TYPE "unsigned short") elseif (SIZE_ULONG MATCHES "4") set(CF_EVONET_UINT32_TYPE "unsigned long") else() Message(FATAL_ERROR "Cannot find unsigned 32bit integer type. Please contact the developers!") endif() endif() CHECK_TYPE_SIZE("uint64_t" SIZE_UINT64) if (HAVE_SIZE_UINT64) set(CF_EVONET_UINT64_TYPE uint64_t) else() ## search for another uint64 type if (SIZE_ULONG MATCHES "8") set(CF_EVONET_UINT64_TYPE "unsigned long") elseif (SIZE_ULONGLONG MATCHES "8") set(CF_EVONET_UINT64_TYPE "unsigned long long") else() Message(FATAL_ERROR "Cannot find uint64 type. Please contact the developers!") endif() endif() #------------------------------------------------------------------------------ ## Check if various system heards exist include(CheckIncludeFileCXX) CHECK_INCLUDE_FILE_CXX("unistd.h" EVONET_HAS_UNISTD_H) CHECK_INCLUDE_FILE_CXX("process.h" EVONET_HAS_PROCESS_H) CHECK_INCLUDE_FILE_CXX("time.h" EVONET_HAS_TIME_H) CHECK_INCLUDE_FILE_CXX("sys/types.h" EVONET_HAS_SYS_TYPES_H) CHECK_INCLUDE_FILE_CXX("sys/times.h" EVONET_HAS_SYS_TIMES_H) CHECK_INCLUDE_FILE_CXX("sys/time.h" EVONET_HAS_SYS_TIME_H) CHECK_INCLUDE_FILE_CXX("stdint.h" EVONET_HAS_STDINT_H) #------------------------------------------------------------------------------ # check for libc++ bug try_run(_stream_bug_run_result_var _stream_bug_compile_var ${CMAKE_BINARY_DIR} ${EVONET_HOST_DIRECTORY}/cmake/modules/check_string_stream_bug.cxx) # set stream variable if(NOT _stream_bug_run_result_var) set(EVONET_HAS_STREAM_EXTRACTION_BUG "1") endif() #------------------------------------------------------------------------------ # check if certain c++ functions exist include(CheckFunctionExists) ## in MinGW we have the signal.h header, but no kill() as in Linux, so we need to check for the kill() function CHECK_FUNCTION_EXISTS("kill" EVONET_HAS_KILL) CHECK_FUNCTION_EXISTS("sysconf" EVONET_HAS_SYSCONF) #------------------------------------------------------------------------------ # Create the config.h # replace any variables in config.h.in with current values set (CONFIGURED_CONFIG_H ${PROJECT_BINARY_DIR}/include/EvoNet/config.h) configure_file(${PROJECT_SOURCE_DIR}/include/EvoNet/config.h.in ${CONFIGURED_CONFIG_H}) #------------------------------------------------------------------------------ # Create evonet_package_version.h # replace any variables in evonet_package_version.h.in with current values set (CONFIGURED_EVONET_PACKAGE_VERSION_H ${PROJECT_BINARY_DIR}/include/EvoNet/evonet_package_version.h) configure_file(${PROJECT_SOURCE_DIR}/include/EvoNet/evonet_package_version.h.in ${CONFIGURED_EVONET_PACKAGE_VERSION_H}) #------------------------------------------------------------------------------ # create paths header set(CONFIGURED_EVONET_DATA_PATH_H ${PROJECT_BINARY_DIR}/include/EvoNet/evonet_data_path.h) configure_file(${PROJECT_SOURCE_DIR}/include/EvoNet/evonet_data_path.h.in ${CONFIGURED_EVONET_DATA_PATH_H}) #------------------------------------------------------------------------------ # export a list of all configured heders set(EvoNet_configured_headers "${CONFIGURED_CONFIG_H};${CONFIGURED_EVONET_PACKAGE_VERSION_H};${CONFIGURED_EVONET_DATA_PATH_H}") <file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE CircuitFinder test suite // #include <boost/test/unit_test.hpp> // changes every so often... #include <boost/test/included/unit_test.hpp> #include <EvoNet/graph/CircuitFinder.h> #include <EvoNet/ml/Link.h> #include <EvoNet/ml/Node.h> #include <vector> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(circuitFinder) BOOST_AUTO_TEST_CASE(test) { std::list<int>* A1; A1 = new std::list<int>[5]; A1[0].push_back(2); A1[1].push_back(2); A1[1].push_back(3); A1[1].push_back(4); A1[2].push_back(5); A1[3].push_back(3); A1[4].push_back(1); CircuitFinder CF1(A1, 5); CF1.run(); BOOST_CHECK_EQUAL(CF1.getCycles()[0].first, 5); BOOST_CHECK_EQUAL(CF1.getCycles()[1].first, 5); BOOST_CHECK_EQUAL(CF1.getCycles()[2].first, 2); BOOST_CHECK_EQUAL(CF1.getCycles()[0].second, 1); BOOST_CHECK_EQUAL(CF1.getCycles()[1].second, 1); BOOST_CHECK_EQUAL(CF1.getCycles()[2].second, 2); std::list<int>* A2; A2 = new std::list<int>[6]; A2[0].push_back(2); A2[0].push_back(5); A2[1].push_back(3); A2[2].push_back(1); A2[2].push_back(2); A2[2].push_back(4); A2[2].push_back(6); A2[3].push_back(5); A2[4].push_back(2); A2[5].push_back(4); CircuitFinder CF2(A2, 6); CF2.run(); BOOST_CHECK_EQUAL(CF2.getCycles()[0].first, 3); BOOST_CHECK_EQUAL(CF2.getCycles()[1].first, 3); BOOST_CHECK_EQUAL(CF2.getCycles()[2].first, 3); BOOST_CHECK_EQUAL(CF2.getCycles()[3].first, 5); BOOST_CHECK_EQUAL(CF2.getCycles()[4].first, 5); BOOST_CHECK_EQUAL(CF2.getCycles()[0].second, 1); BOOST_CHECK_EQUAL(CF2.getCycles()[1].second, 1); BOOST_CHECK_EQUAL(CF2.getCycles()[2].second, 2); BOOST_CHECK_EQUAL(CF2.getCycles()[3].second, 2); BOOST_CHECK_EQUAL(CF2.getCycles()[4].second, 2); } BOOST_AUTO_TEST_SUITE_END()<file_sep>### example CMakeLists.txt to develop programs using OpenMS project("OpenMS_ExternalCodeTest") cmake_minimum_required(VERSION 3.0.2) ## list all your executables here (a corresponding .cpp file should exist, e.g. TestExternalCode.cpp) set(my_executables TestExternalCode ) ## list all classes here, which are required by your executables ## (all these classes will be linked into a library) set(my_sources ExampleLibraryFile.cpp ) ## find OpenMS package and register target "OpenMS" (our library) ## Note: This is customized to fit the nightly test scenario. In a ## regular build find_package(OpenMS) should be sufficient. find_package(OpenMS PATHS "$ENV{OPENMS_BUILD_TREE}" NO_CMAKE_PACKAGE_REGISTRY) # check whether the OpenMS package was found if (OpenMS_FOUND) ## include directories for OpenMS headers (and contrib) include_directories(${OpenMS_INCLUDE_DIRECTORIES}) ## append precompiler macros specific to OpenMS ## Warning: this could be harmful to your project. Check this ## if problems occur set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPENMS_ADDCXX_FLAGS}") add_definitions(${OPENMS_DEFINITIONS}) ## library with additional classes from above add_library(my_custom_lib STATIC ${my_sources}) target_link_libraries(my_custom_lib OpenMS) ## add targets for the executables foreach(i ${my_executables}) # create the executable add_executable(${i} ${i}.cpp) ## link executables against OpenMS target_link_libraries(${i} OpenMS my_custom_lib) endforeach(i) else(OpenMS_FOUND) message(FATAL_ERROR "OpenMSConfig.cmake file not found!") endif(OpenMS_FOUND) ## Enable testing - for Nightly Build log include(Dart) add_test(TestExternalCode TestExternalCode) <file_sep>/**TODO: Add copyright*/ #ifndef EVONET_CVAEFULLYCONNGPU_H #define EVONET_CVAEFULLYCONNGPU_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> // .h #include <EvoNet/models/CVAEFullyConn.h> #include <EvoNet/ml/ModelInterpreterGpu.h> // .cpp #include <EvoNet/io/ModelInterpreterFileGpu.h> #include <EvoNet/io/ModelFile.h> namespace EvoNet { /** @brief TODO */ template<typename TensorT> class CVAEFullyConnGpu : public CVAEFullyConn<TensorT, ModelInterpreterGpu<TensorT>> { public: CVAEFullyConnGpu() = default; ///< Default constructor ~CVAEFullyConnGpu() = default; ///< Default destructor /// Overrides used in all examples void adaptiveTrainerScheduler(const int& n_generations, const int& n_epochs, Model<TensorT>& model, ModelInterpreterGpu<TensorT>& model_interpreter, const std::vector<TensorT>& model_errors) override; void trainingModelLogger(const int& n_epochs, Model<TensorT>& model, ModelInterpreterGpu<TensorT>& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error_train, const TensorT& model_error_test, const Eigen::Tensor<TensorT, 1>& model_metrics_train, const Eigen::Tensor<TensorT, 1>& model_metrics_test) override; void validationModelLogger(const int& n_epochs, Model<TensorT>& model, ModelInterpreterGpu<TensorT>& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error_train, const TensorT& model_error_test, const Eigen::Tensor<TensorT, 1>& model_metrics_train, const Eigen::Tensor<TensorT, 1>& model_metrics_test) override; }; template<typename TensorT> inline void CVAEFullyConnGpu<TensorT>::adaptiveTrainerScheduler(const int& n_generations, const int& n_epochs, Model<TensorT>& model, ModelInterpreterGpu<TensorT>& model_interpreter, const std::vector<TensorT>& model_errors) { // Check point the model every 1000 epochs if (n_epochs % 1000 == 0 && n_epochs != 0) { model_interpreter.getModelResults(model, false, true, false, false); //// save the model weights (Not needed if binary is working fine) //WeightFile<float> weight_data; //weight_data.storeWeightValuesCsv(model.getName() + "_" + std::to_string(n_epochs) + "_weights.csv", model.weights_); // save the model and tensors to binary // BUG: Stored binarized model does not change when the model has been loaded from binary ModelFile<TensorT> data; data.storeModelBinary(model.getName() + "_" + std::to_string(n_epochs) + "_model.binary", model); ModelInterpreterFileGpu<TensorT> interpreter_data; interpreter_data.storeModelInterpreterBinary(model.getName() + "_" + std::to_string(n_epochs) + "_interpreter.binary", model_interpreter); } // copy the loss function helpers auto lossFunctionHelpers = this->getLossFunctionHelpers(); // Increase the KL divergence beta and capacity if (this->getLossFunctionHelpers().size() >= 2) { TensorT beta_c = this->beta_c_; TensorT beta_d = this->beta_d_; TensorT capacity_c = this->capacity_c_; TensorT capacity_d = this->capacity_d_; if (this->KL_divergence_warmup_) { TensorT scale_factor1 = (n_epochs - 100 > 0) ? n_epochs - 100 : 1; beta_c /= (2.5e4 / scale_factor1); if (beta_c > this->beta_c_) beta_c = this->beta_c_; beta_d /= (2.5e4 / scale_factor1); if (beta_d > this->beta_d_) beta_d = this->beta_d_; TensorT scale_factor2 = (n_epochs - 1.0e4 > 0) ? n_epochs - 1.0e4 : 1; capacity_c /= (1.5e4 / scale_factor2); if (capacity_c > this->capacity_c_) capacity_c = this->capacity_c_; capacity_d /= (1.5e4 * scale_factor2); if (capacity_d > this->capacity_d_) capacity_d = this->capacity_d_; } lossFunctionHelpers.at(1).loss_functions_.at(0) = std::make_shared<KLDivergenceMuLossOp<float>>(KLDivergenceMuLossOp<float>(1e-6, beta_c, capacity_c)); lossFunctionHelpers.at(2).loss_functions_.at(0) = std::make_shared<KLDivergenceLogVarLossOp<float>>(KLDivergenceLogVarLossOp<float>(1e-6, beta_c, capacity_c)); lossFunctionHelpers.at(3).loss_functions_.at(0) = std::make_shared<KLDivergenceCatLossOp<float>>(KLDivergenceCatLossOp<float>(1e-6, beta_d, capacity_d)); lossFunctionHelpers.at(1).loss_function_grads_.at(0) = std::make_shared<KLDivergenceMuLossGradOp<float>>(KLDivergenceMuLossGradOp<float>(1e-6, beta_c, capacity_c)); lossFunctionHelpers.at(2).loss_function_grads_.at(0) = std::make_shared<KLDivergenceLogVarLossGradOp<float>>(KLDivergenceLogVarLossGradOp<float>(1e-6, beta_c, capacity_c)); lossFunctionHelpers.at(3).loss_function_grads_.at(0) = std::make_shared<KLDivergenceCatLossGradOp<float>>(KLDivergenceCatLossGradOp<float>(1e-6, beta_d, capacity_d)); } // Modulate the level of supervision if (this->getLossFunctionHelpers().size() >= 5) { std::random_device rd; std::uniform_int_distribution<int> distribution(1, 100); std::mt19937 engine(rd()); int value = distribution(engine); TensorT supervision = 1.0; if (value > this->supervision_percent_) supervision = 0.0; if (this->supervision_warmup_) supervision = (n_epochs - 2.5e4 > 0) ? supervision : 1.0; lossFunctionHelpers.at(4).loss_functions_.at(0) = std::make_shared<CrossEntropyWithLogitsLossOp<float>>(CrossEntropyWithLogitsLossOp<float>(1e-6, supervision * this->classification_loss_weight_)); lossFunctionHelpers.at(4).loss_function_grads_.at(0) = std::make_shared<CrossEntropyWithLogitsLossGradOp<float>>(CrossEntropyWithLogitsLossGradOp<float>(1e-6, supervision * this->classification_loss_weight_)); } // Update the loss function helpers this->setLossFunctionHelpers(lossFunctionHelpers); } template <typename TensorT> inline void CVAEFullyConnGpu<TensorT>::trainingModelLogger(const int& n_epochs, Model<TensorT>& model, ModelInterpreterGpu<TensorT>& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error_train, const TensorT& model_error_test, const Eigen::Tensor<TensorT, 1>& model_metrics_train, const Eigen::Tensor<TensorT, 1>& model_metrics_test) { // Set the defaults model_logger.setLogTimeEpoch(true); model_logger.setLogTrainValMetricEpoch(true); model_logger.setLogExpectedEpoch(false); model_logger.setLogNodeInputsEpoch(false); model_logger.setLogNodeOutputsEpoch(false); // initialize all logs if (n_epochs == 0) { model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeInputsEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_logger.initLogs(model); } // Per n epoch logging if (n_epochs % 1000 == 0) { // FIXME model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeInputsEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_interpreter.getModelResults(model, true, false, false, true); } // Create the metric headers and data arrays std::vector<std::string> log_train_headers = { "Train_Error" }; std::vector<std::string> log_test_headers = { "Test_Error" }; std::vector<TensorT> log_train_values = { model_error_train }; std::vector<TensorT> log_test_values = { model_error_test }; int metric_iter = 0; for (const std::string& metric_name : this->getMetricNamesLinearized()) { log_train_headers.push_back(metric_name); log_test_headers.push_back(metric_name); log_train_values.push_back(model_metrics_train(metric_iter)); log_test_values.push_back(model_metrics_test(metric_iter)); ++metric_iter; } model_logger.writeLogs(model, n_epochs, log_train_headers, log_test_headers, log_train_values, log_test_values, output_nodes, expected_values, {}, output_nodes, {}, input_nodes, {}); } template<typename TensorT> inline void CVAEFullyConnGpu<TensorT>::validationModelLogger(const int& n_epochs, Model<TensorT>& model, ModelInterpreterGpu<TensorT>& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error_train, const TensorT& model_error_test, const Eigen::Tensor<TensorT, 1>& model_metrics_train, const Eigen::Tensor<TensorT, 1>& model_metrics_test) { // Per n epoch logging model_logger.setLogTimeEpoch(true); model_logger.setLogTrainValMetricEpoch(true); model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeInputsEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_interpreter.getModelResults(model, true, false, false, true); // initialize all logs if (n_epochs == 0) { model_logger.initLogs(model); } // Create the metric headers and data arrays std::vector<std::string> log_train_headers = { "Test_Error" }; std::vector<std::string> log_test_headers = { "Test_Error" }; std::vector<TensorT> log_train_values = { model_error_train }; std::vector<TensorT> log_test_values = { model_error_test }; int metric_iter = 0; for (const std::string& metric_name : this->getMetricNamesLinearized()) { log_train_headers.push_back(metric_name); log_test_headers.push_back(metric_name); log_train_values.push_back(model_metrics_train(metric_iter)); log_test_values.push_back(model_metrics_test(metric_iter)); ++metric_iter; } model_logger.writeLogs(model, n_epochs, log_train_headers, log_test_headers, log_train_values, log_test_values, output_nodes, expected_values, {}, output_nodes, {}, input_nodes, {}); } } #endif #endif //EVONET_CVAEFULLYCONNGPU_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_INTERPRETER_H #define EVONET_INTERPRETER_H #include <EvoNet/ml/Node.h> #include <EvoNet/ml/Link.h> #include <vector> namespace EvoNet { /** @brief Execution graph interpreter for a network model. The execution graph is modeled as a DAG of tensors (composed of multiple scalar nodes and scalar weights) with input tensors, output tensors, and intemediate tensors. The tensors are defined based on the network model structure and node types of the model. Intended sequence of events: Construct execution graph from the network model For n epochs: Set the input data Set the expected data (if training/validating) Foward propogation: 1. f(source * weights) = sinks 2. calculate the derivatives for back propogation Back propogation (if training): 1. sinks * weights . derivatives = sources 2. adjust the weights Update the network model from the execution graph tensors (if training) TODO: rename to Trainer */ class Interpreter { public: Interpreter(); ///< Default constructor ~Interpreter(); ///< Default destructor /** @brief Allocate tensor dimensions. @returns Status True on success, False if not */ bool allocateTensorMemory(); /** @brief Check if tensor dimensions are consistent with respect to input and out tensors. this changes consistent_ to be false if dimensions do not match. @returns Status True on success, False if not */ bool checkTensorDimensions(); /** @brief Check if the tensor indices for each layer are constent. this changes consistent_ to be false if indices are out of bounds. @returns Status True on success, False if not */ bool checkTensorIndices(); private: // Array of indices representing the tensors that are inputs to the // interpreter. std::vector<int> inputs_tensors_; // Array of indices representing the tensors that are outputs to the // interpreter. std::vector<int> output_tensors_; // Array of indices representing the order of tensors // in the execution graph. std::vector<int> execution_graph_; // Whether the tensors of the execution graph are consistent // with respect to dimensions and indices. bool consistent_ = true; // Whether the model is safe to invoke (if any errors occurred this // will be false). bool invokable_ = false; }; } #endif //EVONET_INTERPRETER_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_TENSORINTEGRATIONFUNCTION_H #define EVONET_TENSORINTEGRATIONFUNCTION_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> #endif #include <EvoNet/core/Preprocessing.h> #include <EvoNet/ml/ActivationFunctionTensor.h> #include <unsupported/Eigen/CXX11/Tensor> #include <typeinfo> //#include <cereal/access.hpp> // serialiation of private members //#undef min // clashes with std::limit on windows in polymorphic.hpp //#undef max // clashes with std::limit on windows in polymorphic.hpp //#include <cereal/types/polymorphic.hpp> namespace EvoNet { /** @brief Base class for all integration functions. */ template<typename TensorT, typename DeviceT> class IntegrationTensorOp { public: IntegrationTensorOp() = default; IntegrationTensorOp(const TensorT& eps) : eps_(eps) {}; ~IntegrationTensorOp() = default; virtual std::string getName() const = 0; virtual void operator()(TensorT* source_output, TensorT* weights, TensorT* sink_input, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) = 0; protected: TensorT eps_ = TensorT(1e-24); TensorT min_ = TensorT(-1e9); TensorT max_ = TensorT(1e9); //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(eps_); // } }; /** @brief Fully Connected Sum integration function */ template<typename TensorT, typename DeviceT> class SumTensorOp : public IntegrationTensorOp<TensorT, DeviceT> { public: SumTensorOp() {}; ~SumTensorOp() {}; void operator()(TensorT* source_output, TensorT* weights, TensorT* sink_input, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { operator_(source_output, weights, sink_input, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); }; template<typename TT=TensorT, std::enable_if_t<std::is_same<TT, double>::value, int> = 0> void operator_(TT* source_output, TT* weights, TT* sink_input, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device){ Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_input_tensor(sink_input, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 4>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TT, 3>> weight_tensor(weights, 1, source_layer_size, sink_layer_size); auto weight_tensor_exp = weight_tensor.broadcast(Eigen::array<int, 3>({ batch_size, 1, 1 })); auto source_bcast = source_output_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, 1, sink_layer_size })); sink_input_tensor.chip(sink_time_step, 1).device(device) += (source_bcast * weight_tensor_exp).sum(Eigen::array<int, 1>({ 1 })).clip(this->min_, this->max_).eval(); } template<typename TT = TensorT, std::enable_if_t<!std::is_same<TT, double>::value, int> = 0> void operator_(TT* source_output, TT* weights, TT* sink_input, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device){ Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_input_tensor(sink_input, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 2>> weight_tensor(weights, source_layer_size, sink_layer_size); Eigen::array<Eigen::IndexPair<int>, 1> product_dims = { Eigen::IndexPair<int>(1, 0) }; sink_input_tensor.chip(sink_time_step, 1).device(device) += (source_output_tensor.chip(source_time_step, 1)).contract(weight_tensor, product_dims).clip(this->min_, this->max_).eval(); } std::string getName() const { return "SumTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Prod integration function */ template<typename TensorT, typename DeviceT> class ProdTensorOp : public IntegrationTensorOp<TensorT, DeviceT> { public: ProdTensorOp() {}; ~ProdTensorOp() {}; void operator()(TensorT* source_output, TensorT* weights, TensorT* sink_input, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_input_tensor(sink_input, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> weight_tensor(weights, 1, source_layer_size, sink_layer_size); //// DEBUG (only on CPU) //std::cout << "[ProdTensorOp]Sink (Start): " << sink_input_tensor.chip(sink_time_step, 1) << std::endl; // Step 1: expand source across the sink layer dim and weight tensor across the batch dim and multiply auto weight_tensor_exp = weight_tensor.broadcast(Eigen::array<int, 3>({ batch_size, 1, 1 })); auto source_bcast = source_output_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, 1, sink_layer_size })); TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size*source_layer_size*sink_layer_size]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * source_layer_size*sink_layer_size * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> source_weight_exp(tmp_data, batch_size, source_layer_size, sink_layer_size); source_weight_exp.device(device) = source_bcast * weight_tensor_exp; // Step 2: determine where the 0s in the original input are propogated to in the source_weight_exp tensor auto source_1 = (source_output_tensor.chip(source_time_step, 1) == source_output_tensor.chip(source_time_step, 1).constant(TensorT(0))).select( //auto source_1 = (source_output_tensor.chip(source_time_step, 1) > source_output_tensor.chip(source_time_step, 1).constant(-this->eps_) && // source_output_tensor.chip(source_time_step, 1) < source_output_tensor.chip(source_time_step, 1).constant(this->eps_)).select( source_output_tensor.chip(source_time_step, 1).constant(TensorT(1)), source_output_tensor.chip(source_time_step, 1).constant(TensorT(0))); auto source_weight_exp_1 = source_1.broadcast(Eigen::array<int, 3>({ 1, 1, sink_layer_size }))*weight_tensor_exp; // Step 3: Substitute 1 for all 0 entries (assuming 0s are non entries) except for the 0s that were propogated from the source output auto source_weight_1 = ( (source_weight_exp == source_weight_exp.constant(TensorT(0))) && (source_weight_exp_1 != source_weight_exp.constant(TensorT(1))) //auto source_weight_1 = ( // (source_weight_exp > source_weight_exp.constant(-this->eps_) && source_weight_exp < source_weight_exp.constant(this->eps_)) && // (source_weight_exp_1 < source_weight_exp.constant(TensorT(1) - this->eps_) || source_weight_exp_1 > source_weight_exp.constant(TensorT(1) + this->eps_)) ).select(source_weight_exp.constant(TensorT(1)), source_weight_exp); // Step 4: multiply along the source dim sink_input_tensor.chip(sink_time_step, 1).device(device) = sink_input_tensor.chip(sink_time_step, 1) * (source_weight_1 ).prod(Eigen::array<int, 1>({ 1 })).clip(this->min_, this->max_).eval(); // Deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif } std::string getName() const { return "ProdTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Prod Singly Connected integration function */ template<typename TensorT, typename DeviceT> class ProdSCTensorOp : public IntegrationTensorOp<TensorT, DeviceT> { public: ProdSCTensorOp() {}; ~ProdSCTensorOp() {}; void operator()(TensorT* source_output, TensorT* weights, TensorT* sink_input, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { //assert(source_layer_size == sink_layer_size); // NOTE: Should work with optimized Weight tensors but this requires specialized methods for the solvers //Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_input_tensor(sink_input, batch_size, memory_size, sink_layer_size); //Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size); //Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> weight_tensor(weights, 1, source_layer_size); //sink_input_tensor.chip(sink_time_step, 1).device(device) = sink_input_tensor.chip(sink_time_step, 1) * // source_output_tensor.chip(source_time_step, 1) * weight_tensor.broadcast(Eigen::array<int, 2>({ batch_size, 1})); // NOTE: Works for diagonal weight tensors between source and sink layers Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_input_tensor(sink_input, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> weight_tensor(weights, 1, source_layer_size, sink_layer_size); sink_input_tensor.chip(sink_time_step, 1).device(device) = sink_input_tensor.chip(sink_time_step, 1) * ( source_output_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, 1, sink_layer_size })) * weight_tensor.broadcast(Eigen::array<int, 3>({ batch_size, 1, 1 })) ).sum(Eigen::array<int, 1>({ 1 })).eval(); //// DEBUG (only on CPU) //std::cout << "[ProdSCTensorOp]Source: " << source_output_tensor.chip(source_time_step, 1) << std::endl; //std::cout << "[ProdSCTensorOp]Weight: " << weight_tensor << std::endl; //std::cout << "[ProdSCTensorOp]Intermediate: " << source_output_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, 1, sink_layer_size })) * // weight_tensor.broadcast(Eigen::array<int, 3>({ batch_size, 1, 1 })) << std::endl; //std::cout << "[ProdSCTensorOp]Sink (End): " << sink_input_tensor.chip(sink_time_step, 1) << std::endl; } std::string getName() const { return "ProdSCTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Max integration function */ template<typename TensorT, typename DeviceT> class MaxTensorOp : public IntegrationTensorOp<TensorT, DeviceT> { public: MaxTensorOp() {}; ~MaxTensorOp() {}; void operator()(TensorT* source_output, TensorT* weights, TensorT* sink_input, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_input_tensor(sink_input, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> weight_tensor(weights, 1, source_layer_size, sink_layer_size); sink_input_tensor.chip(sink_time_step, 1).device(device) = sink_input_tensor.chip(sink_time_step, 1).cwiseMax( (source_output_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, 1, sink_layer_size })) * weight_tensor.broadcast(Eigen::array<int, 3>({ batch_size, 1, 1 })) ).maximum(Eigen::array<int, 1>({ 1 }))).clip(this->min_, this->max_).eval(); } std::string getName() const { return "MaxTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Min integration function */ template<typename TensorT, typename DeviceT> class MinTensorOp : public IntegrationTensorOp<TensorT, DeviceT> { public: MinTensorOp() {}; ~MinTensorOp() {}; void operator()(TensorT* source_output, TensorT* weights, TensorT* sink_input, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_input_tensor(sink_input, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> weight_tensor(weights, 1, source_layer_size, sink_layer_size); // Step 1: Substitute 1e24 for all 0 entries (assuming 0s are non entries) in the input auto sink_input_chip = sink_input_tensor.chip(sink_time_step, 1); auto sink_input_large = ( //sink_input_chip > sink_input_chip.constant(-this->eps_) && sink_input_chip < sink_input_chip.constant(this->eps_) sink_input_chip == sink_input_chip.constant(TensorT(0)) ).select(sink_input_chip.constant(TensorT(1e24)), sink_input_chip).eval(); // Step 2: expand source across the sink layer dim and weight tensor across the batch dim and multiply auto weight_tensor_exp = weight_tensor.broadcast(Eigen::array<int, 3>({ batch_size, 1, 1 })); auto source_weight_exp = (source_output_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, 1, sink_layer_size })).eval() * weight_tensor_exp); // Step 3: Substitute 1e24 for all 0 entries (assuming 0s are non entries) // This unfortunately requires temporary memory to remain under 4096 bytes TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size*source_layer_size*sink_layer_size]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * source_layer_size*sink_layer_size * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> source_weight_1(tmp_data, batch_size, source_layer_size, sink_layer_size); source_weight_1.device(device) = ( //weight_tensor_exp > weight_tensor_exp.constant(-this->eps_) && weight_tensor_exp < weight_tensor_exp.constant(this->eps_) weight_tensor_exp == weight_tensor_exp.constant(TensorT(0)) ).select(source_weight_exp.constant(TensorT(1e24)), source_weight_exp).eval(); // Step 4: Take the Minimum along the source dim auto sink_input_tensor_tmp = sink_input_large.cwiseMin(source_weight_1.minimum(Eigen::array<int, 1>({ 1 }))); // Step 5: Replace all 1e24 with 0 sink_input_tensor.chip(sink_time_step, 1).device(device) = (sink_input_tensor_tmp == sink_input_tensor_tmp.constant(TensorT(1e24))).select(sink_input_tensor_tmp.constant(TensorT(0)), sink_input_tensor_tmp).clip(this->min_, this->max_).eval(); if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif } std::string getName() const { return "MinTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Mean integration function */ template<typename TensorT, typename DeviceT> class MeanTensorOp : public IntegrationTensorOp<TensorT, DeviceT> { public: MeanTensorOp() {}; ~MeanTensorOp() {}; void operator()(TensorT* source_output, TensorT* weights, TensorT* sink_input, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_input_tensor(sink_input, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> weight(weights, 1, source_layer_size, sink_layer_size); sink_input_tensor.chip(sink_time_step, 1).device(device) = ( source_output_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, 1, sink_layer_size })) * weight.broadcast(Eigen::array<int, 3>({ batch_size, 1, 1 })) ).mean(Eigen::array<int, 1>({ 1 })).clip(this->min_, this->max_).eval(); } std::string getName() const { return "MeanTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief VarMod integration function Modified variance integration function: 1/n Sum[0 to n](Xi)^2 where Xi = xi - u (u: mean, xi: single sample) */ template<typename TensorT, typename DeviceT> class VarModTensorOp : public IntegrationTensorOp<TensorT, DeviceT> { public: VarModTensorOp() {}; ~VarModTensorOp() {}; void operator()(TensorT* source_output, TensorT* weights, TensorT* sink_input, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_input_tensor(sink_input, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> weight(weights, 1, source_layer_size, sink_layer_size); auto input = source_output_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, 1, sink_layer_size })) * weight.broadcast(Eigen::array<int, 3>({ batch_size, 1, 1 })); // dim3 sink_input_tensor.chip(sink_time_step, 1).device(device) = ((input * input)*input.constant(TensorT(1) / (TensorT)source_layer_size)).sum(Eigen::array<int, 1>({ 1 })).clip(this->min_, this->max_).eval(); } std::string getName() const { return "VarModTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Var integration function */ template<typename TensorT, typename DeviceT> class VarTensorOp : public IntegrationTensorOp<TensorT, DeviceT> { public: VarTensorOp() {}; ~VarTensorOp() {}; void operator()(TensorT* source_output, TensorT* weights, TensorT* sink_input, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_input_tensor(sink_input, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 5>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size, 1, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> weight(weights, 1, source_layer_size, 1, sink_layer_size); auto mean = (source_output_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 4>({ 1, 1, 1, sink_layer_size })) * weight.broadcast(Eigen::array<int, 4>({ batch_size, 1, 1, 1 }))).mean(Eigen::array<int, 1>({ 1 })).broadcast(Eigen::array<int, 3>({ 1, source_layer_size, 1 })); // dim3 auto input = (source_output_tensor.chip(source_time_step, 1).chip(source_time_step, 3).broadcast(Eigen::array<int, 3>({ 1, 1, sink_layer_size })) * weight.chip(0, 2).broadcast(Eigen::array<int, 3>({ batch_size, 1, 1 })) - mean); // dim3 sink_input_tensor.chip(sink_time_step, 1).device(device) = ((input * input)*input.constant(TensorT(1) / (TensorT)source_layer_size)).sum(Eigen::array<int, 1>({ 1 })).clip(this->min_, this->max_).eval(); } std::string getName() const { return "VarTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Count integration function */ template<typename TensorT, typename DeviceT> class CountTensorOp : public IntegrationTensorOp<TensorT, DeviceT> { public: CountTensorOp() {}; ~CountTensorOp() {}; void operator()(TensorT* source_output, TensorT* weights, TensorT* sink_input, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_input_tensor(sink_input, batch_size, memory_size, sink_layer_size); sink_input_tensor.chip(sink_time_step, 1).device(device) += sink_input_tensor.chip(sink_time_step, 1).constant((TensorT)source_layer_size).clip(this->min_, this->max_).eval(); } std::string getName() const { return "CountTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Base class for all integration error functions. */ template<typename TensorT, typename DeviceT> class IntegrationErrorTensorOp { public: IntegrationErrorTensorOp() = default; IntegrationErrorTensorOp(const TensorT& eps) : eps_(eps) {}; ~IntegrationErrorTensorOp() = default; virtual std::string getName() const = 0; /* @brief Integration error void operator */ virtual void operator()(TensorT* source_error, TensorT *source_input, TensorT* weight, TensorT* sink_output, TensorT* sink_error, TensorT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) = 0; protected: TensorT eps_ = TensorT(1e-24); TensorT min_ = TensorT(-1e9); TensorT max_ = TensorT(1e9); //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(eps_); // } }; /** @brief Fully Connected Sum integration error function */ template<typename TensorT, typename DeviceT> class SumErrorTensorOp : public IntegrationErrorTensorOp<TensorT, DeviceT> { public: SumErrorTensorOp() {}; ~SumErrorTensorOp() {}; void operator()(TensorT* source_error, TensorT *source_input, TensorT* weight, TensorT* sink_output, TensorT* sink_error, TensorT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { operator_(source_error, source_input, weight, sink_output, sink_error, sink_derivative, n_input_nodes, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); }; template<typename TT = TensorT, std::enable_if_t<std::is_same<TT, double>::value, int> = 0> void operator_(TT* source_error, TT* source_input, TT* weight, TT* sink_output, TT* sink_error, TT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_derivative_tensor(sink_derivative, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 4>> source_error_tensor(source_error, batch_size, memory_size, 1, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> weight_tensor(weight, 1, sink_layer_size, source_layer_size); // NOTE: source/sink are reversed auto source_error_bcast = source_error_tensor.chip(source_time_step, 1).broadcast(Eigen::array<Eigen::Index, 3>({ 1, sink_layer_size, 1 })); auto weight_bcast = weight_tensor.broadcast(Eigen::array<Eigen::Index, 3>({ batch_size, 1, 1 })); sink_error_tensor.chip(sink_time_step, 1).device(device) += ((source_error_bcast * weight_bcast).sum(Eigen::array<int, 1>({ 2 })) * sink_derivative_tensor.chip(sink_time_step, 1)).clip(this->min_, this->max_).eval(); } template<typename TT = TensorT, std::enable_if_t<!std::is_same<TT, double>::value, int> = 0> void operator_(TT* source_error, TT* source_input, TT* weight, TT* sink_output, TT* sink_error, TT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_derivative_tensor(sink_derivative, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> source_error_tensor(source_error, batch_size, memory_size, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 2>> weight_tensor(weight, sink_layer_size, source_layer_size); // NOTE: source/sink are reversed Eigen::array<Eigen::IndexPair<int>, 1> product_dims = { Eigen::IndexPair<int>(1, 0) }; // NOTE: we are taking the transpose of the weight matrix sink_error_tensor.chip(sink_time_step, 1).device(device) += ((source_error_tensor.chip(source_time_step, 1)).contract(weight_tensor.shuffle(Eigen::array<int, 2>({ 1, 0 })), product_dims) * (sink_derivative_tensor.chip(sink_time_step, 1))).clip(this->min_, this->max_).eval(); } std::string getName() const { return "SumErrorTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationErrorTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Product integration error function */ template<typename TensorT, typename DeviceT> class ProdErrorTensorOp : public IntegrationErrorTensorOp<TensorT, DeviceT> { public: ProdErrorTensorOp() {}; ~ProdErrorTensorOp() {}; void operator()(TensorT* source_error, TensorT *source_input, TensorT* weight, TensorT* sink_output, TensorT* sink_error, TensorT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> sink_output_tensor(sink_output, batch_size, memory_size, sink_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_derivative_tensor(sink_derivative, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> source_input_tensor(source_input, batch_size, memory_size, 1, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> source_error_tensor(source_error, batch_size, memory_size, 1, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> weight_tensor(weight, 1, sink_layer_size, source_layer_size); // NOTE: source/sink are reversed // step 1: re-compute the intermediate tensor and expand the net input (dims: batch, source, sink) auto comp_tensor = sink_output_tensor.chip(sink_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, 1, source_layer_size })) * weight_tensor.broadcast(Eigen::array<int, 3>({ batch_size, 1, 1 })); auto source_exp_input_tensor = source_input_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, sink_layer_size, 1 })); // step 2: divide out the comp_tensor, scale by the source error, and reduce by taking the sum along the source layer // NOTE for numerical stability, we return 0 for all comp_tensor elements that are 0 TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size*sink_layer_size*source_layer_size]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * sink_layer_size * source_layer_size * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> comp_tensor_clipped_neg(tmp_data, batch_size, sink_layer_size, source_layer_size); // Step 2 Option 1 //auto tmp = (source_exp_input_tensor * source_error_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, sink_layer_size, 1 })) // * comp_tensor / (comp_tensor * comp_tensor + comp_tensor.constant(this->eps_))).sum(Eigen::array<int, 1>({ 2 })); // calculate numerator auto tmp_numerator = source_exp_input_tensor * source_error_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, sink_layer_size, 1 })); // remove small values (both positive and negative) from the intermediate tensor comp_tensor_clipped_neg.device(device) = (comp_tensor > comp_tensor.constant(1 / this->min_) && comp_tensor < comp_tensor.constant(TensorT(0))).select( comp_tensor.constant(1 / this->min_), comp_tensor); auto comp_tensor_clipped_pos = (comp_tensor_clipped_neg <= comp_tensor_clipped_neg.constant(1 / this->max_) && comp_tensor > comp_tensor.constant(TensorT(0))).select( comp_tensor_clipped_neg.constant(1 / this->max_), comp_tensor_clipped_neg); // remove all 0's from the intermediate tensor and finish the calculation auto tmp_non_zero = (comp_tensor_clipped_neg != comp_tensor_clipped_neg.constant(TensorT(0))).select( tmp_numerator / comp_tensor_clipped_pos, comp_tensor_clipped_neg.constant(TensorT(0))).sum(Eigen::array<int, 1>({ 2 })); sink_error_tensor.chip(sink_time_step, 1).device(device) += (tmp_non_zero * sink_derivative_tensor.chip(sink_time_step, 1)).clip(this->min_, this->max_).eval(); // Deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif //// DEBUG (only on CPU) //std::cout << "[ProdErrorTensorOp]comp_tensor: " << comp_tensor << std::endl; //std::cout << "[ProdErrorTensorOp]tmp: " << tmp << std::endl; //std::cout << "[ProdErrorTensorOp]sink_error_tensor (End): " << sink_error_tensor.chip(sink_time_step, 1) << std::endl; }; std::string getName() const { return "ProdErrorTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationErrorTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Max integration error function */ template<typename TensorT, typename DeviceT> class MaxErrorTensorOp : public IntegrationErrorTensorOp<TensorT, DeviceT> { public: MaxErrorTensorOp() {}; ~MaxErrorTensorOp() {}; void operator()(TensorT* source_error, TensorT *source_input, TensorT* weight, TensorT* sink_output, TensorT* sink_error, TensorT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> sink_output_tensor(sink_output, batch_size, memory_size, sink_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_derivative_tensor(sink_derivative, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> source_input_tensor(source_input, batch_size, memory_size, 1, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> source_error_tensor(source_error, batch_size, memory_size, 1, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> weight_tensor(weight, 1, sink_layer_size, source_layer_size); // NOTE: source/sink are reversed // step 1: determine the maximum auto comp_tensor = sink_output_tensor.chip(sink_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, 1, source_layer_size })) * weight_tensor.broadcast(Eigen::array<int, 3>({ batch_size, 1, 1 })); auto max_tensor = source_input_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, sink_layer_size, 1 })); auto selection_tensor = ((comp_tensor - max_tensor).abs() > (max_tensor.constant(TensorT(0)) - max_tensor.constant(this->eps_)) && (comp_tensor - max_tensor).abs() < (max_tensor.constant(TensorT(0)) + max_tensor.constant(this->eps_))).select(max_tensor.constant(TensorT(1)), max_tensor.constant(TensorT(0))); // step 2: select out the error to propogate auto error = source_error_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, sink_layer_size, 1 })) * weight_tensor.broadcast(Eigen::array<int, 3>({ batch_size, 1, 1 })); auto selected_error = (error * selection_tensor).sum(Eigen::array<int, 1>({ 2 })); // sum along the source layer sink_error_tensor.chip(sink_time_step, 1).device(device) += (selected_error * sink_derivative_tensor.chip(sink_time_step, 1)).clip(this->min_, this->max_).eval(); }; std::string getName() const { return "MaxErrorTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationErrorTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Min integration error function */ template<typename TensorT, typename DeviceT> class MinErrorTensorOp : public IntegrationErrorTensorOp<TensorT, DeviceT> { public: MinErrorTensorOp() {}; ~MinErrorTensorOp() {}; void operator()(TensorT* source_error, TensorT *source_input, TensorT* weight, TensorT* sink_output, TensorT* sink_error, TensorT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> sink_output_tensor(sink_output, batch_size, memory_size, sink_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_derivative_tensor(sink_derivative, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> source_input_tensor(source_input, batch_size, memory_size, 1, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> source_error_tensor(source_error, batch_size, memory_size, 1, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> weight_tensor(weight, 1, sink_layer_size, source_layer_size); // NOTE: source/sink are reversed // step 1: determine the minimum auto comp_tensor = sink_output_tensor.chip(sink_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, 1, source_layer_size })) * weight_tensor.broadcast(Eigen::array<int, 3>({ batch_size, 1, 1 })); auto min_tensor = source_input_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, sink_layer_size, 1 })); auto selection_tensor = ((comp_tensor - min_tensor).abs() > (min_tensor.constant(TensorT(0)) - min_tensor.constant(this->eps_)) && (comp_tensor - min_tensor).abs() < (min_tensor.constant(TensorT(0)) + min_tensor.constant(this->eps_))).select(min_tensor.constant(TensorT(1)), min_tensor.constant(TensorT(0))); // step 2: select out the error to propogate auto error = source_error_tensor.chip(source_time_step, 1).broadcast(Eigen::array<int, 3>({ 1, sink_layer_size, 1 })) * weight_tensor.broadcast(Eigen::array<int, 3>({ batch_size, 1, 1 })); auto selected_error = (error * selection_tensor).sum(Eigen::array<int, 1>({ 2 })); // sum along the source layer sink_error_tensor.chip(sink_time_step, 1).device(device) += (selected_error * sink_derivative_tensor.chip(sink_time_step, 1)).clip(this->min_, this->max_).eval(); }; std::string getName() const { return "MinErrorTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationErrorTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Mean integration error function */ template<typename TensorT, typename DeviceT> class MeanErrorTensorOp : public IntegrationErrorTensorOp<TensorT, DeviceT> { public: MeanErrorTensorOp() {}; ~MeanErrorTensorOp() {}; void operator()(TensorT* source_error, TensorT* source_input, TensorT* weight, TensorT* sink_output, TensorT* sink_error, TensorT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { operator_(source_error, source_input, weight, sink_output, sink_error, sink_derivative, n_input_nodes, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); }; template<typename TT = TensorT, std::enable_if_t<std::is_same<TT, double>::value, int> = 0> void operator_(TT* source_error, TT* source_input, TT* weight, TT* sink_output, TT* sink_error, TT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_derivative_tensor(sink_derivative, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 4>> source_error_tensor(source_error, batch_size, memory_size, 1, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> weight_tensor(weight, 1, sink_layer_size, source_layer_size); // NOTE: source/sink are reversed auto source_error_bcast = source_error_tensor.chip(source_time_step, 1).broadcast(Eigen::array<Eigen::Index, 3>({ 1, sink_layer_size, 1 })); auto weight_bcast = weight_tensor.broadcast(Eigen::array<Eigen::Index, 3>({ batch_size, 1, 1 })); sink_error_tensor.chip(sink_time_step, 1).device(device) += ((source_error_bcast * weight_bcast).sum(Eigen::array<int, 1>({ 2 })) * sink_error_tensor.chip(sink_time_step, 1).constant(TT(1) / (TT)n_input_nodes) * sink_derivative_tensor.chip(sink_time_step, 1)).clip(this->min_, this->max_).eval(); } template<typename TT = TensorT, std::enable_if_t<!std::is_same<TT, double>::value, int> = 0> void operator_(TT* source_error, TT* source_input, TT* weight, TT* sink_output, TT* sink_error, TT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_derivative_tensor(sink_derivative, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> source_error_tensor(source_error, batch_size, memory_size, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 2>> weight_tensor(weight, sink_layer_size, source_layer_size); // NOTE: source/sink are reversed Eigen::array<Eigen::IndexPair<int>, 1> product_dims = { Eigen::IndexPair<int>(1, 0) }; // NOTE: we are taking the transpose of the weight matrix sink_error_tensor.chip(sink_time_step, 1).device(device) += ((source_error_tensor.chip(source_time_step, 1)).contract(weight_tensor.shuffle(Eigen::array<int, 2>({ 1, 0 })), product_dims) * sink_error_tensor.chip(sink_time_step, 1).constant(TT(1)/(TT)n_input_nodes) * sink_derivative_tensor.chip(sink_time_step, 1)).clip(this->min_, this->max_).eval(); } std::string getName() const { return "MeanErrorTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationErrorTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief VarMod integration error function */ template<typename TensorT, typename DeviceT> class VarModErrorTensorOp : public IntegrationErrorTensorOp<TensorT, DeviceT> { public: VarModErrorTensorOp() {}; ~VarModErrorTensorOp() {}; void operator()(TensorT* source_error, TensorT* source_input, TensorT* weight, TensorT* sink_output, TensorT* sink_error, TensorT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { operator_(source_error, source_input, weight, sink_output, sink_error, sink_derivative, n_input_nodes, batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device); }; template<typename TT = TensorT, std::enable_if_t<std::is_same<TT, double>::value, int> = 0> void operator_(TT* source_error, TT* source_input, TT* weight, TT* sink_output, TT* sink_error, TT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_derivative_tensor(sink_derivative, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 4>> source_error_tensor(source_error, batch_size, memory_size, 1, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> weight_tensor(weight, 1, sink_layer_size, source_layer_size); // NOTE: source/sink are reversed auto source_error_bcast = source_error_tensor.chip(source_time_step, 1).broadcast(Eigen::array<Eigen::Index, 3>({ 1, sink_layer_size, 1 })); auto weight_bcast = weight_tensor.broadcast(Eigen::array<Eigen::Index, 3>({ batch_size, 1, 1 })); sink_error_tensor.chip(sink_time_step, 1).device(device) += ((source_error_bcast * weight_bcast).sum(Eigen::array<int, 1>({ 2 })) * sink_error_tensor.chip(sink_time_step, 1).constant(TT(1) / (TT)n_input_nodes).eval() * sink_error_tensor.chip(sink_time_step, 1).constant((TT)2).eval() * sink_derivative_tensor.chip(sink_time_step, 1)).clip(this->min_, this->max_).eval(); } template<typename TT = TensorT, std::enable_if_t<!std::is_same<TT, double>::value, int> = 0> void operator_(TT* source_error, TT* source_input, TT* weight, TT* sink_output, TT* sink_error, TT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_derivative_tensor(sink_derivative, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> source_error_tensor(source_error, batch_size, memory_size, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 2>> weight_tensor(weight, sink_layer_size, source_layer_size); // NOTE: source/sink are reversed Eigen::array<Eigen::IndexPair<int>, 1> product_dims = { Eigen::IndexPair<int>(1, 0) }; // NOTE: we are taking the transpose of the weight matrix sink_error_tensor.chip(sink_time_step, 1).device(device) += ((source_error_tensor.chip(source_time_step, 1)).contract(weight_tensor.shuffle(Eigen::array<int, 2>({ 1, 0 })), product_dims) * sink_error_tensor.chip(sink_time_step, 1).constant(TT(1) / (TT)n_input_nodes).eval() * sink_error_tensor.chip(sink_time_step, 1).constant((TT)2).eval() * sink_derivative_tensor.chip(sink_time_step, 1)).clip(this->min_, this->max_).eval(); } std::string getName() const { return "VarModErrorTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationErrorTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Var integration error function */ template<typename TensorT, typename DeviceT> class VarErrorTensorOp : public IntegrationErrorTensorOp<TensorT, DeviceT> { public: VarErrorTensorOp() {}; ~VarErrorTensorOp() {}; void operator()(TensorT* source_error, TensorT *source_input, TensorT* weight, TensorT* sink_output, TensorT* sink_error, TensorT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { //TODO }; std::string getName() const { return "VarErrorTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationErrorTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Count integration error function */ template<typename TensorT, typename DeviceT> class CountErrorTensorOp : public IntegrationErrorTensorOp<TensorT, DeviceT> { public: CountErrorTensorOp() {}; ~CountErrorTensorOp() {}; void operator()(TensorT* source_error, TensorT *source_input, TensorT* weight, TensorT* sink_output, TensorT* sink_error, TensorT* sink_derivative, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, const int& source_time_step, const int& sink_time_step, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); sink_error_tensor.chip(sink_time_step, 1).device(device) = sink_error_tensor.chip(sink_time_step, 1).constant(TensorT(0)).clip(this->min_, this->max_).eval(); }; std::string getName() const { return "CountErrorTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationErrorTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Base class for all integration error functions. */ template<typename TensorT, typename DeviceT> class IntegrationWeightGradTensorOp { public: IntegrationWeightGradTensorOp() = default; IntegrationWeightGradTensorOp(const TensorT& eps) : eps_(eps) {}; ~IntegrationWeightGradTensorOp() = default; virtual std::string getName() const = 0; virtual void operator()(TensorT* sink_error, TensorT* source_output, TensorT* weight, TensorT* source_input, TensorT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) = 0; protected: TensorT eps_ = TensorT(1e-24); TensorT min_ = TensorT(-1e9); TensorT max_ = TensorT(1e9); //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(eps_); // } }; /** @brief Fully Connected Sum integration error function */ template<typename TensorT, typename DeviceT> class SumWeightGradTensorOp : public IntegrationWeightGradTensorOp<TensorT, DeviceT> { public: void operator()(TensorT* sink_error, TensorT* source_output, TensorT* weight, TensorT* source_input, TensorT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { operator_(sink_error, source_output, weight, source_input, weight_error, n_input_nodes, batch_size, memory_size, source_layer_size, sink_layer_size, device); }; template<typename TT = TensorT, std::enable_if_t<std::is_same<TT, double>::value, int> = 0> void operator_(TT* sink_error, TT* source_output, TT* weight, TT* source_input, TT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 4>> sink_error_tensor(sink_error, batch_size, memory_size, 1, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 4>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TT, 2>> weight_error_tensor(weight_error, source_layer_size, sink_layer_size); auto sink_error_bcast = sink_error_tensor.broadcast(Eigen::array<Eigen::Index, 4>({ 1, 1, source_layer_size, 1 })); auto source_output_bcast = source_output_tensor.broadcast(Eigen::array<Eigen::Index, 4>({ 1, 1, 1, sink_layer_size })); auto tmp = -(source_output_bcast * sink_error_bcast).sum(Eigen::array<int, 2>({ 0, 1 })); weight_error_tensor.device(device) += (tmp * tmp.constant(TT(1) / (TT)batch_size)).clip(this->min_, this->max_).eval(); } template<typename TT = TensorT, std::enable_if_t<!std::is_same<TT, double>::value, int> = 0> void operator_(TT* sink_error, TT* source_output, TT* weight, TT* source_input, TT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 2>> weight_error_tensor(weight_error, source_layer_size, sink_layer_size); Eigen::array<Eigen::IndexPair<int>, 2> double_contraction_product_dims = { Eigen::IndexPair<int>(1,1), Eigen::IndexPair<int>(0,0) }; auto tmp = -source_output_tensor.contract(sink_error_tensor, double_contraction_product_dims); // NOTE: Double contraction along the memory and batch (equivalent to a double sum along the products of the batch and memory dimensions) weight_error_tensor.device(device) += (tmp * tmp.constant(TT(1) / (TT)batch_size)).clip(this->min_, this->max_).eval(); // NOTE: Requires a correction by dividing by the batch size } std::string getName() const { return "SumWeightGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationWeightGradTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Product integration error function */ template<typename TensorT, typename DeviceT> class ProdWeightGradTensorOp : public IntegrationWeightGradTensorOp<TensorT, DeviceT> { public: ProdWeightGradTensorOp() {}; ~ProdWeightGradTensorOp() {}; void operator()(TensorT* sink_error, TensorT* source_output, TensorT* weight, TensorT* source_input, TensorT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> sink_error_tensor(sink_error, batch_size, memory_size, 1, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> source_input_tensor(source_input, batch_size, memory_size, source_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> weight_tensor(weight, 1, 1, source_layer_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> weight_error_tensor(weight_error, source_layer_size, sink_layer_size); // step 0: remove small values and 0's from the weight_tensor for numerical stability auto weight_tensor_exp = weight_tensor.broadcast(Eigen::array<int, 4>({ batch_size, memory_size, 1, 1 })); auto weight_tensor_exp_clipped_neg = (weight_tensor_exp > weight_tensor_exp.constant(1 / this->min_) && weight_tensor_exp < weight_tensor_exp.constant(TensorT(0))).select( weight_tensor_exp.constant(1 / this->min_), weight_tensor_exp); auto weight_tensor_exp_clipped_pos = (weight_tensor_exp <= weight_tensor_exp.constant(1 / this->max_) && weight_tensor_exp > weight_tensor_exp.constant(TensorT(0))).select( weight_tensor_exp_clipped_neg.constant(1 / this->max_), weight_tensor_exp_clipped_neg); // step 1: compute the weight-normalized source net input expanded across batch and memory // NOTE for numerical stability we multiply by the weight_tensor and then divide by the square of the weight tensor plus a small number to avoid dividing by 0 //auto input_normalized_tensor = source_input_tensor.broadcast(Eigen::array<int, 4>({ 1, 1, 1, sink_layer_size })) * weight_tensor_exp / (weight_tensor_exp*weight_tensor_exp + weight_tensor_exp.constant(this->eps_)); TensorT* tmp_data; if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { tmp_data = new TensorT[batch_size * memory_size * source_layer_size * sink_layer_size]; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { size_t bytes = batch_size * memory_size * source_layer_size * sink_layer_size * sizeof(TensorT); assert(cudaMalloc((void**)(&tmp_data), bytes) == cudaSuccess); } #endif Eigen::TensorMap<Eigen::Tensor<TensorT, 4>> input_normalized_tensor(tmp_data, batch_size, memory_size, source_layer_size, sink_layer_size); input_normalized_tensor.device(device) = (weight_tensor_exp != weight_tensor_exp.constant(TensorT(0))).select( source_input_tensor.broadcast(Eigen::array<int, 4>({ 1, 1, 1, sink_layer_size })) / weight_tensor_exp_clipped_pos, weight_tensor_exp.constant(TensorT(0))); // step 2: scale to the sink error auto scaled_error = -sink_error_tensor.broadcast(Eigen::array<int, 4>({ 1, 1, source_layer_size, 1 })) * input_normalized_tensor; // step 3: sum along the memory and average along the batch dimensions weight_error_tensor.device(device) += (scaled_error.sum(Eigen::array<int, 2>({ 0, 1 })) * weight_error_tensor.constant(TensorT(1) / (TensorT)batch_size)).clip(this->min_, this->max_).eval(); // Deallocate temporary memory if (typeid(device).name() == typeid(Eigen::DefaultDevice).name()) { delete[] tmp_data; } #if COMPILE_WITH_CUDA else if (typeid(device).name() == typeid(Eigen::GpuDevice).name()) { assert(cudaFree(tmp_data) == cudaSuccess); } #endif }; std::string getName() const { return "ProdWeightGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationWeightGradTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Max integration error function */ template<typename TensorT, typename DeviceT> class MaxWeightGradTensorOp : public IntegrationWeightGradTensorOp<TensorT, DeviceT> { public: MaxWeightGradTensorOp() {}; ~MaxWeightGradTensorOp() {}; void operator()(TensorT* sink_error, TensorT* source_output, TensorT* weight, TensorT* source_input, TensorT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { operator_(sink_error, source_output, weight, source_input, weight_error, n_input_nodes, batch_size, memory_size, source_layer_size, sink_layer_size, device); }; template<typename TT = TensorT, std::enable_if_t<std::is_same<TT, double>::value, int> = 0> void operator_(TT* sink_error, TT* source_output, TT* weight, TT* source_input, TT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 4>> sink_error_tensor(sink_error, batch_size, memory_size, 1, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 4>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TT, 2>> weight_error_tensor(weight_error, source_layer_size, sink_layer_size); auto sink_error_bcast = sink_error_tensor.broadcast(Eigen::array<Eigen::Index, 4>({ 1, 1, source_layer_size, 1 })); auto source_output_bcast = source_output_tensor.broadcast(Eigen::array<Eigen::Index, 4>({ 1, 1, 1, sink_layer_size })); auto tmp = -(source_output_bcast * sink_error_bcast).sum(Eigen::array<int, 2>({ 0, 1 })); weight_error_tensor.device(device) += (tmp * tmp.constant(TT(1) / (TT)batch_size)).clip(this->min_, this->max_).eval(); } template<typename TT = TensorT, std::enable_if_t<!std::is_same<TT, double>::value, int> = 0> void operator_(TT* sink_error, TT* source_output, TT* weight, TT* source_input, TT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 2>> weight_error_tensor(weight_error, source_layer_size, sink_layer_size); Eigen::array<Eigen::IndexPair<int>, 2> double_contraction_product_dims = { Eigen::IndexPair<int>(1,1), Eigen::IndexPair<int>(0,0) }; auto tmp = -source_output_tensor.contract(sink_error_tensor, double_contraction_product_dims); // NOTE: Double contraction along the memory and batch (equivalent to a double sum along the products of the batch and memory dimensions) weight_error_tensor.device(device) += (tmp * tmp.constant(TT(1) / (TT)batch_size)).clip(this->min_, this->max_).eval(); // NOTE: Requires a correction by dividing by the batch size } std::string getName() const { return "MaxWeightGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationWeightGradTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Min integration error function */ template<typename TensorT, typename DeviceT> class MinWeightGradTensorOp : public IntegrationWeightGradTensorOp<TensorT, DeviceT> { public: MinWeightGradTensorOp() {}; ~MinWeightGradTensorOp() {}; void operator()(TensorT* sink_error, TensorT* source_output, TensorT* weight, TensorT* source_input, TensorT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { operator_(sink_error, source_output, weight, source_input, weight_error, n_input_nodes, batch_size, memory_size, source_layer_size, sink_layer_size, device); }; template<typename TT = TensorT, std::enable_if_t<std::is_same<TT, double>::value, int> = 0> void operator_(TT* sink_error, TT* source_output, TT* weight, TT* source_input, TT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 4>> sink_error_tensor(sink_error, batch_size, memory_size, 1, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 4>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TT, 2>> weight_error_tensor(weight_error, source_layer_size, sink_layer_size); auto sink_error_bcast = sink_error_tensor.broadcast(Eigen::array<Eigen::Index, 4>({ 1, 1, source_layer_size, 1 })); auto source_output_bcast = source_output_tensor.broadcast(Eigen::array<Eigen::Index, 4>({ 1, 1, 1, sink_layer_size })); auto tmp = -(source_output_bcast * sink_error_bcast).sum(Eigen::array<int, 2>({ 0, 1 })); weight_error_tensor.device(device) += (tmp * tmp.constant(TT(1) / (TT)batch_size)).clip(this->min_, this->max_).eval(); } template<typename TT = TensorT, std::enable_if_t<!std::is_same<TT, double>::value, int> = 0> void operator_(TT* sink_error, TT* source_output, TT* weight, TT* source_input, TT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 2>> weight_error_tensor(weight_error, source_layer_size, sink_layer_size); Eigen::array<Eigen::IndexPair<int>, 2> double_contraction_product_dims = { Eigen::IndexPair<int>(1,1), Eigen::IndexPair<int>(0,0) }; auto tmp = -source_output_tensor.contract(sink_error_tensor, double_contraction_product_dims); // NOTE: Double contraction along the memory and batch (equivalent to a double sum along the products of the batch and memory dimensions) weight_error_tensor.device(device) += (tmp * tmp.constant(TT(1) / (TT)batch_size)).clip(this->min_, this->max_).eval(); // NOTE: Requires a correction by dividing by the batch size } std::string getName() const { return "MinWeightGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationWeightGradTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Count integration error function */ template<typename TensorT, typename DeviceT> class CountWeightGradTensorOp : public IntegrationWeightGradTensorOp<TensorT, DeviceT> { public: CountWeightGradTensorOp() {}; ~CountWeightGradTensorOp() {}; void operator()(TensorT* sink_error, TensorT* source_output, TensorT* weight, TensorT* source_input, TensorT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TensorT, 2>> weight_error_tensor(weight_error, source_layer_size, sink_layer_size); weight_error_tensor.device(device) = weight_error_tensor.constant(TensorT(0)); }; std::string getName() const { return "CountWeightGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationWeightGradTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Mean integration error function */ template<typename TensorT, typename DeviceT> class MeanWeightGradTensorOp : public IntegrationWeightGradTensorOp<TensorT, DeviceT> { public: MeanWeightGradTensorOp() {}; ~MeanWeightGradTensorOp() {}; void operator()(TensorT* sink_error, TensorT* source_output, TensorT* weight, TensorT* source_input, TensorT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { operator_(sink_error, source_output, weight, source_input, weight_error, n_input_nodes, batch_size, memory_size, source_layer_size, sink_layer_size, device); }; template<typename TT = TensorT, std::enable_if_t<std::is_same<TT, double>::value, int> = 0> void operator_(TT* sink_error, TT* source_output, TT* weight, TT* source_input, TT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 4>> sink_error_tensor(sink_error, batch_size, memory_size, 1, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 4>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TT, 2>> weight_error_tensor(weight_error, source_layer_size, sink_layer_size); auto sink_error_bcast = sink_error_tensor.broadcast(Eigen::array<Eigen::Index, 4>({ 1, 1, source_layer_size, 1 })); auto source_output_bcast = source_output_tensor.broadcast(Eigen::array<Eigen::Index, 4>({ 1, 1, 1, sink_layer_size })); auto tmp = -(source_output_bcast * sink_error_bcast).sum(Eigen::array<int, 2>({ 0, 1 })); weight_error_tensor.device(device) += (tmp * weight_error_tensor.constant(TT(1) / (TT)batch_size).eval() * weight_error_tensor.constant(TT(1) / (TT)n_input_nodes)).clip(this->min_, this->max_).eval();; } template<typename TT = TensorT, std::enable_if_t<!std::is_same<TT, double>::value, int> = 0> void operator_(TT* sink_error, TT* source_output, TT* weight, TT* source_input, TT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 2>> weight_error_tensor(weight_error, source_layer_size, sink_layer_size); Eigen::array<Eigen::IndexPair<int>, 2> double_contraction_product_dims = { Eigen::IndexPair<int>(1,1), Eigen::IndexPair<int>(0,0) }; auto tmp = -source_output_tensor.contract(sink_error_tensor, double_contraction_product_dims); // NOTE: Double contraction along the memory and batch (equivalent to a double sum along the products of the batch and memory dimensions) weight_error_tensor.device(device) += (tmp * weight_error_tensor.constant(TT(1) / (TT)batch_size).eval() * weight_error_tensor.constant(TT(1) / (TT)n_input_nodes)).clip(this->min_, this->max_).eval();; // NOTE: Requires a correction by dividing by the batch size } std::string getName() const { return "MeanWeightGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationWeightGradTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief VarMod integration error function */ template<typename TensorT, typename DeviceT> class VarModWeightGradTensorOp : public IntegrationWeightGradTensorOp<TensorT, DeviceT> { public: VarModWeightGradTensorOp() {}; ~VarModWeightGradTensorOp() {}; void operator()(TensorT* sink_error, TensorT* source_output, TensorT* weight, TensorT* source_input, TensorT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { operator_(sink_error, source_output, weight, source_input, weight_error, n_input_nodes, batch_size, memory_size, source_layer_size, sink_layer_size, device); }; template<typename TT = TensorT, std::enable_if_t<std::is_same<TT, double>::value, int> = 0> void operator_(TT* sink_error, TT* source_output, TT* weight, TT* source_input, TT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 4>> sink_error_tensor(sink_error, batch_size, memory_size, 1, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 4>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size, 1); Eigen::TensorMap<Eigen::Tensor<TT, 2>> weight_error_tensor(weight_error, source_layer_size, sink_layer_size); auto sink_error_bcast = sink_error_tensor.broadcast(Eigen::array<Eigen::Index, 4>({ 1, 1, source_layer_size, 1 })); auto source_output_bcast = source_output_tensor.broadcast(Eigen::array<Eigen::Index, 4>({ 1, 1, 1, sink_layer_size })); auto tmp = -(source_output_bcast * sink_error_bcast).sum(Eigen::array<int, 2>({ 0, 1 })); weight_error_tensor.device(device) += (tmp * weight_error_tensor.constant(TT(1) / (TT)batch_size) * weight_error_tensor.constant(TT(1) / (TT)n_input_nodes).eval() * weight_error_tensor.constant((TT)2)).clip(this->min_, this->max_).eval();; } template<typename TT = TensorT, std::enable_if_t<!std::is_same<TT, double>::value, int> = 0> void operator_(TT* sink_error, TT* source_output, TT* weight, TT* source_input, TT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { Eigen::TensorMap<Eigen::Tensor<TT, 3>> sink_error_tensor(sink_error, batch_size, memory_size, sink_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 3>> source_output_tensor(source_output, batch_size, memory_size, source_layer_size); Eigen::TensorMap<Eigen::Tensor<TT, 2>> weight_error_tensor(weight_error, source_layer_size, sink_layer_size); Eigen::array<Eigen::IndexPair<int>, 2> double_contraction_product_dims = { Eigen::IndexPair<int>(1,1), Eigen::IndexPair<int>(0,0) }; auto tmp = -source_output_tensor.contract(sink_error_tensor, double_contraction_product_dims); // NOTE: Double contraction along the memory and batch (equivalent to a double sum along the products of the batch and memory dimensions) weight_error_tensor.device(device) += (tmp * weight_error_tensor.constant(TT(1) / (TT)batch_size) * weight_error_tensor.constant(TT(1) / (TT)n_input_nodes).eval() * weight_error_tensor.constant((TT)2)).clip(this->min_, this->max_).eval();; // NOTE: Requires a correction by dividing by the batch size } std::string getName() const { return "VarModWeightGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationWeightGradTensorOp<TensorT, DeviceT>>(this)); // } }; /** @brief Var integration error function */ template<typename TensorT, typename DeviceT> class VarWeightGradTensorOp : public IntegrationWeightGradTensorOp<TensorT, DeviceT> { public: VarWeightGradTensorOp() {}; ~VarWeightGradTensorOp() {}; void operator()(TensorT* sink_error, TensorT* source_output, TensorT* weight, TensorT* source_input, TensorT* weight_error, const int& n_input_nodes, const int& batch_size, const int& memory_size, const int& source_layer_size, const int& sink_layer_size, DeviceT& device) { // TODO }; std::string getName() const { return "VarWeightGradTensorOp"; }; //private: // friend class cereal::access; // template<class Archive> // void serialize(Archive& archive) { // archive(cereal::base_class<IntegrationWeightGradTensorOp<TensorT, DeviceT>>(this)); // } }; } //CEREAL_REGISTER_TYPE(EvoNet::SumTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ProdTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::MaxTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::MeanTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::VarModTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::CountTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::SumErrorTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ProdErrorTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::MaxErrorTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::MeanErrorTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::VarModErrorTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::CountErrorTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::SumWeightGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ProdWeightGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::MaxWeightGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::CountWeightGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::MeanWeightGradTensorOp<float, Eigen::DefaultDevice>); //CEREAL_REGISTER_TYPE(EvoNet::VarModWeightGradTensorOp<float, Eigen::DefaultDevice>); // //#if COMPILE_WITH_CUDA //CEREAL_REGISTER_TYPE(EvoNet::SumTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ProdTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::MaxTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::MeanTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::VarModTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::CountTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::SumErrorTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ProdErrorTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::MaxErrorTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::MeanErrorTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::VarModErrorTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::CountErrorTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::SumWeightGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::ProdWeightGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::MaxWeightGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::CountWeightGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::MeanWeightGradTensorOp<float, Eigen::GpuDevice>); //CEREAL_REGISTER_TYPE(EvoNet::VarModWeightGradTensorOp<float, Eigen::GpuDevice>); //#endif #endif //EVONET_TENSORINTEGRATIONFUNCTION_H<file_sep>include(${CMAKE_ROOT}/Modules/ExternalProject.cmake) set_property (DIRECTORY PROPERTY EP_BASE Dependencies) set (DEPENDENCIES) set (CEREAL_VERSION master) message (STATUS "Adding Eigen ${CEREAL_VERSION} as an external project.") ExternalProject_Add(cereal GIT_REPOSITORY "https://github.com/USCiLab/cereal.git" #GIT_TAG ${CEREAL_VERSION} # Need the dev branch to compile use MSVC UPDATE_COMMAND "" CONFIGURE_COMMAND "" BUILD_IN_SOURCE 1 BUILD_COMMAND "" INSTALL_COMMAND "" #INSTALL_COMMAND ${CEREAL_INSTALL_CMD} #INSTALL_DIR include )<file_sep>EvoNet Changelog ================ v0.2.0 (2022-01-11) -------------------- Features ~~~~~~~~ - Major updates to the nodes to tensor algorithm `(#102) <https://github.com/dmccloskey/EvoNet/pull/102>`_ - Major updates to the metabolomics examples `(#102) <https://github.com/dmccloskey/EvoNet/pull/102>`_ Fix ~~~ - Clean up of README and other documentation `(#103) <https://github.com/dmccloskey/EvoNet/pull/103>`_ Changes ~~~~~~~ - Changes to CMAKE `(#103) <https://github.com/dmccloskey/EvoNet/pull/103>`_ v0.1.0 (2019-04-08) ------------------- First release of EvoNet <file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE Node test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/Node.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(node) BOOST_AUTO_TEST_CASE(constructor) { Node<float>* ptr = nullptr; Node<float>* nullPointer = nullptr; ptr = new Node<float>(); BOOST_CHECK_NE(ptr, nullPointer); delete ptr; } BOOST_AUTO_TEST_CASE(destructor) { Node<float>* ptr = nullptr; ptr = new Node<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(constructor2) { std::shared_ptr<ActivationOp<float>> activation(new TanHOp<float>()); std::shared_ptr<ActivationOp<float>> activation_grad(new TanHGradOp<float>()); std::shared_ptr<IntegrationOp<float>> integration(new ProdOp<float>()); std::shared_ptr<IntegrationErrorOp<float>> integration_error(new ProdErrorOp<float>()); std::shared_ptr<IntegrationWeightGradOp<float>> integration_weight_grad(new ProdWeightGradOp<float>()); Node<float> node("1", NodeType::bias, NodeStatus::initialized, activation, activation_grad, integration, integration_error, integration_weight_grad); BOOST_CHECK_EQUAL(node.getId(), -1); BOOST_CHECK_EQUAL(node.getName(), "1"); BOOST_CHECK_EQUAL(node.getModuleId(), -1); BOOST_CHECK_EQUAL(node.getModuleName(), ""); BOOST_CHECK(node.getType() == NodeType::bias); BOOST_CHECK(node.getStatus() == NodeStatus::initialized); BOOST_CHECK_EQUAL(node.getActivation(), activation.get()); BOOST_CHECK_EQUAL(node.getActivationGrad(), activation_grad.get()); BOOST_CHECK_EQUAL(node.getIntegration(), integration.get()); BOOST_CHECK_EQUAL(node.getIntegrationError(), integration_error.get()); BOOST_CHECK_EQUAL(node.getIntegrationWeightGrad(), integration_weight_grad.get()); } BOOST_AUTO_TEST_CASE(comparison) { Node<float> node, node_test; BOOST_CHECK(node == node_test); node = Node<float>("1", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); node.setId(1); node_test = Node<float>("1", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); node_test.setId(1); BOOST_CHECK(node == node_test); node.setId(2); BOOST_CHECK(node != node_test); // Check name node = Node<float>("2", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); node.setId(1); BOOST_CHECK(node != node_test); // Check ActivationOp node = Node<float>("1", NodeType::hidden, NodeStatus::initialized, std::make_shared<ELUOp<float>>(ELUOp<float>()), std::make_shared<ELUGradOp<float>>(ELUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); BOOST_CHECK(node != node_test); // Check NodeStatus node = Node<float>("1", NodeType::hidden, NodeStatus::activated, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); BOOST_CHECK(node != node_test); // Check NodeType node = Node<float>("1", NodeType::output, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); BOOST_CHECK(node != node_test); // CheckNode IntegrationOp node = Node<float>("1", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<ProdOp<float>>(ProdOp<float>()), std::make_shared<ProdErrorOp<float>>(ProdErrorOp<float>()), std::make_shared<ProdWeightGradOp<float>>(ProdWeightGradOp<float>())); BOOST_CHECK(node != node_test); } BOOST_AUTO_TEST_CASE(gettersAndSetters) { Node<float> node; node.setId(1); node.setName("Node1"); node.setType(NodeType::hidden); node.setStatus(NodeStatus::initialized); std::shared_ptr<ActivationOp<float>> activation(new TanHOp<float>()); std::shared_ptr<ActivationOp<float>> activation_grad(new TanHGradOp<float>()); node.setActivation(activation); node.setActivationGrad(activation_grad); std::shared_ptr<IntegrationOp<float>> integration(new ProdOp<float>()); std::shared_ptr<IntegrationErrorOp<float>> integration_error(new ProdErrorOp<float>()); std::shared_ptr<IntegrationWeightGradOp<float>> integration_weight_grad(new ProdWeightGradOp<float>()); node.setIntegration(integration); node.setIntegrationError(integration_error); node.setIntegrationWeightGrad(integration_weight_grad); node.setModuleId(4); node.setModuleName("Module1"); node.setDropProbability(1.0f); BOOST_CHECK_EQUAL(node.getId(), 1); BOOST_CHECK_EQUAL(node.getName(), "Node1"); BOOST_CHECK(node.getType() == NodeType::hidden); BOOST_CHECK(node.getStatus() == NodeStatus::initialized); BOOST_CHECK_EQUAL(node.getActivation(), activation.get()); BOOST_CHECK_EQUAL(node.getActivationGrad(), activation_grad.get()); BOOST_CHECK_EQUAL(node.getIntegration(), integration.get()); BOOST_CHECK_EQUAL(node.getIntegrationError(), integration_error.get()); BOOST_CHECK_EQUAL(node.getIntegrationWeightGrad(), integration_weight_grad.get()); BOOST_CHECK_EQUAL(node.getModuleId(), 4); BOOST_CHECK_EQUAL(node.getModuleName(), "Module1"); BOOST_CHECK_EQUAL(node.getDropProbability(), 1.0f); // Check smart pointer data modification BOOST_CHECK_CLOSE(node.getActivation()->getEps(), 1e-6, 1e-3); BOOST_CHECK_CLOSE(node.getActivationGrad()->getEps(), 1e-6, 1e-3); BOOST_CHECK_CLOSE(node.getIntegration()->getEps(), 1e-6, 1e-3); BOOST_CHECK_CLOSE(node.getIntegrationError()->getEps(), 1e-6, 1e-3); BOOST_CHECK_CLOSE(node.getIntegrationWeightGrad()->getEps(), 1e-6, 1e-3); activation->setEps(1); activation_grad->setEps(1); activation->setEps(1); activation->setEps(1); activation->setEps(1); BOOST_CHECK_EQUAL(node.getActivation()->getEps(), activation->getEps()); BOOST_CHECK_EQUAL(node.getActivationGrad()->getEps(), activation_grad->getEps()); BOOST_CHECK_EQUAL(node.getIntegration()->getEps(), integration->getEps()); BOOST_CHECK_EQUAL(node.getIntegrationError()->getEps(), integration_error->getEps()); BOOST_CHECK_EQUAL(node.getIntegrationWeightGrad()->getEps(), integration_weight_grad->getEps()); // Check smart pointer re-assignment activation.reset(new ReLUOp<float>()); activation_grad.reset(new ReLUGradOp<float>()); integration.reset(new SumOp<float>()); integration_error.reset(new SumErrorOp<float>()); integration_weight_grad.reset(new SumWeightGradOp<float>()); BOOST_CHECK_NE(node.getActivation(), activation.get()); BOOST_CHECK_NE(node.getActivationGrad(), activation_grad.get()); BOOST_CHECK_NE(node.getIntegration(), integration.get()); BOOST_CHECK_NE(node.getIntegrationError(), integration_error.get()); BOOST_CHECK_NE(node.getIntegrationWeightGrad(), integration_weight_grad.get()); } BOOST_AUTO_TEST_CASE(gettersAndSetters2) { Node<float> node; node.setId(1); Eigen::Tensor<float, 2> output(2, 5), input(2, 5), derivative(2, 5), error(2, 5), dt(2, 5); output.setZero(); input.setConstant(1); derivative.setConstant(2); error.setConstant(3); dt.setConstant(4); node.setOutput(output); node.setInput(input); node.setDerivative(derivative); node.setError(error); node.setDt(dt); BOOST_CHECK_EQUAL(node.getInput()(0, 0), 1.0); BOOST_CHECK_EQUAL(node.getInput()(1, 4), 1.0); BOOST_CHECK_EQUAL(node.getOutput()(0,0), 0.0); BOOST_CHECK_EQUAL(node.getOutput()(1,4), 0.0); BOOST_CHECK_EQUAL(node.getDerivative()(0,0), 2.0); BOOST_CHECK_EQUAL(node.getDerivative()(1,4), 2.0); BOOST_CHECK_EQUAL(node.getError()(0,0), 3.0); BOOST_CHECK_EQUAL(node.getError()(1,4), 3.0); BOOST_CHECK_EQUAL(node.getDt()(0,0), 4.0); BOOST_CHECK_EQUAL(node.getDt()(1,4), 4.0); } BOOST_AUTO_TEST_CASE(assignment) { Node<float> node; node.setId(1); node.setName("Node1"); node.setType(NodeType::hidden); node.setStatus(NodeStatus::initialized); std::shared_ptr<ActivationOp<float>> activation(new TanHOp<float>()); std::shared_ptr<ActivationOp<float>> activation_grad(new TanHGradOp<float>()); node.setActivation(activation); node.setActivationGrad(activation_grad); std::shared_ptr<IntegrationOp<float>> integration(new ProdOp<float>()); std::shared_ptr<IntegrationErrorOp<float>> integration_error(new ProdErrorOp<float>()); std::shared_ptr<IntegrationWeightGradOp<float>> integration_weight_grad(new ProdWeightGradOp<float>()); node.setIntegration(integration); node.setIntegrationError(integration_error); node.setIntegrationWeightGrad(integration_weight_grad); node.setModuleId(4); node.setModuleName("Module1"); node.setDropProbability(1.0f); // Check assignment #1 (copied references) Node<float> node2(node); BOOST_CHECK_EQUAL(node.getId(), node2.getId()); BOOST_CHECK_EQUAL(node.getName(), node2.getName()); BOOST_CHECK(node.getType() == node2.getType()); BOOST_CHECK(node.getStatus() == node2.getStatus()); BOOST_CHECK_NE(node.getActivation(), node2.getActivation()); BOOST_CHECK_NE(node.getActivationGrad(), node2.getActivationGrad()); BOOST_CHECK_NE(node.getIntegration(), node2.getIntegration()); BOOST_CHECK_NE(node.getIntegrationError(), node2.getIntegrationError()); BOOST_CHECK_NE(node.getIntegrationWeightGrad(), node2.getIntegrationWeightGrad()); BOOST_CHECK_EQUAL(node.getModuleId(), node2.getModuleId()); BOOST_CHECK_EQUAL(node.getModuleName(), node2.getModuleName()); BOOST_CHECK_EQUAL(node.getDropProbability(), node2.getDropProbability()); // Check assignment #2 (shared references) Node<float> node3 = node; BOOST_CHECK_EQUAL(node.getId(), node3.getId()); BOOST_CHECK_EQUAL(node.getName(), node3.getName()); BOOST_CHECK(node.getType() == node3.getType()); BOOST_CHECK(node.getStatus() == node3.getStatus()); BOOST_CHECK_NE(node.getActivation(), node2.getActivation()); BOOST_CHECK_NE(node.getActivationGrad(), node2.getActivationGrad()); BOOST_CHECK_NE(node.getIntegration(), node2.getIntegration()); BOOST_CHECK_NE(node.getIntegrationError(), node2.getIntegrationError()); BOOST_CHECK_NE(node.getIntegrationWeightGrad(), node2.getIntegrationWeightGrad()); BOOST_CHECK_EQUAL(node.getModuleId(), node3.getModuleId()); BOOST_CHECK_EQUAL(node.getModuleName(), node3.getModuleName()); BOOST_CHECK_EQUAL(node.getDropProbability(), node3.getDropProbability()); } // [TODO: broke when adding NodeData] //BOOST_AUTO_TEST_CASE(initNode2) //{ // Node<float> node; // node.setId(1); // node.setType(NodeType::hidden); // // node.setDropProbability(0.0f); // node.initNode(2, 5); // Eigen::Tensor<float, 2> drop_test(2, 5); // drop_test.setConstant(4.0f); // node.setOutput(drop_test); // BOOST_CHECK_EQUAL(node.getOutput()(0, 0), 4.0); // BOOST_CHECK_EQUAL(node.getOutput()(1, 4), 4.0); // // node.setDropProbability(1.0f); // node.initNode(2, 5); // BOOST_CHECK_EQUAL(node.getOutput()(0, 0), 0.0); // BOOST_CHECK_EQUAL(node.getOutput()(1, 4), 0.0); //} BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELINTERPRETER_H #define EVONET_MODELINTERPRETER_H // .h #include <EvoNet/ml/Model.h> #include <EvoNet/ml/NodeTensorData.h> #include <EvoNet/ml/WeightTensorData.h> #include <EvoNet/ml/IntegrationFunctionTensor.h> #include <EvoNet/ml/ActivationFunctionTensor.h> #include <EvoNet/ml/SolverTensor.h> #include <EvoNet/ml/LossFunctionTensor.h> #include <EvoNet/ml/OpToTensorOp.h> #include <EvoNet/ml/ModelResources.h> #include <unsupported/Eigen/CXX11/Tensor> #include <vector> #include <map> #include <set> #include <cereal/access.hpp> // serialiation of private members #include <cereal/types/memory.hpp> #include <cereal/types/map.hpp> #include <cereal/types/utility.hpp> // std::pair #include <cereal/types/vector.hpp> // .cpp #include <EvoNet/ml/ModelErrorData.h> #include <EvoNet/ml/ModelKernal.h> #include <stdexcept> namespace EvoNet { /* Structures required to identify node operations */ template<typename TensorT> struct OperationResult { std::shared_ptr<Node<TensorT>> sink_node; int time_step = 0; template<class Archive> void serialize(Archive& archive) { archive(sink_node, time_step); } }; template<typename TensorT> struct OperationArguments { std::shared_ptr<Node<TensorT>> source_node; std::shared_ptr<Weight<TensorT>> weight; std::string link_name; int time_step = 0; template<class Archive> void serialize(Archive& archive) { archive(source_node, weight, link_name, time_step); } }; template<typename TensorT> struct OperationList { OperationResult<TensorT> result; std::vector<OperationArguments<TensorT>> arguments; int operation_index = -1; template<class Archive> void serialize(Archive& archive) { archive(result, arguments, operation_index); } }; /* Structures required for layer operations */ template<typename TensorT, typename DeviceT> class OperationLayer { public: int tensor_index = 0; int time_step = 0; std::shared_ptr<IntegrationTensorOp<TensorT, DeviceT>> integration = nullptr; std::shared_ptr<IntegrationErrorTensorOp<TensorT, DeviceT>> integration_error = nullptr; std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, DeviceT>> integration_weight_grad = nullptr; std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> activation = nullptr; std::shared_ptr<ActivationTensorOp<TensorT, DeviceT>> activation_grad = nullptr; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(tensor_index, time_step, integration, integration_error, integration_weight_grad, activation, activation_grad); } }; template<typename TensorT, typename DeviceT> class OperationWeight { public: int tensor_index = 0; std::shared_ptr<WeightInitOp<TensorT>> weight_init = nullptr; std::shared_ptr<SolverTensorOp<TensorT, DeviceT>> solver = nullptr; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(tensor_index, weight_init, solver); } }; /* Class used for layer operations */ template<typename TensorT, typename DeviceT> class OperationTensorStep { public: OperationLayer<TensorT, DeviceT> sink_layer; OperationLayer<TensorT, DeviceT> source_layer; OperationWeight<TensorT, DeviceT> weight; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(sink_layer, source_layer, weight); } }; /** @brief Directed Network Model Interpreter Assumptions about the model structure: 1. Inputs can only be sources 2. Outputs can only be sinks (will break back propogation algorithm) */ template<typename TensorT, typename DeviceT> class ModelInterpreter { public: ModelInterpreter() = default; ///< Default constructor ModelInterpreter(const ModelInterpreter& other); ///< Copy constructor that does not create a shared memory address between model nodes/links/weights ModelInterpreter(const ModelResources& model_resources); ///< Copy constructor that does not create a shared memory address between model nodes/links/weights ~ModelInterpreter() = default; ///< Default destructor inline bool operator==(const ModelInterpreter& other) const { // BUG: // - 'operator __surrogate_func': no matching overloaded function found // - Failed to specialize function template 'unknown-type std::equal_to<void>::operator ()(_Ty1 && _Ty2 &&) const' return std::tie( //operation_steps_, //layer_tensors_, //weight_tensors_, //model_error_, //model_resources_ ) == std::tie( //other.operation_steps_, //other.layer_tensors_, //other.weight_tensors_, //other.model_error_, //other.model_resources_ ); } inline bool operator!=(const ModelInterpreter& other) const { return !(*this == other); } /** @brief Copy assignment operator that creates a new model with different memory addresses */ inline ModelInterpreter& operator=(const ModelInterpreter& other) { model_resources_ = other.model_resources_; return *this; } /** @brief Assigns output or error values to the nodes. The node statuses are then changed accordingly (i.e., status_update of "activated" will update the output values of the node and status_update of "corrected" will update the error values of the node. dimensions of batch size by memory size by nodes @param[in] values Values to assign to the node @param[in] node_names @param[in] value_type ("output", "derivative", "error", or "dt") */ void mapValuesToLayers( Model<TensorT>& model, const Eigen::Tensor<TensorT, 3>& values, const std::vector<std::string>& node_names, const std::string& value_type); /** @brief Initializes the bias nodes to an output of 1 The reason this is currently required is that layers are not seperated by NodeType. This optimization has the side-effect that bias nodes may not be initialized to 1, but instead 0. To correct for this, we seperately initialize them here. dimensions of batch size by memory size by nodes @param[in] model */ void initBiases(Model<TensorT>& model); /** @brief Initializes Node Output, Input, Derivative, and Error tensors to 0 */ void reInitNodes(); /** @brief Initializes Model Error to 0 */ void reInitModelError(); /** @brief A prelude to a forward propogation step. Returns a vector of links and associated nodes that satisfy the following conditions: 1. all sink output values are unknown (i.e. inactive), 2. all source node output values are known (i.e. active). 3. all nodes need not be the same type @param[out] FP_operations_map Key/Value pair of sink node name to FP_operations index @param[out] FP_operations */ void getNextInactiveLayer(Model<TensorT>& model, std::map<std::string, int>& FP_operations_map, std::vector<OperationList<TensorT>>& FP_operations); void getNextInactiveLayerWOBiases(Model<TensorT>& model, std::map<std::string, int>& FP_operations_map, std::vector<OperationList<TensorT>>& FP_operations); /** @brief Continuation of the forward propogation step that identifies all biases for the identified sink nodes. Returns a vector of links and associated nodes that satisfy the following conditions: 1. all sink output values are unknown (i.e. inactive), 2. all source node output values are known (i.e. active) and biases. @param[out] FP_operations_map Key/Value pair of sink node name to FP_peroations index @param[out] FP_operations @param[out] sink_nodes_with_biases */ void getNextInactiveLayerBiases(Model<TensorT>& model, std::map<std::string, int>& FP_operations_map, std::vector<OperationList<TensorT>>& FP_operations, std::vector<std::string>& sink_nodes_with_biases ); /** @brief Continuation of the forward propogation step that identifies all cyclic source nodes for the identified sink nodes. Returns a vector of links and associated nodes that satisfy the following conditions: 1. all sink output values are unknown (i.e. inactive), 2. all source node output values are unknown (i.e. inactive). @param[out] FP_operations_map Key/Value pair of sink node name to FP_peroations index @param[out] FP_operations @param[out] sink_nodes_with_cycles */ void getNextInactiveLayerCycles(Model<TensorT>& model, std::map<std::string, int>& FP_operations_map, std::vector<OperationList<TensorT>>& FP_operations, std::set<std::string>& sink_nodes_with_cycles); /** @brief Prunes identified cyclic nodes that are not in fact part of a cycle but are instead not yet activated and not yet ready to fire. @param[out] FP_operations_map Key/Value pair of sink node name to FP_peroations index @param[out] FP_operations @param[out] sink_nodes_with_cycles */ void pruneInactiveLayerCycles(Model<TensorT>& model, std::map<std::string, int>& FP_operations_map, std::map<std::string, int>& FP_operations_map_cycles, std::vector<OperationList<TensorT>>& FP_operations, std::vector<OperationList<TensorT>>& FP_operations_cycles, std::set<std::string>& sink_nodes_with_cycles); /** @brief Expands the current operation list to satisfy the following assumptions: 1. arguments for a given sink node have the same time-step/activation/node_integration 2. all links have the same solver and weight_init operator 3. arguments are not a mix of nodes from pre-identified layers and nodes that have not yet been partitioned into a layer [TODO: add tests!] @param[in] FP_operations @param[out] FP_operations_expanded Expanded FP_operations list */ void expandAllForwardPropogationOperations(const std::vector<OperationList<TensorT>>& FP_operations, std::vector<OperationList<TensorT>>& FP_operations_expanded); /** @brief Re-organizes the identified layers into tensors and attempts to optimizes the layer operations to maximize hardware acceleration. [TODO: add tests] @param[in] FP_operations @param[in] identified_sink_nodes Set of identified sink nodes @param[in] fast_check Skips the most time intensive check required for models without layer name specifications @returns map of identified operations consisting of the identifying sink node name or module name for the operation and a list of indices corresponding to the operations in FP_operations */ std::map<std::string, std::vector<int>> getTensorOperations(const std::vector<OperationList<TensorT>>& FP_operations, std::set<std::string>& identified_sink_nodes, const bool& fast_check); bool checkPreviousOperations_(const std::vector<OperationList<TensorT>>& FP_operations, std::map<std::string, std::vector<int>>& operations_map, const int& operations_iter1, const int& operations_iter2); bool checkFutureOperations_(const std::vector<OperationList<TensorT>>& FP_operations, const std::string& sink_ops_key_1, const std::string& sink_ops_key_2, const int& operations_iter1, const int& operations_iter2, const std::set<std::string>& identified_sink_nodes); /** @brief Estimate the forward propogation layer dimensions. The method determines what each node and weight tensor size is as well as whether they need to be made. TODO: additional descriptions @param[in] FP_operations @param[in] operations_map @param[out] source_layer_sizes @param[out] sink_layer_sizes @param[out] weight_indices @param[out] shared_weight_indices @param[out] weight_values @param[out] make_source_tensors @param[out] make_sink_tensors @param[out] make_weight_tensors @param[out] batch_size @param[out] memory_size @param[out] train TODO... */ void getForwardPropogationLayerTensorDimensions(const std::vector<OperationList<TensorT>>& FP_operations, const std::map<std::string, std::vector<int>>& operations_map, std::vector<int>& source_layer_sizes, std::vector<int>& sink_layer_sizes, std::vector<std::vector<std::pair<int, int>>>& weight_indices, std::vector<std::map<std::string, std::vector<std::pair<int, int>>>>& shared_weight_indices, std::vector<std::vector<TensorT>>& weight_values, std::vector<bool>& make_source_tensor, std::vector<bool>& make_sink_tensor, std::vector<bool>& make_weight_tensor, std::vector<int>& source_layer_pos, std::vector<int>& sink_layer_pos, std::map<int, int>& layer_pos_max_size, std::map<std::string, int>& layer_name_pos, const int& tensor_layers_cnt, const int& weight_layers_cnt); /** @brief Allocate memory for all node and weight tensors @param[in] FP_operations @param[in] tensor_ops_steps */ void setForwardPropogationLayerTensors_(const std::vector<OperationList<TensorT>>& FP_operations, const std::vector<std::map<std::string, std::vector<int>>>& tensor_ops_steps, const int& batch_size, const int& memory_size, const bool& train); /** @brief Create a unique key to different nodes by time_step, node_integration, and node_activation methods @param[in] time_step @param[in] node_type [Currently not used] @param[in] node_integration @param[in] node_activation @param[in] node_layer_name @param[in] node_layer_index @param[in] weight_layer_name */ static std::string makeForwardPropogationOperationsKey(const int & time_step, const NodeType& node_type, const std::string & node_integration, const std::string & node_activation, const std::string& node_layer_name, const int& node_layer_index, const std::string& weight_layer_name); /** @brief Convert a graph model to sequence of tensor operations @param[in, out] model Network model @param[in] batch_size Batch size @param[in] memory_size Memory size @param[in] train Boolean to indicate training or testing (needed for dropout or drop connection) @param[in] fast_check Boolean to use a faster but incomplete tensor compatibility check when manually specifying layers @param[in] find_cycles Boolean to search for cyclic nodes @param[in] preserve_OoO Boolean to indicate whether the order of operation (OoO) of the model should be preserved (true) or the model should be treated as a graph where all operations happen simultaneously (false) */ void getForwardPropogationOperations(Model<TensorT>& model, const int& batch_size, const int& memory_size, const bool& train, const bool& fast_check, const bool& find_cycles, const bool& preserve_OoO); /** @brief Convert a graph model to sequence of tensor operations preserving the order of operations @param[in, out] model Network model @param[in] find_cycles Boolean to search for cyclic nodes @param[out] FP_operations_expanded List of forward (and reverse) operations @param[out] iter Number of operations */ void getFPOpsOoO_(Model<TensorT>& model, std::vector<OperationList<TensorT>>& FP_operations_expanded, int& iter); /** @brief Convert a graph model to sequence of tensor operations without preserving the order of operations @param[in, out] model Network model @param[in] find_cycles Boolean to search for cyclic nodes @param[out] FP_operations_expanded List of forward (and reverse) operations @param[out] iter Number of operations */ void getFPOpsGraph_(Model<TensorT>& model, std::vector<OperationList<TensorT>>& FP_operations_expanded, int& iter); /** @brief Allocate Node and Weight tensor memory for all model operations. Source and sink layer activations are created using the first node in the layers, respecively. Source and sink layer integrations are created using the first node in the layers, respectively. Weight solver params tensors are created using the first weight in the layer. Weight matrix is initialized using the first weight in the layer. @param[in] FP_operations @param[in] operations_map @param[in] source_layer_sizes @param[in] sink_layer_sizes @param[in] weight_indices @param[in] shared_weight_indices @param[in] weight_values @param[in] make_source_tensors @param[in] make_sink_tensors @param[in] make_weight_tensors @param[in] batch_size @param[in] memory_size @param[in] train */ virtual void allocateForwardPropogationLayerTensors(const std::vector<OperationList<TensorT>>& FP_operations, const std::map<std::string, std::vector<int>>& operations_map, const std::vector<int>& source_layer_sizes, const std::vector<int>& sink_layer_sizes, const std::vector<std::vector<std::pair<int, int>>> weight_indices, std::vector<std::map<std::string, std::vector<std::pair<int, int>>>>& shared_weight_indices, const std::vector<std::vector<TensorT>>& weight_values, const std::vector<bool>& make_source_tensors, const std::vector<bool>& make_sink_tensors, const std::vector<bool>& make_weight_tensors, const int& batch_size, const int& memory_size, const bool& train) = 0; /** @brief Execute model kernal methods required for forward propogation @param[in] time_step The current time-step to operate on */ virtual void executeForwardPropogationOperations(const int& time_step) = 0; /** @brief Execute model kernal methods required for calculating the model and output node error @param[in] time_step The current time-step to operate on */ virtual void executeModelErrorOperations(Eigen::Tensor<TensorT, 2>& expected, const int& layer_id, std::shared_ptr<LossFunctionTensorOp<TensorT,DeviceT>>& loss_function, std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>>& loss_function_grad, const int& time_step) = 0; /** @brief Execute model kernal methods required for calculating the model metrics (e.g., accuracy) @param[in] time_step The current time-step to operate on */ virtual void executeModelMetricOperations(Eigen::Tensor<TensorT, 2>& expected, const int& layer_id, std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> metric_function, const int& time_step, const int& metric_index) = 0; /** @brief Execute model kernal methods required for backward propogation @param[in] time_step The current time-step to operate on */ virtual void executeBackwardPropogationOperations(const int& time_step) = 0; /** @brief Execute model kernal methods required for weight error calculations */ virtual void executeWeightErrorOperations() = 0; /** @brief Execute model kernal methods required for weight update calculations @param[in] iter The number of training iterations */ virtual void executeWeightUpdateOperations(const int& iter) = 0; void addLayerTensor(std::shared_ptr<NodeTensorData<TensorT, DeviceT>>& layer); ///< add a layer to the cache void clearLayerTensors(); ///< clear all layers from the cache std::shared_ptr<NodeTensorData<TensorT, DeviceT>> getLayerTensor(const int& layer_index); ///< get a layer from the cache void addWeightTensor(std::shared_ptr<WeightTensorData<TensorT, DeviceT>>& weight); ///< add a weight to the cache void clearWeightTensors(); ///< clear all weights from the cache std::shared_ptr<WeightTensorData<TensorT, DeviceT>> getWeightTensor(const int& weight_index); ///< get a weight from the cache virtual void allocateModelErrorTensor(const int& batch_size, const int& memory_size, const int& n_metrics) = 0; ///< set the model error std::shared_ptr<ModelErrorData<TensorT, DeviceT>> getModelError(); ///< get the model error void addOperationSteps(const std::vector<OperationTensorStep<TensorT, DeviceT>>& operation_steps); std::vector<OperationTensorStep<TensorT, DeviceT>> getOperationSteps(const int& operation_index); void clearOperationSteps(); ///< clear the operations caches /** @brief Foward propogation through time (FPTT) of the network model. @param[in] time_steps The number of time_steps forward to continuously calculate node outputs. @param[in] values Input values at each time step where dim0: batch_size, dim1: time_step, and dim2: nodes. @param[in] node_names @param[in] dt Node time resolution */ void FPTT(const int& time_steps); /** @brief Calculates the error of the model through time (CETT) with respect to the expected values @param[in] values Expected node output values (dim0: batch_size, dim1: memory_size, dim2: output nodes) where t=n to t=0 @param[in] node_names Output nodes */ void CETT(Model<TensorT>& model, const Eigen::Tensor<TensorT, 3>& values, const std::vector<std::string>& node_names, std::shared_ptr<LossFunctionOp<TensorT>>& loss_function, std::shared_ptr<LossFunctionGradOp<TensorT>>& loss_function_grad, const int& time_steps); /** @brief Calculates the metrics of the model through time (CMTT) with respect to the expected values @param[in] values Expected node output values (dim0: batch_size, dim1: memory_size, dim2: output nodes) where t=n to t=0 @param[in] node_names Output nodes @param[in] metric_function The metric function to evaluate on the expected and predicted node values @param[in] time_steps The number of time_steps to evaluate in time @param[in] metric_index The index of the metric function to evaluate */ void CMTT(Model<TensorT>& model, const Eigen::Tensor<TensorT, 3>& values, const std::vector<std::string>& node_names, std::shared_ptr<MetricFunctionOp<TensorT>>& metric_function, const int& time_steps, const int& metric_index); /** @brief Truncated Back Propogation Through Time (TBPTT) of the network model. @param[in] time_steps The number of time_steps backwards to unfold the network model. */ void TBPTT(const int& time_steps); /** @brief Recurrent Real Time Learning (RTRL) of the network model. @param[in] time_steps The number of time_steps backwards to unfold the network model. */ void RTRL(const int& time_steps); /** @brief Update the weights @param[in] iter The number of training iterations */ void updateWeights(const int& iter); /** @brief Transfer Model error, weights, and output node values from the model interpreter to the model @param[in, out] model The network model */ virtual void getModelResults(Model<TensorT>& model, const bool& output_nodes, const bool& weights, const bool& model_error, const bool& input_nodes) = 0; /** @brief Update the weight solver params NOTE: this method is only safe for updating the learning rate. More sophisticated checks would need to be implemented for updating other paramaters when multiple solvers can be used. @param[in] param_index The parameter index to update (i.e., 0 for learning rate) @param[in] param_factor The factor to change the parameter value by (i.e., 0.1) */ virtual void updateSolverParams(const int& param_index, const TensorT& param_factor) = 0; void setModelResources(const ModelResources& model_resources); ///< model_resources setter ModelResources getModelResources(); ///< model_resources getter /** @brief Estimate the memory footprint of all Tensor Layers @param[in] model The network model @param[in] batch_size @param[in] memory_size */ virtual void checkMemory(const Model<TensorT>& model, const int& batch_size, const int& memory_size) = 0; /** @brief Clear model interpreter resources including the following structures: - operation_steps - layer_tensors_ - weight_tensors_ - model_error_ - tensor_ops_steps_ - FP_operations_ */ void clear_cache(); std::vector<std::map<std::string, std::vector<int>>> getTensorOpsSteps() const; ///< retrieve the tensor_ops_steps_ std::vector<OperationList<TensorT>> getFPOperations() const; ///< retrieve the FP_operations /** @brief Print the tensor ops steps to the screen for faster debugging of layer allocation errors */ void printTensorOpsSteps(std::string delimiter = "\t") const; protected: std::vector<std::vector<OperationTensorStep<TensorT, DeviceT>>> operation_steps_; std::vector<std::shared_ptr<NodeTensorData<TensorT, DeviceT>>> layer_tensors_; std::vector<std::shared_ptr<WeightTensorData<TensorT, DeviceT>>> weight_tensors_; std::shared_ptr<ModelErrorData<TensorT, DeviceT>> model_error_; ModelResources model_resources_; private: std::vector<std::map<std::string, std::vector<int>>> tensor_ops_steps_; std::vector<OperationList<TensorT>> FP_operations_; friend class cereal::access; //template<class Archive> //void serialize(Archive& archive) { // archive(operation_steps_, layer_tensors_, weight_tensors_, model_error_, model_resources_); //} template<class Archive> void serialize(Archive& archive) { archive(tensor_ops_steps_, FP_operations_, model_resources_); } }; template<typename TensorT, typename DeviceT> ModelInterpreter<TensorT, DeviceT>::ModelInterpreter(const ModelInterpreter<TensorT, DeviceT>& other) { model_resources_ = other.model_resources_; } template<typename TensorT, typename DeviceT> ModelInterpreter<TensorT, DeviceT>::ModelInterpreter(const ModelResources& model_resources): model_resources_(model_resources) { } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::mapValuesToLayers(Model<TensorT>& model, const Eigen::Tensor<TensorT, 3>& values, const std::vector<std::string>& node_names, const std::string & value_type) { if (layer_tensors_.size() <= 0) { clear_cache(); // clean up before exiting std::string error_char = "Tensor layers have not been created. Cannot map values to layers."; throw std::runtime_error(error_char); } // Buffer the input values Eigen::Tensor<TensorT, 3> values_buffered = values.pad(Eigen::array<std::pair<int, int>, 3>({std::make_pair(0,0),std::make_pair(0,1),std::make_pair(0,0)})); // check dimension mismatches if (node_names.size() != values_buffered.dimension(2)) { clear_cache(); // clean up before exiting const std::string error = "The number of input features " + std::to_string(values_buffered.dimension(2)) + " and the number of nodes " + std::to_string(node_names.size()) + " do not match."; throw std::runtime_error(error); } // assumes the tensors have been cached else if (layer_tensors_[0]->getBatchSize() != values_buffered.dimension(0)) { clear_cache(); // clean up before exiting const std::string error = "The number of input samples " + std::to_string(values_buffered.dimension(0)) + " and the batch size " + std::to_string(layer_tensors_[0]->getBatchSize()) + " do not match."; throw std::runtime_error(error); } else if (layer_tensors_[0]->getMemorySize() != values_buffered.dimension(1)){ clear_cache(); // clean up before exiting const std::string error = "The number of input time steps " + std::to_string(values_buffered.dimension(1)) + " and the memory size " + std::to_string(layer_tensors_[0]->getMemorySize()) + " do not match."; throw std::runtime_error(error); } for (int i = 0; i < node_names.size(); ++i){ auto node = model.nodes_.at(node_names[i]); if (node->getTensorIndex().first != -1) { // copy over the values if (value_type == "output") getLayerTensor(node->getTensorIndex().first)->getOutput().chip(node->getTensorIndex().second, 2) = values_buffered.chip(i, 2); if (value_type == "input") getLayerTensor(node->getTensorIndex().first)->getInput().chip(node->getTensorIndex().second, 2) = values_buffered.chip(i, 2); else if (value_type == "error") getLayerTensor(node->getTensorIndex().first)->getError().chip(node->getTensorIndex().second, 2) = values_buffered.chip(i, 2); else if (value_type == "derivative") getLayerTensor(node->getTensorIndex().first)->getDerivative().chip(node->getTensorIndex().second, 2) = values_buffered.chip(i, 2); else if (value_type == "dt") getLayerTensor(node->getTensorIndex().first)->getDt().chip(node->getTensorIndex().second, 2) = values_buffered.chip(i, 2); } else { clear_cache(); // clean up before exiting const std::string error = "Node " + node->getName() + " has not been assigned a tensor index!"; throw std::runtime_error(error); // Error is cause by an added recursive link that "blocks" forward propogation } } } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::initBiases(Model<TensorT>& model) { if (layer_tensors_.size() <= 0) { clear_cache(); // clean up before exiting std::string error_char = "Tensor layers have not been created. Cannot initiate biases."; std::cout << error_char << std::endl; throw std::runtime_error(error_char); } Eigen::Tensor<TensorT, 2> one((int)layer_tensors_[0]->getBatchSize(), (int)layer_tensors_[0]->getMemorySize()); one.setConstant((TensorT)1); Eigen::Tensor<TensorT, 2> zero((int)layer_tensors_[0]->getBatchSize(), (int)layer_tensors_[0]->getMemorySize()); zero.setConstant((TensorT)0); for (auto& node_map : model.nodes_) { if (node_map.second->getType() == NodeType::bias) { if (node_map.second->getTensorIndex().first != -1) { getLayerTensor(node_map.second->getTensorIndex().first)->getOutput().chip(node_map.second->getTensorIndex().second, 2) = one; getLayerTensor(node_map.second->getTensorIndex().first)->getInput().chip(node_map.second->getTensorIndex().second, 2) = one; } else { clear_cache(); // clean up before exiting std::string error_char = "Node " + node_map.second->getName() + " has not been assigned a tensor index!"; std::cout << error_char << std::endl; throw std::runtime_error(error_char); // Error is cause by an added recursive link that "blocks" forward propogation } } else if (node_map.second->getType() == NodeType::zero) { if (node_map.second->getTensorIndex().first != -1) { getLayerTensor(node_map.second->getTensorIndex().first)->getOutput().chip(node_map.second->getTensorIndex().second, 2) = zero; getLayerTensor(node_map.second->getTensorIndex().first)->getInput().chip(node_map.second->getTensorIndex().second, 2) = zero; } else { clear_cache(); // clean up before exiting std::string error_char = "Node " + node_map.second->getName() + " has not been assigned a tensor index!"; std::cout << error_char << std::endl; throw std::runtime_error(error_char); // Error is cause by an added recursive link that "blocks" forward propogation } } } } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::reInitNodes() { for (auto& layer_tensor: layer_tensors_) { Eigen::Tensor<TensorT, 3> zero((int)layer_tensor->getBatchSize(), (int)layer_tensor->getMemorySize(), (int)layer_tensor->getLayerSize()); zero.setConstant((TensorT)0); Eigen::Tensor<TensorT, 3> one((int)layer_tensor->getBatchSize(), (int)layer_tensor->getMemorySize(), (int)layer_tensor->getLayerSize()); one.setConstant((TensorT)1); if (layer_tensor->getLayerIntegration() == "ProdOp" || layer_tensor->getLayerIntegration() == "ProdSCOp") { layer_tensor->setInput(one); layer_tensor->setOutput(zero); layer_tensor->setDerivative(zero); layer_tensor->setError(zero); layer_tensor->setDt(zero); } else { layer_tensor->setInput(zero); layer_tensor->setOutput(zero); layer_tensor->setDerivative(zero); layer_tensor->setError(zero); layer_tensor->setDt(zero); } } } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::reInitModelError() { Eigen::Tensor<TensorT, 2> zero((int)model_error_->getBatchSize(), (int)model_error_->getMemorySize()); zero.setConstant((TensorT)0); model_error_->setError(zero); Eigen::Tensor<TensorT, 2> zero_metric((int)model_error_->getNMetrics(), (int)model_error_->getMemorySize()); zero_metric.setConstant((TensorT)0); model_error_->setMetric(zero_metric); } template<typename TensorT, typename DeviceT> void ModelInterpreter<TensorT, DeviceT>::getNextInactiveLayer(Model<TensorT>& model, std::map<std::string, int>& FP_operations_map, std::vector<OperationList<TensorT>>& FP_operations) { // get all links where the source node is active and the sink node is inactive // except for biases for (auto& link_map : model.links_) { if ( //model.nodes_.at(link_map.second->getSourceNodeName())->getType() != NodeType::bias && model.nodes_.at(link_map.second->getSourceNodeName())->getStatus() == NodeStatus::activated && model.nodes_.at(link_map.second->getSinkNodeName())->getStatus() == NodeStatus::initialized) { OperationArguments<TensorT> arguments; arguments.source_node = model.nodes_.at(link_map.second->getSourceNodeName()); arguments.weight = model.weights_.at(link_map.second->getWeightName()); arguments.time_step = 0; arguments.link_name = link_map.first; std::string ops_key = link_map.second->getSinkNodeName(); auto found = FP_operations_map.emplace(ops_key, (int)FP_operations.size()); if (!found.second) { FP_operations[FP_operations_map.at(ops_key)].arguments.push_back(arguments); } else { OperationList<TensorT> operation_list; OperationResult<TensorT> result; result.sink_node = model.nodes_.at(link_map.second->getSinkNodeName()); operation_list.result = result; operation_list.arguments.push_back(arguments); FP_operations.push_back(operation_list); } } } } template<typename TensorT, typename DeviceT> void ModelInterpreter<TensorT, DeviceT>::getNextInactiveLayerWOBiases(Model<TensorT>& model, std::map<std::string, int>& FP_operations_map, std::vector<OperationList<TensorT>>& FP_operations) { // get all links where the source node is active and the sink node is inactive // except for biases for (auto& link_map : model.links_) { if ( model.nodes_.at(link_map.second->getSourceNodeName())->getType() != NodeType::bias && model.nodes_.at(link_map.second->getSourceNodeName())->getStatus() == NodeStatus::activated && model.nodes_.at(link_map.second->getSinkNodeName())->getStatus() == NodeStatus::initialized) { //if (FP_operations.size() == 680) // std::cout << "check" << std::endl; OperationArguments<TensorT> arguments; arguments.source_node = model.nodes_.at(link_map.second->getSourceNodeName()); arguments.weight = model.weights_.at(link_map.second->getWeightName()); arguments.time_step = 0; arguments.link_name = link_map.first; std::string ops_key = link_map.second->getSinkNodeName(); auto found = FP_operations_map.emplace(ops_key, (int)FP_operations.size()); if (!found.second) { FP_operations[FP_operations_map.at(ops_key)].arguments.push_back(arguments); } else { OperationList<TensorT> operation_list; OperationResult<TensorT> result; result.sink_node = model.nodes_.at(link_map.second->getSinkNodeName()); operation_list.result = result; operation_list.arguments.push_back(arguments); FP_operations.push_back(operation_list); } } } } template<typename TensorT, typename DeviceT> void ModelInterpreter<TensorT, DeviceT>::getNextInactiveLayerBiases(Model<TensorT>& model, std::map<std::string, int>& FP_operations_map, std::vector<OperationList<TensorT>>& FP_operations, std::vector<std::string>& sink_nodes_with_biases) { // get all the biases for the sink nodes for (auto& link_map : model.links_) { std::string ops_key = link_map.second->getSinkNodeName(); if ( // does not allow for cycles model.nodes_.at(link_map.second->getSourceNodeName())->getType() == NodeType::bias && model.nodes_.at(link_map.second->getSourceNodeName())->getStatus() == NodeStatus::activated && // required regardless if cycles are or are not allowed model.nodes_.at(link_map.second->getSinkNodeName())->getStatus() == NodeStatus::initialized && FP_operations_map.count(ops_key) != 0 // sink node has already been identified ) { OperationArguments<TensorT> arguments; arguments.source_node = model.nodes_.at(link_map.second->getSourceNodeName()); arguments.weight = model.weights_.at(link_map.second->getWeightName()); arguments.time_step = 0; arguments.link_name = link_map.first; FP_operations[FP_operations_map.at(ops_key)].arguments.push_back(arguments); if (std::count(sink_nodes_with_biases.begin(), sink_nodes_with_biases.end(), ops_key) == 0) { sink_nodes_with_biases.push_back(ops_key); } } } } template<typename TensorT, typename DeviceT> void ModelInterpreter<TensorT, DeviceT>::getNextInactiveLayerCycles(Model<TensorT>& model, std::map<std::string, int>& FP_operations_map, std::vector<OperationList<TensorT>>& FP_operations, std::set<std::string>& sink_nodes_with_cycles) { // get cyclic source nodes for (auto& link_map : model.links_) { std::string ops_key = link_map.second->getSinkNodeName(); if ( model.nodes_.at(link_map.second->getSourceNodeName())->getStatus() == NodeStatus::initialized && // required regardless if cycles are or are not allowed model.nodes_.at(link_map.second->getSinkNodeName())->getStatus() == NodeStatus::initialized && FP_operations_map.count(ops_key) != 0 // sink node has already been identified ) { OperationArguments<TensorT> arguments; arguments.source_node = model.nodes_.at(link_map.second->getSourceNodeName()); arguments.weight = model.weights_.at(link_map.second->getWeightName()); arguments.time_step = 1; arguments.link_name = link_map.first; FP_operations[FP_operations_map.at(ops_key)].arguments.push_back(arguments); sink_nodes_with_cycles.insert(ops_key); } } } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::pruneInactiveLayerCycles(Model<TensorT>& model, std::map<std::string, int>& FP_operations_map, std::map<std::string, int>& FP_operations_map_cycles, std::vector<OperationList<TensorT>>& FP_operations, std::vector<OperationList<TensorT>>& FP_operations_cycles, std::set<std::string>& sink_nodes_with_cycles) { // Remove all nodes involved in "cycles" that have arguments // involving source to sink node pairs not identified as cycles if (sink_nodes_with_cycles.size() > 0) { std::vector<std::string> sink_nodes_remove; std::vector<OperationList<TensorT>> FP_operations_copy = FP_operations; for (const std::string& sink_node : sink_nodes_with_cycles) { for (size_t i = FP_operations[FP_operations_map.at(sink_node)].arguments.size(); i < FP_operations_cycles[FP_operations_map_cycles.at(sink_node)].arguments.size(); ++i) { // check if the "cyclic" argument is actually involved in a cycle bool isCyclicOperation = false; for (const auto& cyclic_pair : model.getCyclicPairs()) { if (FP_operations_cycles[FP_operations_map_cycles.at(sink_node)].arguments[i].source_node->getName() == cyclic_pair.first && FP_operations_cycles[FP_operations_map_cycles.at(sink_node)].result.sink_node->getName() == cyclic_pair.second) { isCyclicOperation = true; break; } } // copy over the cyclic operation if (isCyclicOperation) FP_operations_copy[FP_operations_map_cycles.at(sink_node)].arguments.push_back(FP_operations_cycles[FP_operations_map_cycles.at(sink_node)].arguments[i]); // id the sink node for removal else { sink_nodes_remove.push_back(sink_node); break; } } } // remove all identified sink nodes if (sink_nodes_remove.size() > 0) { FP_operations.clear(); for (const auto& FP_operation : FP_operations_copy) if (std::count(sink_nodes_remove.begin(), sink_nodes_remove.end(), FP_operation.result.sink_node->getName()) == 0) FP_operations.push_back(FP_operation); } else FP_operations = FP_operations_copy; } } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::expandAllForwardPropogationOperations(const std::vector<OperationList<TensorT>>& FP_operations, std::vector<OperationList<TensorT>>& FP_operations_expanded) { FP_operations_expanded.clear(); for (const OperationList<TensorT>& FP_operation : FP_operations) { for (const OperationArguments<TensorT>& argument : FP_operation.arguments) { OperationList<TensorT> operations_list; operations_list.result = FP_operation.result; operations_list.arguments.push_back(argument); operations_list.operation_index = FP_operation.operation_index; FP_operations_expanded.push_back(operations_list); } } } template<typename TensorT, typename DeviceT> inline std::map<std::string, std::vector<int>> ModelInterpreter<TensorT, DeviceT>::getTensorOperations(const std::vector<OperationList<TensorT>>& FP_operations, std::set<std::string>& identified_sink_nodes, const bool& fast_check) { std::map<std::string, std::vector<int>> FC_layers; for (size_t operations_iter1 = 0; operations_iter1 < FP_operations.size(); ++operations_iter1) { std::string sink_node_key1 = FP_operations[operations_iter1].result.sink_node->getName() + "/" + std::to_string(operations_iter1); if (identified_sink_nodes.count(sink_node_key1)) continue; // Skip identified sink nodes std::map<std::string, std::vector<int>> FC_layers_tmp; std::set<std::string> identified_sink_nodes_tmp; // Check for compatibility for (size_t operations_iter2 = operations_iter1 + 1; operations_iter2 < FP_operations.size(); ++operations_iter2) { std::string sink_node_key2 = FP_operations[operations_iter2].result.sink_node->getName() + "/" + std::to_string(operations_iter2); if (identified_sink_nodes.count(sink_node_key2)) continue; // Skip identified sink nodes // check if the sink nodes are compatible std::string sink_ops_key_1 = makeForwardPropogationOperationsKey(FP_operations[operations_iter1].result.time_step, FP_operations[operations_iter1].result.sink_node->getType(), FP_operations[operations_iter1].result.sink_node->getIntegration()->getName(), FP_operations[operations_iter1].result.sink_node->getActivation()->getName(), FP_operations[operations_iter1].result.sink_node->getLayerName(), FP_operations[operations_iter1].result.sink_node->getTensorIndex().first, FP_operations[operations_iter1].arguments[0].weight->getLayerName()); std::string sink_ops_key_2 = makeForwardPropogationOperationsKey(FP_operations[operations_iter2].result.time_step, FP_operations[operations_iter2].result.sink_node->getType(), FP_operations[operations_iter2].result.sink_node->getIntegration()->getName(), FP_operations[operations_iter2].result.sink_node->getActivation()->getName(), FP_operations[operations_iter2].result.sink_node->getLayerName(), FP_operations[operations_iter2].result.sink_node->getTensorIndex().first, FP_operations[operations_iter2].arguments[0].weight->getLayerName()); if (sink_ops_key_1 != sink_ops_key_2) continue; // check if the source nodes are compatible std::set<std::string> argument1_nodes, argument2_nodes; for (const auto& argument : FP_operations[operations_iter1].arguments) { std::string ops_key = makeForwardPropogationOperationsKey(argument.time_step, argument.source_node->getType(), argument.source_node->getIntegration()->getName(), argument.source_node->getActivation()->getName(), argument.source_node->getLayerName(), argument.source_node->getTensorIndex().first, argument.weight->getLayerName()); argument1_nodes.insert(ops_key); } for (const auto& argument : FP_operations[operations_iter2].arguments) { std::string ops_key = makeForwardPropogationOperationsKey(argument.time_step, argument.source_node->getType(), argument.source_node->getIntegration()->getName(), argument.source_node->getActivation()->getName(), argument.source_node->getLayerName(), argument.source_node->getTensorIndex().first, argument.weight->getLayerName()); argument2_nodes.insert(ops_key); } if (argument1_nodes != argument2_nodes ) continue; // Run a comprehensive check on future and previous layer compatibility if (!fast_check) { if (!checkPreviousOperations_(FP_operations, FC_layers, operations_iter1, operations_iter2)) continue; if (!checkFutureOperations_(FP_operations, sink_ops_key_1, sink_ops_key_2, operations_iter1, operations_iter2, identified_sink_nodes)) continue; } // update the maps identified_sink_nodes_tmp.insert(sink_node_key1); identified_sink_nodes_tmp.insert(sink_node_key2); std::vector<int> first_operation = { (int)operations_iter1 }; auto found = FC_layers_tmp.emplace(sink_node_key1, first_operation); FC_layers_tmp.at(sink_node_key1).push_back(operations_iter2); } // Check if compatible operations were found, if not add as is if (identified_sink_nodes_tmp.count(sink_node_key1) == 0) { identified_sink_nodes.insert(sink_node_key1); std::vector<int> first_operation = { (int)operations_iter1 }; auto found = FC_layers.emplace(sink_node_key1, first_operation); } else { identified_sink_nodes.insert(identified_sink_nodes_tmp.begin(), identified_sink_nodes_tmp.end()); FC_layers.insert(FC_layers_tmp.begin(), FC_layers_tmp.end()); } } return FC_layers; } template<typename TensorT, typename DeviceT> inline bool ModelInterpreter<TensorT, DeviceT>::checkPreviousOperations_(const std::vector<OperationList<TensorT>>& FP_operations, std::map<std::string, std::vector<int>>& operations_map, const int & operations_iter1, const int & operations_iter2) { // Currently determined layer consistency checks std::set<std::string> sinkAsSourceOps_1s, sinkAsSourceOps_2s, sourceAsSourceOps_1s, sourceAsSourceOps_2s, sinkAsSinkOps_1s, sinkAsSinkOps_2s, sourceAsSinkOps_1s, sourceAsSinkOps_2s; for (const auto& ops_map : operations_map) { // The size of the `FC_layers` structure should be much greater than the arguments for (const int ops_index : ops_map.second) { // Check that the previous sink layers of the current sink layer are the same if (FP_operations[ops_index].result.sink_node->getName() == FP_operations[operations_iter1].result.sink_node->getName()) { sinkAsSinkOps_1s.insert(ops_map.first); } if (FP_operations[ops_index].result.sink_node->getName() == FP_operations[operations_iter2].result.sink_node->getName()) { sinkAsSinkOps_2s.insert(ops_map.first); } for (const auto& argument_ops : FP_operations[ops_index].arguments) { // Check that the previous source layers of the current sink layer are the same if (argument_ops.source_node->getName() == FP_operations[operations_iter1].result.sink_node->getName()) { sinkAsSourceOps_1s.insert(ops_map.first); } if (argument_ops.source_node->getName() == FP_operations[operations_iter2].result.sink_node->getName()) { sinkAsSourceOps_2s.insert(ops_map.first); } // Check source node 1 arguments for (const auto& argument1 : FP_operations[operations_iter1].arguments) { std::string ops_key = makeForwardPropogationOperationsKey(argument1.time_step, argument1.source_node->getType(), argument1.source_node->getIntegration()->getName(), argument1.source_node->getActivation()->getName(), argument1.source_node->getLayerName(), argument1.source_node->getTensorIndex().first, argument1.weight->getLayerName()); // Check that the previous sink layers of the current source layer are the same if (FP_operations[ops_index].result.sink_node->getName() == argument1.source_node->getName()) { sourceAsSinkOps_1s.insert(ops_map.first); } // Check that the previous source layers of the current source layer are the same if (argument_ops.source_node->getName() == argument1.source_node->getName()) { sourceAsSourceOps_1s.insert(ops_map.first); } } // Check source node 2 arguments for (const auto& argument2 : FP_operations[operations_iter2].arguments) { std::string ops_key = makeForwardPropogationOperationsKey(argument2.time_step, argument2.source_node->getType(), argument2.source_node->getIntegration()->getName(), argument2.source_node->getActivation()->getName(), argument2.source_node->getLayerName(), argument2.source_node->getTensorIndex().first, argument2.weight->getLayerName()); // Check that the previous sink layers of the current source layer are the same if (FP_operations[ops_index].result.sink_node->getName() == argument2.source_node->getName()) { sourceAsSinkOps_2s.insert(ops_map.first); } // Check that the previous source layers of the current source layer are the same if (argument_ops.source_node->getName() == argument2.source_node->getName()) { sourceAsSourceOps_2s.insert(ops_map.first); } } } } } if (sinkAsSourceOps_1s != sinkAsSourceOps_2s) return false; if (sourceAsSourceOps_1s != sourceAsSourceOps_2s) return false; if (sinkAsSinkOps_1s != sinkAsSinkOps_2s) return false; if (sourceAsSinkOps_1s != sourceAsSinkOps_2s) return false; return true; } template<typename TensorT, typename DeviceT> inline bool ModelInterpreter<TensorT, DeviceT>::checkFutureOperations_(const std::vector<OperationList<TensorT>>& FP_operations, const std::string& sink_ops_key_1, const std::string& sink_ops_key_2, const int & operations_iter1, const int & operations_iter2, const std::set<std::string>& identified_sink_nodes) { // Future operations layer consistency checks std::set<std::string> sinkAsSourceNode_1s, sinkAsSourceNode_2s, sourceAsSourceNode_1s, sourceAsSourceNode_2s, sinkAsSinkNode_1s, sinkAsSinkNode_2s, sourceAsSinkNode_1s, sourceAsSinkNode_2s, opsCompatibility_1s, opsCompatibility_2s, sinkAsSourceSourceNode_1s, sinkAsSourceSourceNode_2s, sourceAsSourceSourceNode_1s, sourceAsSourceSourceNode_2s, sinkAsSinkSinkNode_1s, sinkAsSinkSinkNode_2s, sourceAsSinkSinkNode_1s, sourceAsSinkSinkNode_2s; std::vector<std::string> sinkAsSourceNode_1v, sinkAsSourceNode_2v, sourceAsSourceNode_1v, sourceAsSourceNode_2v, sinkAsSinkNode_1v, sinkAsSinkNode_2v, sourceAsSinkNode_1v, sourceAsSinkNode_2v, sinkAsSourceSourceNode_1v, sinkAsSourceSourceNode_2v, sourceAsSourceSourceNode_1v, sourceAsSourceSourceNode_2v, sinkAsSinkSinkNode_1v, sinkAsSinkSinkNode_2v, sourceAsSinkSinkNode_1v, sourceAsSinkSinkNode_2v; // Operations key without the time step information std::string sink_ops_key_1_no_t = makeForwardPropogationOperationsKey(0, FP_operations[operations_iter1].result.sink_node->getType(), FP_operations[operations_iter1].result.sink_node->getIntegration()->getName(), FP_operations[operations_iter1].result.sink_node->getActivation()->getName(), FP_operations[operations_iter1].result.sink_node->getLayerName(), FP_operations[operations_iter1].result.sink_node->getTensorIndex().first, FP_operations[operations_iter1].arguments[0].weight->getLayerName()); std::string sink_ops_key_2_no_t = makeForwardPropogationOperationsKey(0, FP_operations[operations_iter2].result.sink_node->getType(), FP_operations[operations_iter2].result.sink_node->getIntegration()->getName(), FP_operations[operations_iter2].result.sink_node->getActivation()->getName(), FP_operations[operations_iter2].result.sink_node->getLayerName(), FP_operations[operations_iter2].result.sink_node->getTensorIndex().first, FP_operations[operations_iter2].arguments[0].weight->getLayerName()); // Operation 3 checks for (size_t operations_iter3 = operations_iter1; operations_iter3 < FP_operations.size(); ++operations_iter3) { std::string sink_node_key3 = FP_operations[operations_iter3].result.sink_node->getName() + "/" + std::to_string(operations_iter3); if (identified_sink_nodes.count(sink_node_key3)) continue; //if (operations_iter3 == operations_iter2 || operations_iter3 == operations_iter1) continue; // Skip current sink nodes std::string sink_ops_key_3 = makeForwardPropogationOperationsKey( FP_operations[operations_iter3].result.time_step, FP_operations[operations_iter3].result.sink_node->getType(), FP_operations[operations_iter3].result.sink_node->getIntegration()->getName(), FP_operations[operations_iter3].result.sink_node->getActivation()->getName(), FP_operations[operations_iter3].result.sink_node->getLayerName(), FP_operations[operations_iter3].result.sink_node->getTensorIndex().first, FP_operations[operations_iter3].arguments[0].weight->getLayerName()); std::string sink_ops_key_3_no_t = makeForwardPropogationOperationsKey(0, FP_operations[operations_iter3].result.sink_node->getType(), FP_operations[operations_iter3].result.sink_node->getIntegration()->getName(), FP_operations[operations_iter3].result.sink_node->getActivation()->getName(), FP_operations[operations_iter3].result.sink_node->getLayerName(), FP_operations[operations_iter3].result.sink_node->getTensorIndex().first, FP_operations[operations_iter3].arguments[0].weight->getLayerName()); for (auto& argument3 : FP_operations[operations_iter3].arguments) { std::string source_ops_key_3 = makeForwardPropogationOperationsKey(argument3.time_step, argument3.source_node->getType(), argument3.source_node->getIntegration()->getName(), argument3.source_node->getActivation()->getName(), argument3.source_node->getLayerName(), argument3.source_node->getTensorIndex().first, argument3.weight->getLayerName()); std::string source_ops_key_3_no_t = makeForwardPropogationOperationsKey(argument3.time_step, argument3.source_node->getType(), argument3.source_node->getIntegration()->getName(), argument3.source_node->getActivation()->getName(), argument3.source_node->getLayerName(), argument3.source_node->getTensorIndex().first, argument3.weight->getLayerName()); // Check if sink node1 will be compatible as future source node if (argument3.source_node->getName() == FP_operations[operations_iter1].result.sink_node->getName()) { sinkAsSourceNode_1v.push_back(source_ops_key_3 + ":" + sink_ops_key_3); } if (source_ops_key_3_no_t == sink_ops_key_1_no_t) { sinkAsSourceNode_1s.insert(FP_operations[operations_iter3].result.sink_node->getName() + "|" + std::to_string(FP_operations[operations_iter3].result.time_step)); } // Check if sink node2 will be compatible as future source node if (argument3.source_node->getName() == FP_operations[operations_iter2].result.sink_node->getName()) { sinkAsSourceNode_2v.push_back(source_ops_key_3 + ":" + sink_ops_key_3); } if (source_ops_key_3_no_t == sink_ops_key_2_no_t) { sinkAsSourceNode_2s.insert(FP_operations[operations_iter3].result.sink_node->getName() + "|" + std::to_string(FP_operations[operations_iter3].result.time_step)); } // Checks for source node 1 for (const auto& argument1 : FP_operations[operations_iter1].arguments) { std::string ops_key = makeForwardPropogationOperationsKey(argument1.time_step, argument1.source_node->getType(), argument1.source_node->getIntegration()->getName(), argument1.source_node->getActivation()->getName(), argument1.source_node->getLayerName(), argument1.source_node->getTensorIndex().first, argument1.weight->getLayerName()); std::string ops_key_no_t = makeForwardPropogationOperationsKey(0, argument1.source_node->getType(), argument1.source_node->getIntegration()->getName(), argument1.source_node->getActivation()->getName(), argument1.source_node->getLayerName(), argument1.source_node->getTensorIndex().first, argument1.weight->getLayerName()); // Check if the source nodes will be compatible as future source nodes if (argument3.source_node->getName() == argument1.source_node->getName()) { sourceAsSourceNode_1v.push_back(source_ops_key_3 + ":" + sink_ops_key_3); } if (source_ops_key_3_no_t == ops_key_no_t) { sourceAsSourceNode_1s.insert(FP_operations[operations_iter3].result.sink_node->getName() + "|" + std::to_string(FP_operations[operations_iter3].result.time_step)); } // Check if the source nodes will be compatible as sink nodes if (FP_operations[operations_iter3].result.sink_node->getName() == argument1.source_node->getName()) { sourceAsSinkNode_1v.push_back(source_ops_key_3 + ":" + sink_ops_key_3); } if (sink_ops_key_3_no_t == ops_key_no_t) { sourceAsSinkNode_1s.insert(argument3.source_node->getName() + "|" + std::to_string(argument3.time_step)); } // Check if the sink nodes will be compatible with future sink nodes if (FP_operations[operations_iter3].result.sink_node->getName() == FP_operations[operations_iter1].result.sink_node->getName()) { sinkAsSinkNode_1v.push_back(source_ops_key_3 + ":" + sink_ops_key_3); } if (sink_ops_key_3_no_t == sink_ops_key_1_no_t) { sinkAsSinkNode_1s.insert(argument3.source_node->getName() + "|" + std::to_string(argument3.time_step)); } // Check if the operations will be compatible if (source_ops_key_3 == ops_key && sink_ops_key_3 == sink_ops_key_1) { opsCompatibility_1s.insert(sink_node_key3); } } // Checks for source nodes 2 for (const auto& argument2 : FP_operations[operations_iter2].arguments) { std::string ops_key = makeForwardPropogationOperationsKey(argument2.time_step, argument2.source_node->getType(), argument2.source_node->getIntegration()->getName(), argument2.source_node->getActivation()->getName(), argument2.source_node->getLayerName(), argument2.source_node->getTensorIndex().first, argument2.weight->getLayerName()); std::string ops_key_no_t = makeForwardPropogationOperationsKey(0, argument2.source_node->getType(), argument2.source_node->getIntegration()->getName(), argument2.source_node->getActivation()->getName(), argument2.source_node->getLayerName(), argument2.source_node->getTensorIndex().first, argument2.weight->getLayerName()); // Check if the source nodes will be compatible as future source nodes if (argument3.source_node->getName() == argument2.source_node->getName()) { sourceAsSourceNode_2v.push_back(source_ops_key_3 + ":" + sink_ops_key_3); } if (source_ops_key_3_no_t == ops_key_no_t) { sourceAsSourceNode_2s.insert(FP_operations[operations_iter3].result.sink_node->getName() + "|" + std::to_string(FP_operations[operations_iter3].result.time_step)); } // Check if the source nodes will be compatible as sink nodes if (FP_operations[operations_iter3].result.sink_node->getName() == argument2.source_node->getName()) { sourceAsSinkNode_2v.push_back(source_ops_key_3 + ":" + sink_ops_key_3); } if (sink_ops_key_3_no_t == ops_key_no_t) { sourceAsSinkNode_2s.insert(argument3.source_node->getName() + "|" + std::to_string(argument3.time_step)); } // Check if the sink nodes will be compatible with future sink nodes if (FP_operations[operations_iter3].result.sink_node->getName() == FP_operations[operations_iter2].result.sink_node->getName()) { sinkAsSinkNode_2v.push_back(source_ops_key_3 + ":" + sink_ops_key_3); } if (sink_ops_key_3_no_t == sink_ops_key_2_no_t) { sinkAsSinkNode_2s.insert(argument3.source_node->getName() + "|" + std::to_string(argument3.time_step)); } // Check if the operations will be compatible if (source_ops_key_3 == ops_key && sink_ops_key_3 == sink_ops_key_2) { opsCompatibility_2s.insert(sink_node_key3); } } // Operation 4 checks if (argument3.source_node->getName() == FP_operations[operations_iter1].result.sink_node->getName() || argument3.source_node->getName() == FP_operations[operations_iter2].result.sink_node->getName() || FP_operations[operations_iter3].result.sink_node->getName() == FP_operations[operations_iter1].result.sink_node->getName() || FP_operations[operations_iter3].result.sink_node->getName() == FP_operations[operations_iter2].result.sink_node->getName() || argument3.source_node->getName() == FP_operations[operations_iter1].arguments[0].source_node->getName() || //ASSUMPTION: arguments are of length 1! argument3.source_node->getName() == FP_operations[operations_iter2].arguments[0].source_node->getName() || FP_operations[operations_iter3].result.sink_node->getName() == FP_operations[operations_iter1].arguments[0].source_node->getName() || //ASSUMPTION: arguments are of length 1! FP_operations[operations_iter3].result.sink_node->getName() == FP_operations[operations_iter2].arguments[0].source_node->getName() ) { for (size_t operations_iter4 = operations_iter1; operations_iter4 < FP_operations.size(); ++operations_iter4) { std::string sink_node_key4 = FP_operations[operations_iter4].result.sink_node->getName() + "/" + std::to_string(operations_iter4); if (identified_sink_nodes.count(sink_node_key4)) continue; //if (operations_iter4 == operations_iter1 || operations_iter4 == operations_iter2 || operations_iter4 == operations_iter3) continue; // Skip current sink nodes std::string sink_ops_key_4 = makeForwardPropogationOperationsKey( FP_operations[operations_iter4].result.time_step, FP_operations[operations_iter4].result.sink_node->getType(), FP_operations[operations_iter4].result.sink_node->getIntegration()->getName(), FP_operations[operations_iter4].result.sink_node->getActivation()->getName(), FP_operations[operations_iter4].result.sink_node->getLayerName(), FP_operations[operations_iter4].result.sink_node->getTensorIndex().first, FP_operations[operations_iter4].arguments[0].weight->getLayerName()); std::string sink_ops_key_4_no_t = makeForwardPropogationOperationsKey(0, FP_operations[operations_iter4].result.sink_node->getType(), FP_operations[operations_iter4].result.sink_node->getIntegration()->getName(), FP_operations[operations_iter4].result.sink_node->getActivation()->getName(), FP_operations[operations_iter4].result.sink_node->getLayerName(), FP_operations[operations_iter4].result.sink_node->getTensorIndex().first, FP_operations[operations_iter4].arguments[0].weight->getLayerName()); for (auto& argument4 : FP_operations[operations_iter4].arguments) { std::string source_ops_key_4 = makeForwardPropogationOperationsKey(argument4.time_step, argument4.source_node->getType(), argument4.source_node->getIntegration()->getName(), argument4.source_node->getActivation()->getName(), argument4.source_node->getLayerName(), argument4.source_node->getTensorIndex().first, argument4.weight->getLayerName()); std::string source_ops_key_4_no_t = makeForwardPropogationOperationsKey(0, argument4.source_node->getType(), argument4.source_node->getIntegration()->getName(), argument4.source_node->getActivation()->getName(), argument4.source_node->getLayerName(), argument4.source_node->getTensorIndex().first, argument4.weight->getLayerName()); // Check all future layers that the sink node may be combined with as a sink node if (FP_operations[operations_iter3].result.sink_node->getName() == FP_operations[operations_iter1].result.sink_node->getName() && argument4.source_node->getName() == argument3.source_node->getName()) { sinkAsSinkSinkNode_1v.push_back(source_ops_key_4 + ":" + sink_ops_key_4); } if (FP_operations[operations_iter3].result.sink_node->getName() == FP_operations[operations_iter1].result.sink_node->getName() && source_ops_key_4_no_t == source_ops_key_3_no_t) { sinkAsSinkSinkNode_1s.insert(FP_operations[operations_iter4].result.sink_node->getName() + "|" + std::to_string(FP_operations[operations_iter4].result.time_step)); } if (FP_operations[operations_iter3].result.sink_node->getName() == FP_operations[operations_iter2].result.sink_node->getName() && argument4.source_node->getName() == argument3.source_node->getName()) { sinkAsSinkSinkNode_2v.push_back(source_ops_key_4 + ":" + sink_ops_key_4); } if (FP_operations[operations_iter3].result.sink_node->getName() == FP_operations[operations_iter2].result.sink_node->getName() && source_ops_key_4_no_t == source_ops_key_3_no_t) { sinkAsSinkSinkNode_2s.insert(FP_operations[operations_iter4].result.sink_node->getName() + "|" + std::to_string(FP_operations[operations_iter4].result.time_step)); } // Check all future layers that the sink node may be combined with as a source node if (argument3.source_node->getName() == FP_operations[operations_iter1].result.sink_node->getName() && FP_operations[operations_iter4].result.sink_node->getName() == FP_operations[operations_iter3].result.sink_node->getName()) { sinkAsSourceSourceNode_1v.push_back(source_ops_key_4 + ":" + sink_ops_key_4); } if (argument3.source_node->getName() == FP_operations[operations_iter1].result.sink_node->getName() && sink_ops_key_4 == sink_ops_key_3) { sinkAsSourceSourceNode_1s.insert(argument4.source_node->getName() + "|" + std::to_string(argument4.time_step)); } if (argument3.source_node->getName() == FP_operations[operations_iter2].result.sink_node->getName() && FP_operations[operations_iter4].result.sink_node->getName() == FP_operations[operations_iter3].result.sink_node->getName()) { sinkAsSourceSourceNode_2v.push_back(source_ops_key_4 + ":" + sink_ops_key_4); } if (argument3.source_node->getName() == FP_operations[operations_iter2].result.sink_node->getName() && sink_ops_key_4 == sink_ops_key_3) { sinkAsSourceSourceNode_2s.insert(argument4.source_node->getName() + "|" + std::to_string(argument4.time_step)); } for (const auto& argument1 : FP_operations[operations_iter1].arguments) { std::string ops_key = makeForwardPropogationOperationsKey(argument1.time_step, argument1.source_node->getType(), argument1.source_node->getIntegration()->getName(), argument1.source_node->getActivation()->getName(), argument1.source_node->getLayerName(), argument1.source_node->getTensorIndex().first, argument1.weight->getLayerName()); std::string ops_key_no_t = makeForwardPropogationOperationsKey(0, argument1.source_node->getType(), argument1.source_node->getIntegration()->getName(), argument1.source_node->getActivation()->getName(), argument1.source_node->getLayerName(), argument1.source_node->getTensorIndex().first, argument1.weight->getLayerName()); // Check if the source nodes will be compatible as future source nodes if (argument3.source_node->getName() == argument1.source_node->getName() && FP_operations[operations_iter4].result.sink_node->getName() == FP_operations[operations_iter3].result.sink_node->getName()) { sourceAsSourceSourceNode_1v.push_back(source_ops_key_4 + ":" + sink_ops_key_4); } if (argument3.source_node->getName() == argument1.source_node->getName() && sink_ops_key_4_no_t == sink_ops_key_3_no_t) { sourceAsSourceSourceNode_1s.insert(argument4.source_node->getName() + "|" + std::to_string(argument4.time_step)); } // Check if the source nodes will be compatible as future sink nodes if (FP_operations[operations_iter3].result.sink_node->getName() == argument1.source_node->getName() && argument4.source_node->getName() == argument3.source_node->getName()) { sourceAsSinkSinkNode_1v.push_back(source_ops_key_4 + ":" + sink_ops_key_4); } if (FP_operations[operations_iter3].result.sink_node->getName() == argument1.source_node->getName() && source_ops_key_4_no_t == source_ops_key_3_no_t) { sourceAsSinkSinkNode_1s.insert(FP_operations[operations_iter4].result.sink_node->getName() + "|" + std::to_string(FP_operations[operations_iter4].result.time_step)); } } for (const auto& argument2 : FP_operations[operations_iter2].arguments) { std::string ops_key = makeForwardPropogationOperationsKey(argument2.time_step, argument2.source_node->getType(), argument2.source_node->getIntegration()->getName(), argument2.source_node->getActivation()->getName(), argument2.source_node->getLayerName(), argument2.source_node->getTensorIndex().first, argument2.weight->getLayerName()); std::string ops_key_no_t = makeForwardPropogationOperationsKey(0, argument2.source_node->getType(), argument2.source_node->getIntegration()->getName(), argument2.source_node->getActivation()->getName(), argument2.source_node->getLayerName(), argument2.source_node->getTensorIndex().first, argument2.weight->getLayerName()); // Check if the source nodes will be compatible as future source nodes if (argument3.source_node->getName() == argument2.source_node->getName() && FP_operations[operations_iter4].result.sink_node->getName() == FP_operations[operations_iter3].result.sink_node->getName()) { sourceAsSourceSourceNode_2v.push_back(source_ops_key_4 + ":" + sink_ops_key_4); } if (argument3.source_node->getName() == argument2.source_node->getName() && sink_ops_key_4_no_t == sink_ops_key_3_no_t) { sourceAsSourceSourceNode_2s.insert(argument4.source_node->getName() + "|" + std::to_string(argument4.time_step)); } // Check if the source nodes will be compatible as future sink nodes if (FP_operations[operations_iter3].result.sink_node->getName() == argument2.source_node->getName() && argument4.source_node->getName() == argument3.source_node->getName()) { sourceAsSinkSinkNode_2v.push_back(source_ops_key_4 + ":" + sink_ops_key_4); } if (FP_operations[operations_iter3].result.sink_node->getName() == argument2.source_node->getName() && source_ops_key_4_no_t == source_ops_key_3_no_t) { sourceAsSinkSinkNode_2s.insert(FP_operations[operations_iter4].result.sink_node->getName() + "|" + std::to_string(FP_operations[operations_iter4].result.time_step)); } } } } } } } // Sort the vectors std::sort(sinkAsSourceNode_1v.begin(), sinkAsSourceNode_1v.end()); std::sort(sinkAsSourceNode_2v.begin(), sinkAsSourceNode_2v.end()); std::sort(sourceAsSourceNode_1v.begin(), sourceAsSourceNode_1v.end()); std::sort(sourceAsSourceNode_2v.begin(), sourceAsSourceNode_2v.end()); std::sort(sinkAsSinkNode_1v.begin(), sinkAsSinkNode_1v.end()); std::sort(sinkAsSinkNode_2v.begin(), sinkAsSinkNode_2v.end()); std::sort(sourceAsSinkNode_1v.begin(), sourceAsSinkNode_1v.end()); std::sort(sourceAsSinkNode_2v.begin(), sourceAsSinkNode_2v.end()); std::sort(sinkAsSourceSourceNode_1v.begin(), sinkAsSourceSourceNode_1v.end()); std::sort(sinkAsSourceSourceNode_2v.begin(), sinkAsSourceSourceNode_2v.end()); std::sort(sourceAsSourceSourceNode_1v.begin(), sourceAsSourceSourceNode_1v.end()); std::sort(sourceAsSourceSourceNode_2v.begin(), sourceAsSourceSourceNode_2v.end()); std::sort(sinkAsSinkSinkNode_1v.begin(), sinkAsSinkSinkNode_1v.end()); std::sort(sinkAsSinkSinkNode_2v.begin(), sinkAsSinkSinkNode_2v.end()); std::sort(sourceAsSinkSinkNode_1v.begin(), sourceAsSinkSinkNode_1v.end()); std::sort(sourceAsSinkSinkNode_2v.begin(), sourceAsSinkSinkNode_2v.end()); // Check sets if (sinkAsSourceNode_1s != sinkAsSourceNode_2s) return false; if (sourceAsSourceNode_1s != sourceAsSourceNode_2s) return false; if (sinkAsSinkNode_1s != sinkAsSinkNode_2s) return false; if (sourceAsSinkNode_1s != sourceAsSinkNode_2s) return false; if (opsCompatibility_1s != opsCompatibility_2s) return false; if (sinkAsSourceSourceNode_1s != sinkAsSourceSourceNode_2s) return false; if (sourceAsSourceSourceNode_1s != sourceAsSourceSourceNode_2s) return false; if (sinkAsSinkSinkNode_1s != sinkAsSinkSinkNode_2s) return false; if (sourceAsSinkSinkNode_1s != sourceAsSinkSinkNode_2s) return false; // Check vectors if (sinkAsSourceNode_1v != sinkAsSourceNode_2v) return false; if (sourceAsSourceNode_1v != sourceAsSourceNode_2v) return false; if (sinkAsSinkNode_1v != sinkAsSinkNode_2v) return false; if (sourceAsSinkNode_1v != sourceAsSinkNode_2v) return false; if (sinkAsSourceSourceNode_1v != sinkAsSourceSourceNode_2v) return false; if (sourceAsSourceSourceNode_1v != sourceAsSourceSourceNode_2v) return false; if (sinkAsSinkSinkNode_1v != sinkAsSinkSinkNode_2v) return false; if (sourceAsSinkSinkNode_1v != sourceAsSinkSinkNode_2v) return false; return true; } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::getForwardPropogationLayerTensorDimensions(const std::vector<OperationList<TensorT>>& FP_operations, const std::map<std::string, std::vector<int>>& operations_map, std::vector<int>& source_layer_sizes, std::vector<int>& sink_layer_sizes, std::vector<std::vector<std::pair<int, int>>>& weight_indices, std::vector<std::map<std::string, std::vector<std::pair<int, int>>>>& shared_weight_indices, std::vector<std::vector<TensorT>>& weight_values, std::vector<bool>& make_source_tensors, std::vector<bool>& make_sink_tensors, std::vector<bool>& make_weight_tensors, std::vector<int>& source_layer_tensor_pos, std::vector<int>& sink_layer_tensor_pos, std::map<int, int>& layer_pos_max_size, std::map<std::string, int>& layer_name_pos, const int& tensor_layers_cnt, const int& weight_layers_cnt) { // track the layer_tensor positions for the source and sink nodes // as well as the weight_tensor positions int sink_layer_pos = tensor_layers_cnt; int source_layer_pos = sink_layer_pos + 1; int weight_pos = weight_layers_cnt; for (const auto& operations : operations_map) { // determine the tensor sizes int sink_layer_pos_tmp = sink_layer_pos; int source_layer_pos_tmp = source_layer_pos; int sink_layer_size = 0; int source_layer_size = 0; std::set<int> sink_layer_pos_check; std::set<int> source_layer_pos_check; std::vector<std::pair<int, int>> weight_index; std::map<std::string, std::vector<std::pair<int, int>>> shared_weight_index; std::vector<TensorT> weight_value; bool make_sink_tensor = false; bool make_source_tensor = false; bool make_weight_tensor = false; // internal variables to track changes in source/sink layer positions bool updated_source_layer_pos = false; for (const int& ops_index : operations.second) { // index sink node tensors (if it does not yet exist) int sink_layer_index = 0; bool increment_sink_layer_size = false; if (!FP_operations[ops_index].result.sink_node->getLayerName().empty() && layer_name_pos.count(FP_operations[ops_index].result.sink_node->getLayerName()) && FP_operations[ops_index].result.sink_node->getTensorIndex().first == -1) { sink_layer_pos_tmp = layer_name_pos.at(FP_operations[ops_index].result.sink_node->getLayerName()); sink_layer_index = layer_pos_max_size.at(sink_layer_pos_tmp) + 1; FP_operations[ops_index].result.sink_node->setTensorIndex(std::make_pair(sink_layer_pos_tmp, sink_layer_index)); increment_sink_layer_size = true; } else if (!FP_operations[ops_index].result.sink_node->getLayerName().empty() && layer_name_pos.count(FP_operations[ops_index].result.sink_node->getLayerName())) { sink_layer_pos_tmp = layer_name_pos.at(FP_operations[ops_index].result.sink_node->getLayerName()); sink_layer_index = FP_operations[ops_index].result.sink_node->getTensorIndex().second; } else if (FP_operations[ops_index].result.sink_node->getTensorIndex().first == -1) { FP_operations[ops_index].result.sink_node->setTensorIndex(std::make_pair(sink_layer_pos_tmp, sink_layer_size)); sink_layer_index = sink_layer_size; make_sink_tensor = true; increment_sink_layer_size = true; if (!FP_operations[ops_index].result.sink_node->getLayerName().empty()) layer_name_pos.emplace(FP_operations[ops_index].result.sink_node->getLayerName(), sink_layer_pos_tmp); } else { sink_layer_index = FP_operations[ops_index].result.sink_node->getTensorIndex().second; } // track the sink layer tensor position sizes sink_layer_pos_check.insert(FP_operations[ops_index].result.sink_node->getTensorIndex().first); auto found = layer_pos_max_size.emplace(FP_operations[ops_index].result.sink_node->getTensorIndex().first, sink_layer_index); if (!found.second && layer_pos_max_size.at(FP_operations[ops_index].result.sink_node->getTensorIndex().first) < sink_layer_index) layer_pos_max_size.at(FP_operations[ops_index].result.sink_node->getTensorIndex().first) = sink_layer_index; // move the source layer position back one because a sink node is not going to be made if (!updated_source_layer_pos && !make_sink_tensor) { source_layer_pos_tmp = sink_layer_pos; updated_source_layer_pos = true; } // index source node tensor (if it does not yet exist) for (const OperationArguments<TensorT>& argument : FP_operations[ops_index].arguments) { int source_layer_index = 0; bool increment_source_layer_size = false; if (!argument.source_node->getLayerName().empty() && layer_name_pos.count(argument.source_node->getLayerName()) && argument.source_node->getTensorIndex().first == -1) { source_layer_pos_tmp = layer_name_pos.at(argument.source_node->getLayerName()); source_layer_index = layer_pos_max_size.at(source_layer_pos_tmp) + 1; argument.source_node->setTensorIndex(std::make_pair(source_layer_pos_tmp, source_layer_index)); increment_source_layer_size = true; } else if (!argument.source_node->getLayerName().empty() && layer_name_pos.count(argument.source_node->getLayerName())) { source_layer_pos_tmp = layer_name_pos.at(argument.source_node->getLayerName()); source_layer_index = argument.source_node->getTensorIndex().second; } else if (argument.source_node->getTensorIndex().first == -1) { argument.source_node->setTensorIndex(std::make_pair(source_layer_pos_tmp, source_layer_size)); source_layer_index = source_layer_size; make_source_tensor = true; increment_source_layer_size = true; if (!argument.source_node->getLayerName().empty()) layer_name_pos.emplace(argument.source_node->getLayerName(), source_layer_pos_tmp); } else { source_layer_index = argument.source_node->getTensorIndex().second; } // track the source layer tensor position sizes source_layer_pos_check.insert(argument.source_node->getTensorIndex().first); auto found = layer_pos_max_size.emplace(argument.source_node->getTensorIndex().first, source_layer_index); if (!found.second && layer_pos_max_size.at(argument.source_node->getTensorIndex().first) < source_layer_index) layer_pos_max_size.at(argument.source_node->getTensorIndex().first) = source_layer_index; // index weight tensors if (argument.weight->getTensorIndex().size() == 0) { argument.weight->addTensorIndex(std::make_tuple(weight_pos, source_layer_index, sink_layer_index)); weight_index.push_back(std::make_pair(source_layer_index, sink_layer_index)); if (argument.weight->getInitWeight()) { TensorT tmp = argument.weight->getWeightInitOp()->operator()(); weight_value.push_back(tmp); argument.weight->setWeight(tmp); argument.weight->setInitWeight(false); // ensures that from now on the weight will not be re-initialized } else { weight_value.push_back(argument.weight->getWeight()); } make_weight_tensor = true; } else { argument.weight->addTensorIndex(std::make_tuple(weight_pos, source_layer_index, sink_layer_index)); weight_index.push_back(std::make_pair(source_layer_index, sink_layer_index)); weight_value.push_back(argument.weight->getWeight()); make_weight_tensor = true; // even if the weights are shared, we should still make a new weight tensor std::vector<std::pair<int, int>> tmp = { std::make_pair(source_layer_index, sink_layer_index) }; auto found = shared_weight_index.emplace(argument.weight->getName(), tmp); if (!found.second) { // add the new shared weight index shared_weight_index.at(argument.weight->getName()).push_back(std::make_pair(source_layer_index, sink_layer_index)); } else { // add the first shared weight index int weight_pos_0 = std::get<0>(argument.weight->getTensorIndex()[0]); if (weight_pos_0 != weight_pos) { clear_cache(); // clean up before exiting const std::string error = "The weight is shared across multiple tensors. This is currently not supported."; throw std::runtime_error(error); // if this fails, then the weight is shared with another layer. // the current weight sharing implementation cannot handle such cases. } int source_layer_index_0 = std::get<1>(argument.weight->getTensorIndex()[0]); int sink_layer_index_0 = std::get<2>(argument.weight->getTensorIndex()[0]); shared_weight_index.at(argument.weight->getName()).push_back(std::make_pair(source_layer_index_0, sink_layer_index_0)); } } if (increment_source_layer_size) ++source_layer_size; } if (increment_sink_layer_size) ++sink_layer_size; //? } // determine the actual source and sink layer sizes std::set<int> source_nodes, sink_nodes; for (const std::pair<int, int>& p : weight_index) { source_nodes.insert(p.first); sink_nodes.insert(p.second); } if (sink_layer_pos_check.size() != 1) { clear_cache(); // clean up before exiting std::string error_char = "Attempting to join sink nodes that are on different layers."; throw std::runtime_error(error_char); } if (source_layer_pos_check.size() != 1) { clear_cache(); // clean up before exiting std::string error_char = "Attempting to join source nodes that are on different layers."; throw std::runtime_error(error_char); } if (updated_source_layer_pos && make_sink_tensor) { clear_cache(); // clean up before exiting std::string error_char = "Attempting to join sink nodes that are on different layers."; throw std::runtime_error(error_char); } // store the tensor sizes sink_layer_sizes.push_back(*std::max_element(sink_nodes.begin(), sink_nodes.end()) + 1); // This is an estimate! source_layer_sizes.push_back(*std::max_element(source_nodes.begin(), source_nodes.end()) + 1); // This is an estimate! make_source_tensors.push_back(make_source_tensor); make_sink_tensors.push_back(make_sink_tensor); make_weight_tensors.push_back(make_weight_tensor); weight_indices.push_back(weight_index); weight_values.push_back(weight_value); shared_weight_indices.push_back(shared_weight_index); sink_layer_tensor_pos.push_back(*sink_layer_pos_check.begin()); source_layer_tensor_pos.push_back(*source_layer_pos_check.begin()); // Check that the source layer size is not less than the # of source nodes if (source_layer_sizes.back() < source_nodes.size() - 1) { // changed from != and add -1 clear_cache(); // clean up before exiting std::string error_char = "Attempting to join multiple source nodes into a single layer that were previously split into seperate layers."; throw std::runtime_error(error_char); } // Check that the sink layer size is not less than the # of sink nodes if (sink_layer_sizes.back() < sink_nodes.size() - 1) { // changed from != and added -1 clear_cache(); // clean up before exiting std::string error_char = "Attempting to join multiple sink nodes into a single layer that were previously split into seperate layers."; throw std::runtime_error(error_char); } // TODO: Missing the case where there are two different nodes with the same indices... // update the layer positions if (make_sink_tensor && make_source_tensor) { sink_layer_pos += 2; source_layer_pos = sink_layer_pos + 1; } else if (make_sink_tensor || make_source_tensor) { sink_layer_pos += 1; source_layer_pos = sink_layer_pos + 1; } // update the weight positions if (make_weight_tensor) { weight_pos += 1; } } } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::setForwardPropogationLayerTensors_(const std::vector<OperationList<TensorT>>& FP_operations, const std::vector<std::map<std::string, std::vector<int>>>& tensor_ops_steps, const int& batch_size, const int& memory_size, const bool& train) { // Determine the Tensor sizes and whether the tensors need to be made std::map<int, int> layer_pos_max_size; // structure to track max layer size where the key is the layer and the value is the max size std::map<std::string, int> layer_name_pos; // structure to enforce all nodes with the same tensor name end up on the same tensor where the key is the layer name and the value is the layer position std::vector<std::vector<int>> source_layer_sizes_all, sink_layer_sizes_all; std::vector<std::vector<std::vector<TensorT>>> weight_values_all; std::vector<std::vector<std::vector<std::pair<int, int>>>> weight_indices_all; std::vector<std::vector<std::map<std::string, std::vector<std::pair<int, int>>>>> shared_weight_indices_all; std::vector<std::vector<bool>> make_source_tensors_all, make_sink_tensors_all, make_weight_tensors_all; std::vector<std::vector<int>> source_layer_pos_all, sink_layer_pos_all; int tensor_layers_cnt = 0; int weight_layers_cnt = 0; for (auto& tensor_ops_step : tensor_ops_steps) { if (tensor_ops_step.size() != 0) { std::vector<int> source_layer_sizes, sink_layer_sizes; std::vector<std::vector<TensorT>> weight_values; std::vector<std::vector<std::pair<int, int>>> weight_indices; std::vector<std::map<std::string, std::vector<std::pair<int, int>>>> shared_weight_indices; std::vector<bool> make_source_tensors, make_sink_tensors, make_weight_tensors; std::vector<int> source_layer_pos, sink_layer_pos; getForwardPropogationLayerTensorDimensions(FP_operations, tensor_ops_step, source_layer_sizes, sink_layer_sizes, weight_indices, shared_weight_indices, weight_values, make_source_tensors, make_sink_tensors, make_weight_tensors, source_layer_pos, sink_layer_pos, layer_pos_max_size, layer_name_pos, tensor_layers_cnt, weight_layers_cnt); //allocateForwardPropogationLayerTensors(FP_operations, tensor_ops_step, source_layer_sizes, sink_layer_sizes, weight_indices, shared_weight_indices, weight_values, make_source_tensors, make_sink_tensors, make_weight_tensors, batch_size, memory_size_buffered, train); // Count the tensor and weight layers that will be created for (const bool& make : make_source_tensors) if (make) ++tensor_layers_cnt; for (const bool& make : make_sink_tensors) if (make) ++tensor_layers_cnt; for (const bool& make : make_weight_tensors) if (make) ++weight_layers_cnt; // Record the tensor op step layers source_layer_sizes_all.push_back(source_layer_sizes); sink_layer_sizes_all.push_back(sink_layer_sizes); weight_values_all.push_back(weight_values); weight_indices_all.push_back(weight_indices); shared_weight_indices_all.push_back(shared_weight_indices); make_source_tensors_all.push_back(make_source_tensors); make_sink_tensors_all.push_back(make_sink_tensors); make_weight_tensors_all.push_back(make_weight_tensors); source_layer_pos_all.push_back(source_layer_pos); sink_layer_pos_all.push_back(sink_layer_pos); } } // correct source/sink layer sizes based off of the max layer size for (int i = 0; i < source_layer_pos_all.size(); ++i) { for (int j = 0; j < source_layer_pos_all.at(i).size(); ++j) { if (source_layer_sizes_all.at(i).at(j) != layer_pos_max_size.at(source_layer_pos_all.at(i).at(j)) + 1) source_layer_sizes_all.at(i).at(j) = layer_pos_max_size.at(source_layer_pos_all.at(i).at(j)) + 1; if (sink_layer_sizes_all.at(i).at(j) != layer_pos_max_size.at(sink_layer_pos_all.at(i).at(j)) + 1) sink_layer_sizes_all.at(i).at(j) = layer_pos_max_size.at(sink_layer_pos_all.at(i).at(j)) + 1; } } // Allocate the tensors using the corrected sizes int i = 0; for (auto& tensor_ops_step : tensor_ops_steps) { if (tensor_ops_step.size() != 0) { if (source_layer_sizes_all.at(i).size() == tensor_ops_step.size() && sink_layer_sizes_all.at(i).size() == tensor_ops_step.size() && weight_indices_all.at(i).size() == tensor_ops_step.size() && shared_weight_indices_all.at(i).size() == tensor_ops_step.size() && weight_values_all.at(i).size() == tensor_ops_step.size() && make_source_tensors_all.at(i).size() == tensor_ops_step.size() && make_sink_tensors_all.at(i).size() == tensor_ops_step.size() && make_weight_tensors_all.at(i).size() == tensor_ops_step.size()) { allocateForwardPropogationLayerTensors(FP_operations, tensor_ops_step, source_layer_sizes_all.at(i), sink_layer_sizes_all.at(i), weight_indices_all.at(i), shared_weight_indices_all.at(i), weight_values_all.at(i), make_source_tensors_all.at(i), make_sink_tensors_all.at(i), make_weight_tensors_all.at(i), batch_size, memory_size, train); } else { clear_cache(); throw std::runtime_error("The vector of layer sizes does not match the number of tensor operation steps!"); } } ++i; } } template<typename TensorT, typename DeviceT> std::string ModelInterpreter<TensorT, DeviceT>::makeForwardPropogationOperationsKey( const int & time_step, const NodeType& node_type, const std::string & node_integration, const std::string & node_activation, const std::string& node_layer_name, const int& node_layer_index, const std::string& weight_layer_name) { // [TODO: may not need to add in node type //std::string ops_key = std::to_string(time_step) + "/" + std::to_string(node_type) + "/" + node_integration + "/" + node_activation; std::string ops_key = std::to_string(time_step) + "/" + node_integration + "/" + node_activation + "/" + node_layer_name + "/" + weight_layer_name;// +"/" + std::to_string(layer_index); return ops_key; } template<typename TensorT, typename DeviceT> void ModelInterpreter<TensorT, DeviceT>::getForwardPropogationOperations(Model<TensorT>& model, const int& batch_size, const int& memory_size, const bool& train, const bool& fast_check, const bool& find_cycles, const bool& preserve_OoO) { // register the batch and memory sizes with the model // [TODO: add tests] model.setBatchAndMemorySizes(batch_size, memory_size); // buffer the memory size const int memory_size_buffered = memory_size + 1; // Get the forward operation steps if (tensor_ops_steps_.size() == 0) { // compile the model into a list of operations int iter = 0; std::vector<OperationList<TensorT>> FP_operations_expanded; if (preserve_OoO) getFPOpsOoO_(model, FP_operations_expanded, iter); else getFPOpsGraph_(model, FP_operations_expanded, iter); // identify tensor operation motifs in the list of operations std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = getTensorOperations(FP_operations_expanded, identified_sink_nodes, fast_check); std::vector<std::map<std::string, std::vector<int>>> tensor_ops_steps; tensor_ops_steps.resize(iter); for (auto& tensor_op : tensor_ops) { tensor_ops_steps[FP_operations_expanded[tensor_op.second[0]].operation_index].emplace(tensor_op.first, tensor_op.second); } // Save the list of operations for fast model check-pointing tensor_ops_steps_ = tensor_ops_steps; FP_operations_ = FP_operations_expanded; // Allocate tensor memory setForwardPropogationLayerTensors_(FP_operations_expanded, tensor_ops_steps, batch_size, memory_size_buffered, train); } // Work from the cache else { // Clear the tensor indices // NOTE: could be avoided by instead using the model directly // to keep track of the tensor indices during `getForwardPropogationLayerDimensions` for (auto& FP_operation : FP_operations_) { FP_operation.result.sink_node->setTensorIndex(std::make_pair(-1, -1)); for (auto& argument : FP_operation.arguments) { argument.source_node->setTensorIndex(std::make_pair(-1, -1)); argument.weight->clearTensorIndex(); } } setForwardPropogationLayerTensors_(FP_operations_, tensor_ops_steps_, batch_size, memory_size_buffered, train); } } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::getFPOpsOoO_(Model<TensorT>& model, std::vector<OperationList<TensorT>>& FP_operations_expanded, int& iter) { FP_operations_expanded.clear(); iter = 0; // STEP 1: Preliminaries... // initialize the node statuses to determine the FP propogation steps for (auto& nodes_map : model.nodes_) { if (nodes_map.second->getType() == NodeType::input || nodes_map.second->getType() == NodeType::bias || nodes_map.second->getType() == NodeType::zero) nodes_map.second->setStatus(NodeStatus::activated); else nodes_map.second->setStatus(NodeStatus::initialized); } // STEP 2: Get a list of unoptimized operations for FP in As-soon-as-possible (ASAP) hierarchy const int max_iters = 1e6; std::vector<OperationList<TensorT>> FP_operations; for (; iter < max_iters; ++iter) { std::map<std::string, int> FP_operations_map; std::vector<OperationList<TensorT>> FP_operations_list; // get the next hidden layer getNextInactiveLayer(model, FP_operations_map, FP_operations_list); // get cycles std::map<std::string, int> FP_operations_map_cycles = FP_operations_map; std::vector<OperationList<TensorT>> FP_operations_list_cycles = FP_operations_list; std::set<std::string> sink_nodes_cycles; getNextInactiveLayerCycles(model, FP_operations_map_cycles, FP_operations_list_cycles, sink_nodes_cycles); // Remove all nodes involved in "cycles" that have arguments // involving source to sink node pairs not identified as cycles pruneInactiveLayerCycles(model, FP_operations_map, FP_operations_map_cycles, FP_operations_list, FP_operations_list_cycles, sink_nodes_cycles); // check if all nodes have been activated if (FP_operations_list.size() == 0) { break; } // activate sink nodes and update the Operations index for (auto& FP_operation : FP_operations_list) { FP_operation.result.sink_node->setStatus(NodeStatus::activated); FP_operation.operation_index = iter; FP_operations.push_back(FP_operation); } } // STEP 3: Pre-emptively expand the each operation from multi source to single output operations // to single source to single output operations //expandForwardPropogationOperations(FP_operations, FP_operations_expanded); // Slower and not needed... expandAllForwardPropogationOperations(FP_operations, FP_operations_expanded); } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::getFPOpsGraph_(Model<TensorT>& model, std::vector<OperationList<TensorT>>& FP_operations_expanded, int & iter) { FP_operations_expanded.clear(); // get all operations in the graph for (auto& link_map : model.links_) { // arguments // NOTE: each link is given it's own OperationList to avoid the eventual split downstream OperationArguments<TensorT> arguments; arguments.source_node = model.nodes_.at(link_map.second->getSourceNodeName()); arguments.weight = model.weights_.at(link_map.second->getWeightName()); arguments.time_step = 1; arguments.link_name = link_map.first; // results OperationList<TensorT> operation_list; OperationResult<TensorT> result; result.sink_node = model.nodes_.at(link_map.second->getSinkNodeName()); result.time_step = 0; operation_list.result = result; operation_list.arguments.push_back(arguments); operation_list.operation_index = 0; FP_operations_expanded.push_back(operation_list); } iter = 1; } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::addLayerTensor(std::shared_ptr<NodeTensorData<TensorT, DeviceT>>& layer) { layer_tensors_.push_back(layer); } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::clearLayerTensors() { layer_tensors_.clear(); } template<typename TensorT, typename DeviceT> inline std::shared_ptr<NodeTensorData<TensorT, DeviceT>> ModelInterpreter<TensorT, DeviceT>::getLayerTensor(const int & layer_index) { try { return layer_tensors_.at(layer_index); } catch (const std::exception& e) { std::cout << "Layer index " << layer_index << " does not exist" << std::endl; return std::shared_ptr<NodeTensorData<TensorT, DeviceT>>(); } } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::addWeightTensor(std::shared_ptr<WeightTensorData<TensorT, DeviceT>>& weight) { weight_tensors_.push_back(weight); } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::clearWeightTensors() { weight_tensors_.clear(); } template<typename TensorT, typename DeviceT> inline std::shared_ptr<WeightTensorData<TensorT, DeviceT>> ModelInterpreter<TensorT, DeviceT>::getWeightTensor(const int & weight_index) { return weight_tensors_.at(weight_index); } template<typename TensorT, typename DeviceT> inline std::shared_ptr<ModelErrorData<TensorT, DeviceT>> ModelInterpreter<TensorT, DeviceT>::getModelError() { return model_error_; } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::addOperationSteps(const std::vector<OperationTensorStep<TensorT, DeviceT>>& operation_steps) { operation_steps_.push_back(operation_steps); } template<typename TensorT, typename DeviceT> inline std::vector<OperationTensorStep<TensorT, DeviceT>> ModelInterpreter<TensorT, DeviceT>::getOperationSteps(const int& operation_index) { return operation_steps_.at(operation_index); } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::clearOperationSteps() { operation_steps_.clear(); } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::FPTT(const int& time_steps) { // check time_steps vs memory_size int max_steps = time_steps; if (time_steps >= layer_tensors_[0]->getMemorySize()) { std::cout << "Time_steps will be scaled back to the memory_size - 1." << std::endl; max_steps = layer_tensors_[0]->getMemorySize() - 1; } for (int time_step = 0; time_step < max_steps; ++time_step) { const int time_step_cur = max_steps - 1 - time_step; executeForwardPropogationOperations(time_step_cur); } } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::CETT(Model<TensorT>& model, const Eigen::Tensor<TensorT, 3>& values, const std::vector<std::string>& node_names, std::shared_ptr<LossFunctionOp<TensorT>>& loss_function, std::shared_ptr<LossFunctionGradOp<TensorT>>& loss_function_grad, const int & time_steps) { // check time_steps vs memory_size // [NOTE: was changed form memory_size to memory_size - 1] int max_steps = time_steps; if (time_steps >= layer_tensors_[0]->getMemorySize()) { std::cout << "Time_steps will be scaled back to the memory_size - 1." << std::endl; max_steps = layer_tensors_[0]->getMemorySize() - 1; } if (values.dimension(1) - 1 > layer_tensors_[0]->getMemorySize()) std::cout << "The sequence for CETT needs to be the memory_size - 1!" << std::endl; // extract out the layer id const int layer_id = model.nodes_.at(node_names[0])->getTensorIndex().first; if (layer_id < 0) { clear_cache(); // clean up before exiting std::string error = "The output layer does not exist."; throw std::runtime_error(error); } if (getLayerTensor(layer_id)->getLayerSize() != node_names.size()) { std::string error = "The number of output nodes " + std::to_string(getLayerTensor(layer_id)->getLayerSize()) + " does not match the output layer tensor size " + std::to_string(node_names.size()); clear_cache(); // clean up before exiting throw std::runtime_error(error); } // convert the loss function std::shared_ptr<LossFunctionTensorOp<TensorT, DeviceT>> loss_function_tensor; LossFunctionOpToLossFunctionTensorOp<TensorT, DeviceT> loss_conv; loss_conv(loss_function, loss_function_tensor, std::vector<TensorT>() = {}); std::shared_ptr<LossFunctionGradTensorOp<TensorT, DeviceT>> loss_function_grad_tensor; LossFunctionGradOpToLossFunctionGradTensorOp<TensorT, DeviceT> loss_grad_conv; loss_grad_conv(loss_function_grad, loss_function_grad_tensor, std::vector<TensorT>() = {}); // NOTE: the output are stored [Tmax, Tmax - 1, ..., T=0, T=-1] where T=-1 is added automatically // so the expected values should also be stored [Tmax, Tmax - 1, ..., T=0, T=-1] for (int time_step = 0; time_step < max_steps; ++time_step) { // calculate the error for each batch of memory Eigen::Tensor<TensorT, 2> expected = values.chip(time_step, 1); executeModelErrorOperations(expected, layer_id, loss_function_tensor, loss_function_grad_tensor, time_step); } } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::CMTT(Model<TensorT>& model, const Eigen::Tensor<TensorT, 3>& values, const std::vector<std::string>& node_names, std::shared_ptr<MetricFunctionOp<TensorT>>& metric_function, const int & time_steps, const int & metric_index) { // check time_steps vs memory_size int max_steps = time_steps; if (time_steps >= layer_tensors_[0]->getMemorySize()) { std::cout << "Time_steps will be scaled back to the memory_size - 1." << std::endl; max_steps = layer_tensors_[0]->getMemorySize() - 1; } if (values.dimension(1) - 1 > layer_tensors_[0]->getMemorySize()) std::cout << "The sequence for CETT needs to be the memory_size - 1!" << std::endl; // extract out the layer id const int layer_id = model.nodes_.at(node_names[0])->getTensorIndex().first; if (layer_id < 0) { clear_cache(); // clean up before exiting std::string error_char = "The output layer does not exist."; throw std::runtime_error(error_char); } if (getLayerTensor(layer_id)->getLayerSize() != node_names.size()) { clear_cache(); // clean up before exiting std::string error_char = "The number of output nodes " + std::to_string(getLayerTensor(layer_id)->getLayerSize()) + " does not match the output layer tensor size " + std::to_string(node_names.size()); throw std::runtime_error(error_char); } // convert the metric function std::shared_ptr<MetricFunctionTensorOp<TensorT, DeviceT>> metric_function_tensor; MetricFunctionOpToMetricFunctionTensorOp<TensorT, DeviceT> metric_conv; metric_conv(metric_function, metric_function_tensor, std::vector<TensorT>() = {}); // NOTE: the output are stored [Tmax, Tmax - 1, ..., T=0, T=-1] where T=-1 is added automatically // so the expected values should also be stored [Tmax, Tmax - 1, ..., T=0, T=-1] for (int time_step = 0; time_step < max_steps; ++time_step) { // calculate the error for each batch of memory Eigen::Tensor<TensorT, 2> expected = values.chip(time_step, 1); executeModelMetricOperations(expected, layer_id, metric_function_tensor, time_step, metric_index); } } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::TBPTT(const int& time_steps) { // check time_steps vs memory_size int max_steps = time_steps; if (time_steps >= layer_tensors_[0]->getMemorySize()) { std::cout << "Time_steps will be scaled back to the memory_size - 1." << std::endl; max_steps = layer_tensors_[0]->getMemorySize() - 1; } for (int time_step = 0; time_step < max_steps; ++time_step) { // calculate the error for each batch of memory executeBackwardPropogationOperations(time_step); } } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::updateWeights(const int& iter) { executeWeightErrorOperations(); executeWeightUpdateOperations(iter); } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::setModelResources(const ModelResources & model_resources) { model_resources_ = model_resources; } template<typename TensorT, typename DeviceT> inline ModelResources ModelInterpreter<TensorT, DeviceT>::getModelResources() { return model_resources_; } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::clear_cache() { layer_tensors_.clear(); weight_tensors_.clear(); model_error_.reset(); operation_steps_.clear(); FP_operations_.clear(); tensor_ops_steps_.clear(); } template<typename TensorT, typename DeviceT> inline std::vector<std::map<std::string, std::vector<int>>> ModelInterpreter<TensorT, DeviceT>::getTensorOpsSteps() const { return tensor_ops_steps_; } template<typename TensorT, typename DeviceT> inline std::vector<OperationList<TensorT>> ModelInterpreter<TensorT, DeviceT>::getFPOperations() const { return FP_operations_; } template<typename TensorT, typename DeviceT> inline void ModelInterpreter<TensorT, DeviceT>::printTensorOpsSteps(std::string delimiter) const { const std::vector<std::string> headers = { "Operation", "source_node_name", "source_node_timestep", "weight_name", "sink_node_name", "sink_node_timestep" }; // Print the headers for (const std::string& header : headers) std::cout << header << delimiter; std::cout << "\n"; // Print the rows for (const auto& tensor_ops_step : tensor_ops_steps_) { for (const auto& tensor_op_map : tensor_ops_step) { for (const auto& tensor_op : tensor_op_map.second) { std::string sink_node_name = FP_operations_[tensor_op].result.sink_node->getName(); int sink_node_timestep = FP_operations_[tensor_op].result.time_step; for (const auto& argument : FP_operations_[tensor_op].arguments) { std::vector<std::string> row; row.push_back(tensor_op_map.first); row.push_back(argument.source_node->getName()); row.push_back(std::to_string(argument.time_step)); row.push_back(argument.weight->getName()); row.push_back(sink_node_name); row.push_back(std::to_string(sink_node_timestep)); // write to the console for (const std::string& e : row) std::cout << e << delimiter; std::cout << "\n"; } } } } } } #endif //EVONET_MODELINTERPRETER_H<file_sep>#ifndef EVONET_STATISTICS_H #define EVONET_STATISTICS_H #include <unsupported/Eigen/CXX11/Tensor> #include <math.h> #include <vector> #include <iostream> // Numerical Recipes C definitions #define EPS1 1.0e-24 #define EPS2 1.0e-24 namespace EvoNet { /* @brief Methods for statistics */ /* @brief Calculate the Confidence Intervals for a distribution of data @param[in] data Distribution of data @param[in] alpha Confidence level @return pair of lower bound and upper bounds for the data */ template<typename T> std::pair<T, T> confidence(const std::vector<T>& data, T alpha=0.05) { std::vector<T> data_sorted = data; std::sort(data_sorted.begin(), data_sorted.end()); int n = data_sorted.size(); T lb = data_sorted[int((alpha / 2.0)*n)]; T ub = data_sorted[int((1 - alpha / 2.0)*n)]; return std::make_pair(lb, ub); } /* @brief Moments of a distribution Given an array of data[1..n], this routine returns its mean ave, average deviation adev, standard deviation sdev, variance var, skewness skew, and kurtosis curt. References: Numerical Recipes in C pg 613 */ template<typename T> void moment(T data[], int n, T &ave, T &adev, T &sdev, T &var, T &skew, T &curt) { int j; T ep = 0.0, s, p; if (n <= 1) { std::cout << "n must be at least 2 in moment" << std::endl; return; } s = 0.0; for (j = 0; j < n; j++) s += data[j]; ave = s / n; adev = (var) = (skew) = (curt) = 0.0; for (j = 0; j < n; j++) { adev += fabs(s = data[j] - (ave)); ep += s; var += (p = s * s); skew += (p *= s); curt += (p *= s); } adev /= n; var = (var - ep * ep / n) / (n - 1); sdev = sqrt(var); if (var) { skew /= (n*(var)*(sdev)); curt = (curt) / (n*(var)*(var)) - 3.0; } else { std::cout << "No skew/kurtosis when variance = 0 (in moment)" << std::endl; return; } } /* @brief Kolmogorov - Smirnov probability function. References: Numerical Recipes in C pg 626 */ template<typename T> T probks(T alam) { int j; T a2, fac = 2.0, sum = 0.0, term, termbf = 0.0; a2 = -2.0*alam*alam; for (j = 1; j <= 100; j++) { term = fac * exp(a2*j*j); sum += term; if (fabs(term) <= EPS1 * termbf || fabs(term) <= EPS2 * sum) return sum; fac = -fac; termbf = fabs(term); } return 1.0; } /* @brief Kolmogorov-Smirnov Test two way Given an array data1[1..n1], and an array data2[1..n2], this routine returns the K– S statistic d, and the significance level prob for the null hypothesis that the data sets are drawn from the same distribution.Small values of prob showthat the cumulative distribution function of data1 is significantly different from that of data2.The arrays data1 and data2 are modified by being sorted into ascending order. References: Numerical Recipes in C pg 625 */ template<typename T> void kstwo(T data1[], unsigned long n1, T data2[], unsigned long n2, T &d, T &prob) { unsigned long j1 = 0, j2 = 0; T d1, d2, dt, en1, en2, en, fn1 = 0.0, fn2 = 0.0; std::sort(data1, data1+n1); std::sort(data2, data2+n2); en1 = n1; en2 = n2; d = 0.0; while (j1 < n1 && j2 < n2) { if ((d1 = data1[j1]) <= (d2 = data2[j2])) fn1 = j1++ / en1; if (d2 <= d1) fn2 = j2++ / en2; if ((dt = fabs(fn2 - fn1)) > d) d = dt; } en = sqrt(en1*en2 / (en1 + en2)); prob = probks((en + 0.12 + 0.11 / en)*(d)); } template<typename T> void initLogFacs(T* logFacs, int n) { logFacs[0] = 0; for (int i = 1; i < n + 1; ++i) { logFacs[i] = logFacs[i - 1] + std::log((T)i); // only n times of log() calls } } template<typename T> T logHypergeometricProb(T* logFacs, int a, int b, int c, int d) { return logFacs[a + b] + logFacs[c + d] + logFacs[a + c] + logFacs[b + d] - logFacs[a] - logFacs[b] - logFacs[c] - logFacs[d] - logFacs[a + b + c + d]; } /* @brief Implementation of Fishers exact test Example: m = Genes IN GO term n = Genes NOT IN GO term k = Gene hits, that is, differentially expressed x = Genes both IN GO term and differentially expressed 'hits' where a = x, b = m - x, c = k - x, and d = n - (k - x) @returns one-tailed probability */ template<typename T> T fisherExactTest(const int& a, const int& b, const int& c, const int& d) { int n = a + b + c + d; T* logFacs = new T[n + 1]; // *** dynamically allocate memory logFacs[0..n] *** initLogFacs(logFacs, n); // *** initialize logFacs array *** T logpCutoff = logHypergeometricProb(logFacs, a, b, c, d); // *** logFacs added T pFraction = 0; for (int x = 0; x <= n; ++x) { if (a + b - x >= 0 && a + c - x >= 0 && d - a + x >= 0) { T l = logHypergeometricProb(logFacs, x, a + b - x, a + c - x, d - a + x); if (l <= logpCutoff) pFraction += std::exp(l - logpCutoff); } } T logpValue = logpCutoff + log(pFraction); delete[] logFacs; return std::exp(logpValue); } } #endif //EVONET_STATISTICS_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELINTERPRETERGPU_H #define EVONET_MODELINTERPRETERGPU_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> // .h #include <EvoNet/ml/ModelInterpreter.h> #include <unsupported/Eigen/CXX11/Tensor> #include <cereal/access.hpp> // serialiation of private members #undef min // clashes with std::limit on windows in polymorphic.hpp #undef max // clashes with std::limit on windows in polymorphic.hpp #include <cereal/types/polymorphic.hpp> // .cpp #include <EvoNet/ml/ModelErrorData.h> #include <EvoNet/ml/ModelKernalGpu.h> namespace EvoNet { template<typename TensorT> class ModelInterpreterGpu : public ModelInterpreter<TensorT, Eigen::GpuDevice> { public: using ModelInterpreter<TensorT, Eigen::GpuDevice>::ModelInterpreter; void allocateForwardPropogationLayerTensors(const std::vector<OperationList<TensorT>>& FP_operations, const std::map<std::string, std::vector<int>>& operations_map, const std::vector<int>& source_layer_sizes, const std::vector<int>& sink_layer_sizes, const std::vector<std::vector<std::pair<int, int>>> weight_indices, std::vector<std::map<std::string, std::vector<std::pair<int, int>>>>& shared_weight_indices, const std::vector<std::vector<TensorT>>& weight_values, const std::vector<bool>& make_source_tensors, const std::vector<bool>& make_sink_tensors, const std::vector<bool>& make_weight_tensors, const int& batch_size, const int& memory_size, const bool& train) override; void executeForwardPropogationOperations(const int& time_step) override; void executeModelErrorOperations(Eigen::Tensor<TensorT, 2>& expected, const int& layer_id, std::shared_ptr<LossFunctionTensorOp<TensorT,Eigen::GpuDevice>>& loss_function, std::shared_ptr<LossFunctionGradTensorOp<TensorT,Eigen::GpuDevice>>& loss_function_grad, const int& time_step) override; void executeModelMetricOperations(Eigen::Tensor<TensorT, 2>& expected, const int& layer_id, std::shared_ptr<MetricFunctionTensorOp<TensorT,Eigen::GpuDevice>> metric_function, const int& time_step, const int& metric_index) override; void executeBackwardPropogationOperations(const int& time_step) override; void executeWeightErrorOperations() override; void executeWeightUpdateOperations(const int& iter) override; void allocateModelErrorTensor(const int& batch_size, const int& memory_size, const int& n_metrics) override; void getModelResults(Model<TensorT>& model, const bool& output_nodes, const bool& weights, const bool& model_error, const bool& input_nodes) override; void checkMemory(const Model<TensorT>& model, const int& batch_size, const int& memory_size) override; void updateSolverParams(const int& param_index, const TensorT& param_factor) override; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ModelInterpreter<TensorT, Eigen::GpuDevice>>(this)); } }; template<typename TensorT> inline void ModelInterpreterGpu<TensorT>::allocateForwardPropogationLayerTensors( const std::vector<OperationList<TensorT>>& FP_operations, const std::map<std::string, std::vector<int>>& operations_map, const std::vector<int>& source_layer_sizes, const std::vector<int>& sink_layer_sizes, const std::vector<std::vector<std::pair<int, int>>> weight_indices, std::vector<std::map<std::string, std::vector<std::pair<int, int>>>>& shared_weight_indices, const std::vector<std::vector<TensorT>>& weight_values, const std::vector<bool>& make_source_tensors, const std::vector<bool>& make_sink_tensors, const std::vector<bool>& make_weight_tensors, const int & batch_size, const int & memory_size, const bool & train) { // ensure that all tensors are allocated on the correct device assert(cudaSetDevice(this->getModelResources().at(0).getID()) == cudaSuccess); // is this needed? std::vector<OperationTensorStep<TensorT, Eigen::GpuDevice>> operation_step_list; ActivationOpToActivationTensorOp<TensorT, Eigen::GpuDevice> activation_conv; SolverOpToSolverTensorOp<TensorT, Eigen::GpuDevice> solver_conv; IntegrationOpToIntegrationTensorOp<TensorT, Eigen::GpuDevice> integration_conv; IntegrationErrorOpToIntegrationErrorTensorOp<TensorT, Eigen::GpuDevice> integration_error_conv; IntegrationWeightGradOpToIntegrationWeightGradTensorOp<TensorT, Eigen::GpuDevice> integration_weight_grad_conv; int iter = 0; for (const auto& operations : operations_map) { // make the tensors OperationTensorStep<TensorT, Eigen::GpuDevice> operation_step; // [NOTE: order matters! sink layer should come before the source layer to keep with // the ordering generated in getForwardPropogationTensorDimensions.] std::shared_ptr<NodeTensorData<TensorT, Eigen::GpuDevice>> sink_node_data(new NodeTensorDataGpu<TensorT>()); { // make the sink layer tensor and add it to the cache and operation step std::shared_ptr<ActivationTensorOp<TensorT, Eigen::GpuDevice>> activation = nullptr; std::shared_ptr<ActivationTensorOp<TensorT, Eigen::GpuDevice>> activation_grad = nullptr; std::shared_ptr<IntegrationTensorOp<TensorT, Eigen::GpuDevice>> integration = nullptr; std::shared_ptr<IntegrationErrorTensorOp<TensorT, Eigen::GpuDevice>> integration_error = nullptr; std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, Eigen::GpuDevice>> integration_weight_grad = nullptr; if (make_sink_tensors[iter]) { sink_node_data->initNodeTensorData(batch_size, memory_size, sink_layer_sizes[iter], FP_operations[operations.second[0]].result.sink_node->getType(), FP_operations[operations.second[0]].result.sink_node->getIntegrationShared()->getName(), train); this->layer_tensors_.push_back(sink_node_data); operation_step.sink_layer.time_step = FP_operations[operations.second[0]].result.time_step; activation_conv(FP_operations[operations.second[0]].result.sink_node->getActivationShared(), activation, std::vector<TensorT>() = {}); operation_step.sink_layer.activation = activation; activation_conv(FP_operations[operations.second[0]].result.sink_node->getActivationGradShared(), activation_grad, std::vector<TensorT>() = {}); operation_step.sink_layer.activation_grad = activation_grad; integration_conv(FP_operations[operations.second[0]].result.sink_node->getIntegrationShared(), integration, std::vector<TensorT>() = {}); operation_step.sink_layer.integration = integration; integration_error_conv(FP_operations[operations.second[0]].result.sink_node->getIntegrationErrorShared(), integration_error, std::vector<TensorT>() = {}); operation_step.sink_layer.integration_error = integration_error; integration_weight_grad_conv(FP_operations[operations.second[0]].result.sink_node->getIntegrationWeightGradShared(), integration_weight_grad, std::vector<TensorT>() = {}); operation_step.sink_layer.integration_weight_grad = integration_weight_grad; operation_step.sink_layer.tensor_index = FP_operations[operations.second[0]].result.sink_node->getTensorIndex().first; } else { operation_step.sink_layer.tensor_index = FP_operations[operations.second[0]].result.sink_node->getTensorIndex().first; operation_step.sink_layer.time_step = FP_operations[operations.second[0]].result.time_step; activation_conv(FP_operations[operations.second[0]].result.sink_node->getActivationShared(), activation, std::vector<TensorT>() = {}); operation_step.sink_layer.activation = activation; activation_conv(FP_operations[operations.second[0]].result.sink_node->getActivationGradShared(), activation_grad, std::vector<TensorT>() = {}); operation_step.sink_layer.activation_grad = activation_grad; integration_conv(FP_operations[operations.second[0]].result.sink_node->getIntegrationShared(), integration, std::vector<TensorT>() = {}); operation_step.sink_layer.integration = integration; integration_error_conv(FP_operations[operations.second[0]].result.sink_node->getIntegrationErrorShared(), integration_error, std::vector<TensorT>() = {}); operation_step.sink_layer.integration_error= integration_error; integration_weight_grad_conv(FP_operations[operations.second[0]].result.sink_node->getIntegrationWeightGradShared(), integration_weight_grad, std::vector<TensorT>() = {}); operation_step.sink_layer.integration_weight_grad = integration_weight_grad; operation_step.sink_layer.time_step = FP_operations[operations.second[0]].result.time_step; } } std::shared_ptr<NodeTensorData<TensorT, Eigen::GpuDevice>> source_node_data(new NodeTensorDataGpu<TensorT>()); { // make the source layer tensor and add it to the cache and operation step std::shared_ptr<ActivationTensorOp<TensorT, Eigen::GpuDevice>> activation = nullptr; std::shared_ptr<ActivationTensorOp<TensorT, Eigen::GpuDevice>> activation_grad = nullptr; std::shared_ptr<IntegrationTensorOp<TensorT, Eigen::GpuDevice>> integration = nullptr; std::shared_ptr<IntegrationErrorTensorOp<TensorT, Eigen::GpuDevice>> integration_error = nullptr; std::shared_ptr<IntegrationWeightGradTensorOp<TensorT, Eigen::GpuDevice>> integration_weight_grad = nullptr; if (make_source_tensors[iter]) { source_node_data->initNodeTensorData(batch_size, memory_size, source_layer_sizes[iter], FP_operations[operations.second[0]].arguments[0].source_node->getType(), FP_operations[operations.second[0]].arguments[0].source_node->getIntegrationShared()->getName(), train); operation_step.source_layer.time_step = FP_operations[operations.second[0]].arguments[0].time_step; this->layer_tensors_.push_back(source_node_data); activation_conv(FP_operations[operations.second[0]].arguments[0].source_node->getActivationShared(), activation, std::vector<TensorT>() = {}); operation_step.source_layer.activation = activation; activation_conv(FP_operations[operations.second[0]].arguments[0].source_node->getActivationGradShared(), activation_grad, std::vector<TensorT>() = {}); operation_step.source_layer.activation_grad = activation_grad; integration_conv(FP_operations[operations.second[0]].arguments[0].source_node->getIntegrationShared(), integration, std::vector<TensorT>() = {}); operation_step.source_layer.integration = integration; integration_error_conv(FP_operations[operations.second[0]].arguments[0].source_node->getIntegrationErrorShared(), integration_error, std::vector<TensorT>() = {}); operation_step.source_layer.integration_error = integration_error; integration_weight_grad_conv(FP_operations[operations.second[0]].arguments[0].source_node->getIntegrationWeightGradShared(), integration_weight_grad, std::vector<TensorT>() = {}); operation_step.source_layer.integration_weight_grad = integration_weight_grad; operation_step.source_layer.tensor_index = FP_operations[operations.second[0]].arguments[0].source_node->getTensorIndex().first; } else { operation_step.source_layer.tensor_index = FP_operations[operations.second[0]].arguments[0].source_node->getTensorIndex().first; operation_step.source_layer.time_step = FP_operations[operations.second[0]].arguments[0].time_step; activation_conv(FP_operations[operations.second[0]].arguments[0].source_node->getActivationShared(), activation, std::vector<TensorT>() = {}); operation_step.source_layer.activation = activation; activation_conv(FP_operations[operations.second[0]].arguments[0].source_node->getActivationGradShared(), activation_grad, std::vector<TensorT>() = {}); operation_step.source_layer.activation_grad = activation_grad; integration_conv(FP_operations[operations.second[0]].arguments[0].source_node->getIntegrationShared(), integration, std::vector<TensorT>() = {}); operation_step.source_layer.integration = integration; integration_error_conv(FP_operations[operations.second[0]].arguments[0].source_node->getIntegrationErrorShared(), integration_error, std::vector<TensorT>() = {}); operation_step.source_layer.integration_error = integration_error; integration_weight_grad_conv(FP_operations[operations.second[0]].arguments[0].source_node->getIntegrationWeightGradShared(), integration_weight_grad, std::vector<TensorT>() = {}); operation_step.source_layer.integration_weight_grad = integration_weight_grad; } } // make the weight tensor and add it to the cache and operation step std::shared_ptr<WeightTensorData<TensorT, Eigen::GpuDevice>> weight_data = std::make_shared<WeightTensorDataGpu<TensorT>>(WeightTensorDataGpu<TensorT>()); if (make_weight_tensors[iter]) { std::shared_ptr<SolverTensorOp<TensorT, Eigen::GpuDevice>> solver = nullptr; std::vector<TensorT> solver_params; solver_conv(FP_operations[operations.second[0]].arguments[0].weight->getSolverOpShared(), solver, solver_params); weight_data->initWeightTensorData(source_layer_sizes[iter], sink_layer_sizes[iter], weight_indices[iter], shared_weight_indices[iter], weight_values[iter], train, solver_params, FP_operations[operations.second[0]].result.sink_node->getIntegrationShared()->getName()); this->weight_tensors_.push_back(weight_data); operation_step.weight.tensor_index = std::get<0>(FP_operations[operations.second[0]].arguments[0].weight->getTensorIndex()[0]); operation_step.weight.solver = solver; } else { std::cout << "Weight tensor is not being created...Check!" << std::endl; } operation_step_list.push_back(operation_step); ++iter; } // add the operations to the cache this->operation_steps_.push_back(operation_step_list); } template<typename TensorT> void ModelInterpreterGpu<TensorT>::executeForwardPropogationOperations(const int& time_step) { for (auto& operations_list : this->operation_steps_) { // Set up the device, streams, and kernals ModelKernalGpu<TensorT> model_kernal; assert(cudaSetDevice(this->getModelResources().at(0).getID()) == cudaSuccess); // is this needed? std::vector<cudaStream_t> streams; for (size_t i = 0; i < operations_list.size(); ++i) { cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope! assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); streams.push_back(stream); } // execute the forward propogation steps int device_iter = 0; for (OperationTensorStep<TensorT, Eigen::GpuDevice>& operation : operations_list) { Eigen::GpuStreamDevice stream_device(&streams[device_iter], getModelResources().at(0).getID()); Eigen::GpuDevice device(&stream_device); if (!this->layer_tensors_.at(operation.source_layer.tensor_index)->getOutputStatus().second) this->layer_tensors_.at(operation.source_layer.tensor_index)->syncHAndDOutput(device); if (!this->weight_tensors_.at(operation.weight.tensor_index)->getWeightStatus().second) this->weight_tensors_.at(operation.weight.tensor_index)->syncHAndDWeight(device); if (!this->layer_tensors_.at(operation.sink_layer.tensor_index)->getInputStatus().second) this->layer_tensors_.at(operation.sink_layer.tensor_index)->syncHAndDInput(device); model_kernal.executeForwardPropogation( this->layer_tensors_.at(operation.source_layer.tensor_index)->getHOutputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDOutputPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getHWeightPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDWeightPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getHInputPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDInputPointer().get(), operation.sink_layer.integration, this->layer_tensors_.at(operation.source_layer.tensor_index)->getBatchSize(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getMemorySize(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getLayerSize(), operation.source_layer.time_step + time_step, operation.sink_layer.time_step + time_step, device); if (!this->layer_tensors_.at(operation.sink_layer.tensor_index)->getOutputStatus().second) this->layer_tensors_.at(operation.sink_layer.tensor_index)->syncHAndDOutput(device); if (!this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDtStatus().second) this->layer_tensors_.at(operation.sink_layer.tensor_index)->syncHAndDDt(device); model_kernal.executeNodeActivation( this->layer_tensors_.at(operation.sink_layer.tensor_index)->getHInputPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDInputPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getHOutputPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDOutputPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getHDtPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDDtPointer().get(), operation.sink_layer.activation, this->layer_tensors_.at(operation.sink_layer.tensor_index)->getBatchSize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getMemorySize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getLayerSize(), operation.sink_layer.time_step + time_step, device); ++device_iter; } // sync and destroy the streams for (size_t i = 0; i < operations_list.size(); ++i) { assert(cudaStreamSynchronize(streams[i]) == cudaSuccess); assert(cudaStreamDestroy(streams[i]) == cudaSuccess); } } } template<typename TensorT> inline void ModelInterpreterGpu<TensorT>::executeBackwardPropogationOperations(const int & time_step) { for (int iter = this->operation_steps_.size() - 1; iter >= 0; --iter) { //iterate backwards // Set up the device, streams, and kernals ModelKernalGpu<TensorT> model_kernal; assert(cudaSetDevice(this->getModelResources().at(0).getID()) == cudaSuccess); // is this needed? std::vector<cudaStream_t> streams; for (size_t i = 0; i < this->operation_steps_[iter].size(); ++i) { cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope! assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); streams.push_back(stream); } // execute the forward propogation steps int device_iter = 0; for (OperationTensorStep<TensorT, Eigen::GpuDevice>& operation : this->operation_steps_[iter]) { //reverse source/sink Eigen::GpuStreamDevice stream_device(&streams[device_iter], getModelResources().at(0).getID()); Eigen::GpuDevice device(&stream_device); if (!this->layer_tensors_.at(operation.source_layer.tensor_index)->getOutputStatus().second) this->layer_tensors_.at(operation.source_layer.tensor_index)->syncHAndDOutput(device); if (!this->layer_tensors_.at(operation.source_layer.tensor_index)->getDerivativeStatus().second) this->layer_tensors_.at(operation.source_layer.tensor_index)->syncHAndDDerivative(device); model_kernal.executeNodeDerivative( this->layer_tensors_.at(operation.source_layer.tensor_index)->getHOutputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDOutputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getHDerivativePointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDDerivativePointer().get(), operation.source_layer.activation_grad, this->layer_tensors_.at(operation.source_layer.tensor_index)->getBatchSize(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getMemorySize(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), operation.source_layer.time_step + time_step, device); if (!this->layer_tensors_.at(operation.sink_layer.tensor_index)->getErrorStatus().second) this->layer_tensors_.at(operation.sink_layer.tensor_index)->syncHAndDError(device); if (!this->layer_tensors_.at(operation.sink_layer.tensor_index)->getInputStatus().second) this->layer_tensors_.at(operation.sink_layer.tensor_index)->syncHAndDInput(device); if (!this->weight_tensors_.at(operation.weight.tensor_index)->getWeightStatus().second) this->weight_tensors_.at(operation.weight.tensor_index)->syncHAndDWeight(device); if (!this->layer_tensors_.at(operation.source_layer.tensor_index)->getErrorStatus().second) this->layer_tensors_.at(operation.source_layer.tensor_index)->syncHAndDError(device); model_kernal.executeBackwardPropogation( this->layer_tensors_.at(operation.sink_layer.tensor_index)->getHErrorPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDErrorPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getHInputPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDInputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getHOutputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDOutputPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getHWeightPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDWeightPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getHErrorPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDErrorPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getHDerivativePointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDDerivativePointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), // [TODO: replace with N] operation.sink_layer.integration_error, // changed from source_layer this->layer_tensors_.at(operation.sink_layer.tensor_index)->getBatchSize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getMemorySize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getLayerSize(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), operation.sink_layer.time_step + time_step, operation.source_layer.time_step + time_step, device); ++device_iter; } // sync and destroy the streams for (size_t i = 0; i < this->operation_steps_[iter].size(); ++i) { assert(cudaStreamSynchronize(streams[i]) == cudaSuccess); assert(cudaStreamDestroy(streams[i]) == cudaSuccess); } } } template<typename TensorT> inline void ModelInterpreterGpu<TensorT>::executeModelErrorOperations(Eigen::Tensor<TensorT, 2>& expected, const int& layer_id, std::shared_ptr<LossFunctionTensorOp<TensorT,Eigen::GpuDevice>>& loss_function, std::shared_ptr<LossFunctionGradTensorOp<TensorT,Eigen::GpuDevice>>& loss_function_grad, const int& time_step) { // More performant if all model error calculations were passed at the same time ModelKernalGpu<TensorT> model_kernal; cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope! assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, getModelResources().at(0).getID()); Eigen::GpuDevice device(&stream_device); auto layer_tensor_data = this->getLayerTensor(layer_id); // Sync the model error, node error, and node output if (!this->model_error_->getErrorStatus().second) this->model_error_->syncHAndDError(device); if (!layer_tensor_data->getErrorStatus().second) layer_tensor_data->syncHAndDError(device); if (!layer_tensor_data->getOutputStatus().second) layer_tensor_data->syncHAndDOutput(device); // Calculate the model and node errors model_kernal.executeModelErrors( expected, layer_tensor_data->getHOutputPointer().get(), layer_tensor_data->getDOutputPointer().get(), this->model_error_->getHErrorPointer().get(), this->model_error_->getDErrorPointer().get(), layer_tensor_data->getHErrorPointer().get(), layer_tensor_data->getDErrorPointer().get(), loss_function, loss_function_grad, layer_tensor_data->getBatchSize(), layer_tensor_data->getMemorySize(), layer_tensor_data->getLayerSize(), time_step, device); assert(cudaStreamSynchronize(stream) == cudaSuccess); assert(cudaStreamDestroy(stream) == cudaSuccess); } template<typename TensorT> inline void ModelInterpreterGpu<TensorT>::executeModelMetricOperations(Eigen::Tensor<TensorT, 2>& expected, const int & layer_id, std::shared_ptr<MetricFunctionTensorOp<TensorT,Eigen::GpuDevice>> metric_function, const int & time_step, const int & metric_index) { // More performant if all model error calculations were passed at the same time ModelKernalGpu<TensorT> model_kernal; cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope! assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, getModelResources().at(0).getID()); Eigen::GpuDevice device(&stream_device); auto layer_tensor_data = this->getLayerTensor(layer_id); // Sync the model metric and node output if (!this->model_error_->getMetricStatus().second) this->model_error_->syncHAndDMetric(device); if (!layer_tensor_data->getOutputStatus().second) layer_tensor_data->syncHAndDOutput(device); // Calculate the model metric model_kernal.executeModelMetric( expected, layer_tensor_data->getHOutputPointer().get(), layer_tensor_data->getDOutputPointer().get(), this->model_error_->getHMetricPointer().get(), this->model_error_->getDMetricPointer().get(), metric_function, layer_tensor_data->getBatchSize(), layer_tensor_data->getMemorySize(), layer_tensor_data->getLayerSize(), this->model_error_->getNMetrics(), time_step, metric_index, device); assert(cudaStreamSynchronize(stream) == cudaSuccess); assert(cudaStreamDestroy(stream) == cudaSuccess); } template<typename TensorT> inline void ModelInterpreterGpu<TensorT>::executeWeightErrorOperations() { for (std::vector<OperationTensorStep<TensorT, Eigen::GpuDevice>>& operations_list : this->operation_steps_) { // Set up the device, streams, and kernals ModelKernalGpu<TensorT> model_kernal; assert(cudaSetDevice(this->getModelResources().at(0).getID()) == cudaSuccess); // is this needed? std::vector<cudaStream_t> streams; for (size_t i = 0; i < operations_list.size(); ++i) { cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope! assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); streams.push_back(stream); } // execute the forward propogation steps int device_iter = 0; for (OperationTensorStep<TensorT, Eigen::GpuDevice>& operation : operations_list) { Eigen::GpuStreamDevice stream_device(&streams[device_iter], getModelResources().at(0).getID()); Eigen::GpuDevice device(&stream_device); if (!this->layer_tensors_.at(operation.sink_layer.tensor_index)->getErrorStatus().second) this->layer_tensors_.at(operation.sink_layer.tensor_index)->syncHAndDError(device); if (!this->layer_tensors_.at(operation.source_layer.tensor_index)->getInputStatus().second) this->layer_tensors_.at(operation.source_layer.tensor_index)->syncHAndDInput(device); if (!this->layer_tensors_.at(operation.source_layer.tensor_index)->getOutputStatus().second) this->layer_tensors_.at(operation.source_layer.tensor_index)->syncHAndDOutput(device); if (!this->weight_tensors_.at(operation.weight.tensor_index)->getWeightStatus().second) this->weight_tensors_.at(operation.weight.tensor_index)->syncHAndDWeight(device); if (!this->weight_tensors_.at(operation.weight.tensor_index)->getErrorStatus().second) this->weight_tensors_.at(operation.weight.tensor_index)->syncHAndDError(device); model_kernal.executeWeightErrors( this->layer_tensors_.at(operation.sink_layer.tensor_index)->getHErrorPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getDErrorPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getHOutputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDOutputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getHInputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getDInputPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), // [TODO: change to N] operation.sink_layer.integration_weight_grad, this->weight_tensors_.at(operation.weight.tensor_index)->getHWeightPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDWeightPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getHErrorPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDErrorPointer().get(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getBatchSize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getMemorySize(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getLayerSize(), device); if (!this->weight_tensors_.at(operation.weight.tensor_index)->getSharedWeightsStatus().second) this->weight_tensors_.at(operation.weight.tensor_index)->syncHAndDSharedWeights(device); model_kernal.executeSharedWeightErrors( this->weight_tensors_.at(operation.weight.tensor_index)->getHErrorPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDErrorPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getHSharedWeightsPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDSharedWeightsPointer().get(), this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getLayerSize(), this->weight_tensors_.at(operation.weight.tensor_index)->getNSharedWeights(), device); ++device_iter; } // sync and destroy the streams for (size_t i = 0; i < operations_list.size(); ++i) { assert(cudaStreamSynchronize(streams[i]) == cudaSuccess); assert(cudaStreamDestroy(streams[i]) == cudaSuccess); } } } template<typename TensorT> inline void ModelInterpreterGpu<TensorT>::executeWeightUpdateOperations(const int& iter) { for (std::vector<OperationTensorStep<TensorT, Eigen::GpuDevice>>& operations_list : this->operation_steps_) { // Set up the device, streams, and kernals ModelKernalGpu<TensorT> model_kernal; assert(cudaSetDevice(this->getModelResources().at(0).getID()) == cudaSuccess); // is this needed? std::vector<cudaStream_t> streams; for (size_t i = 0; i < operations_list.size(); ++i) { cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope! assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); streams.push_back(stream); } // execute the forward propogation steps int device_iter = 0; for (OperationTensorStep<TensorT, Eigen::GpuDevice>& operation : operations_list) { Eigen::GpuStreamDevice stream_device(&streams[device_iter], getModelResources().at(0).getID()); Eigen::GpuDevice device(&stream_device); if (!this->weight_tensors_.at(operation.weight.tensor_index)->getWeightStatus().second) this->weight_tensors_.at(operation.weight.tensor_index)->syncHAndDWeight(device); if (!this->weight_tensors_.at(operation.weight.tensor_index)->getErrorStatus().second) this->weight_tensors_.at(operation.weight.tensor_index)->syncHAndDError(device); if (!this->weight_tensors_.at(operation.weight.tensor_index)->getSolverParamsStatus().second) this->weight_tensors_.at(operation.weight.tensor_index)->syncHAndDSolverParams(device); model_kernal.executeWeightUpdate( this->weight_tensors_.at(operation.weight.tensor_index)->getHWeightPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDWeightPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getHSolverParamsPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDSolverParamsPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getHErrorPointer().get(), this->weight_tensors_.at(operation.weight.tensor_index)->getDErrorPointer().get(), operation.weight.solver, this->layer_tensors_.at(operation.source_layer.tensor_index)->getLayerSize(), this->layer_tensors_.at(operation.sink_layer.tensor_index)->getLayerSize(), iter, device); ++device_iter; } // sync and destroy the streams for (size_t i = 0; i < operations_list.size(); ++i) { assert(cudaStreamSynchronize(streams[i]) == cudaSuccess); assert(cudaStreamDestroy(streams[i]) == cudaSuccess); } } } template<typename TensorT> inline void ModelInterpreterGpu<TensorT>::allocateModelErrorTensor(const int& batch_size, const int& memory_size, const int& n_metrics) { std::shared_ptr<ModelErrorData<TensorT, Eigen::GpuDevice>> model_error_data = std:: make_shared<ModelErrorDataGpu<TensorT>>(ModelErrorDataGpu<TensorT>()); model_error_data->initModelErrorData(batch_size, memory_size, n_metrics); this->model_error_ = model_error_data; } template<typename TensorT> inline void ModelInterpreterGpu<TensorT>::getModelResults(Model<TensorT>& model, const bool& output_nodes, const bool& weights, const bool& model_error, const bool& input_nodes) { // Synchronize all data with the host cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope! assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, getModelResources().at(0).getID()); Eigen::GpuDevice device(&stream_device); // sync the weight values if (weights) { for (auto& weight_map : model.getWeightsMap()) { if (weight_map.second->getTensorIndex().size() > 0) { const int tensor_index = std::get<0>(weight_map.second->getTensorIndex()[0]); if (!this->getWeightTensor(tensor_index)->getWeightStatus().first) this->getWeightTensor(tensor_index)->syncHAndDWeight(device); } } } // sync the model error if (model_error) { if (!this->model_error_->getErrorStatus().first) this->model_error_->syncHAndDError(device); if (!this->model_error_->getMetricStatus().first) this->model_error_->syncHAndDMetric(device); } // sync the output node values if (output_nodes) { for (auto& output_node : model.getOutputNodes()) { // NOTE: there is a strange bug where the tensor indices of the output nodes pointer are not updated //const int tensor_index = output_node->getTensorIndex().first; //const int layer_index = output_node->getTensorIndex().second; const int tensor_index = model.getNodesMap().at(output_node->getName())->getTensorIndex().first; if (!this->getLayerTensor(tensor_index)->getOutputStatus().first) this->getLayerTensor(tensor_index)->syncHAndDOutput(device); } } // sync the input node values if (input_nodes) { for (auto& input_node : model.getInputNodes()) { // NOTE: there is a strange bug where the tensor indices of the input nodes pointer are not updated //const int tensor_index = input_node->getTensorIndex().first; //const int layer_index = input_node->getTensorIndex().second; const int tensor_index = model.getNodesMap().at(input_node->getName())->getTensorIndex().first; if (!this->getLayerTensor(tensor_index)->getInputStatus().first) this->getLayerTensor(tensor_index)->syncHAndDInput(device); } } assert(cudaStreamSynchronize(stream) == cudaSuccess); assert(cudaStreamDestroy(stream) == cudaSuccess); // copy out the weight values if (weights) { for (auto& weight_map : model.getWeightsMap()) { if (weight_map.second->getTensorIndex().size() > 0) { const int tensor_index = std::get<0>(weight_map.second->getTensorIndex()[0]); const int layer1_index = std::get<1>(weight_map.second->getTensorIndex()[0]); const int layer2_index = std::get<2>(weight_map.second->getTensorIndex()[0]); weight_map.second->setWeight(this->getWeightTensor(tensor_index)->getWeight()(layer1_index, layer2_index)); } } } // copy out the model error if (model_error) { model.setError(this->model_error_->getError()); model.setMetric(this->model_error_->getMetric()); } // copy out the output node values if (output_nodes) { for (auto& output_node : model.getOutputNodes()) { // NOTE: there is a strange bug where the tensor indices of the output nodes pointer are not updated //const int tensor_index = output_node->getTensorIndex().first; //const int layer_index = output_node->getTensorIndex().second; const int tensor_index = model.getNodesMap().at(output_node->getName())->getTensorIndex().first; const int layer_index = model.getNodesMap().at(output_node->getName())->getTensorIndex().second; output_node->setOutput(this->getLayerTensor(tensor_index)->getOutput().chip(layer_index, 2)); } } // copy out the input node values if (input_nodes) { for (auto& input_node : model.getInputNodes()) { const int tensor_index = model.getNodesMap().at(input_node->getName())->getTensorIndex().first; const int layer_index = model.getNodesMap().at(input_node->getName())->getTensorIndex().second; input_node->setInput(this->getLayerTensor(tensor_index)->getInput().chip(layer_index, 2)); } } } template<typename TensorT> inline void ModelInterpreterGpu<TensorT>::checkMemory(const Model<TensorT>& model, const int& batch_size, const int& memory_size) { assert(cudaSetDevice(this->getModelResources().at(0).getID()) == cudaSuccess); // is this needed? // get the device memory size_t free_byte, total_byte; cudaMemGetInfo(&free_byte, &total_byte); // estimate the needed model memory size_t node_mem = model.nodes_.size() * 4 * batch_size * (memory_size + 1) * sizeof(TensorT); // best and worst case scenario estimation of weight, error, and solver parameter sizes size_t weight_mem_best = model.weights_.size() * 3 * 6 * sizeof(TensorT); // assumptions: all fully connected nodes with adam optimizer (6 params) size_t weight_mem_worst = model.weights_.size() * model.weights_.size() * 3 * 6 * sizeof(TensorT); // assumptions: all singly connected nodes with adam optimizer (6 params) //size_t weight_mem = (size_t)((float)weight_mem_best * 0.8 + (float)weight_mem_worst * 0.2); size_t weight_mem = weight_mem_best; assert(free_byte > (node_mem + weight_mem)); } template<typename TensorT> inline void ModelInterpreterGpu<TensorT>::updateSolverParams(const int & param_index, const TensorT & param_factor) { assert(cudaSetDevice(this->getModelResources().at(0).getID()) == cudaSuccess); // is this needed? assert(cudaSetDevice(this->getModelResources().at(0).getID()) == cudaSuccess); // is this needed? std::vector<cudaStream_t> streams; for (size_t i = 0; i < this->weight_tensors_.size(); ++i) { cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope! assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); streams.push_back(stream); } size_t device_iter = 0; for (auto& weight_tensor_data : this->weight_tensors_) { if (weight_tensor_data->getNSolverParams() > 0) { Eigen::GpuStreamDevice stream_device(&streams[device_iter], getModelResources().at(0).getID()); Eigen::GpuDevice device(&stream_device); if (!weight_tensor_data->getSolverParamsStatus().second) weight_tensor_data->syncHAndDSolverParams(device); Eigen::TensorMap<Eigen::Tensor<TensorT, 3>> solver_params(weight_tensor_data->getDSolverParamsPointer().get(), weight_tensor_data->getLayer1Size(), weight_tensor_data->getLayer2Size(), weight_tensor_data->getNSolverParams()); solver_params.chip(param_index, 2).device(device) = solver_params.chip(param_index, 2) * solver_params.chip(param_index, 2).constant(param_factor); ++device_iter; } } // sync and destroy the streams for (size_t i = 0; i < this->weight_tensors_.size(); ++i) { assert(cudaStreamSynchronize(streams[i]) == cudaSuccess); assert(cudaStreamDestroy(streams[i]) == cudaSuccess); } } } CEREAL_REGISTER_TYPE(EvoNet::ModelInterpreterGpu<float>); // TODO: add double, int, etc. #endif #endif //EVONET_MODELINTERPRETERGPU_H<file_sep>#ifndef EVONET_STRINGPARSING_H #define EVONET_STRINGPARSING_H #include <algorithm> #include <vector> #include <string> #include <regex> #include <iostream> #include <cctype> namespace EvoNet { /* @brief Methods for string parsing, tokenization, etc. */ /* @brief Replace tokens in a string Tests: std::string test = RemoveTokens("{postgres list}"); BOOST_TEST_EQUAL(test, "postgres list"); @param[in] string @param[in] tokens Vector of strings @returns string with tokens replaced **/ static std::string ReplaceTokens(const std::string& str, const std::vector<std::string>& tokens, const std::string& replacement) { std::string str_copy = str; for (const std::string& token : tokens) str_copy = std::regex_replace(str_copy, std::regex(token), replacement); return str_copy; } /* @brief Split string into a vector of substrings Tests: std::vector<std::string> test = SplitString("a,b,c,d,e"); std::vector<std::string> check = {"a","b","c","d","e"}; BOOST_TEST_EQUAL(test, check); @param[in] string @param[in] delimiter Token to use to split @returns a vector of strings **/ static std::vector<std::string> SplitString( const std::string& str, const std::string& delimiter) { std::vector<std::string> tokens; std::string str_copy = str; size_t pos = 0; while ((pos = str_copy.find(delimiter)) != std::string::npos) { std::string token = str_copy.substr(0, pos); tokens.push_back(token); str_copy.erase(0, pos + delimiter.length()); } tokens.push_back(str_copy); // the last element return tokens; } /* @brief Replace all whitespaces in a string Tests: std::string test = RemoveWhiteSpaces("A string with \t\t\t a lot of \n\n whitespace\n"); BOOST_TEST_EQUAL(test, "Astringwithalotofwhitespace"); @param[in] string @returns string with tokens replaced **/ static std::string RemoveWhiteSpaces(const std::string& str) { std::string str_nws = str; str_nws.erase( std::remove_if(str_nws.begin(), str_nws.end(), [](unsigned char c) { return std::isspace(c); }), str_nws.end()); return str_nws; } } #endif //EVONET_STRINGPARSING_H<file_sep> # -------------------------------------------------------------------------- # general definitions used for building EvoNet packages set(CPACK_PACKAGE_NAME "EvoNet") set(CPACK_PACKAGE_VENDOR "EvoNet.com") set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "EvoNet - A framework for mass spectrometry") set(CPACK_PACKAGE_VERSION "${OPENMS_PACKAGE_VERSION_MAJOR}.${OPENMS_PACKAGE_VERSION_MINOR}.${OPENMS_PACKAGE_VERSION_PATCH}") set(CPACK_PACKAGE_VERSION_MAJOR "${OPENMS_PACKAGE_VERSION_MAJOR}") set(CPACK_PACKAGE_VERSION_MINOR "${OPENMS_PACKAGE_VERSION_MINOR}") set(CPACK_PACKAGE_VERSION_PATCH "${OPENMS_PACKAGE_VERSION_PATCH}") set(CPACK_PACKAGE_INSTALL_DIRECTORY "EvoNet-${CPACK_PACKAGE_VERSION}") set(CPACK_PACKAGE_DESCRIPTION_FILE ${PROJECT_SOURCE_DIR}/cmake/EvoNetPackageDescriptionFile.cmake) set(CPACK_RESOURCE_FILE_LICENSE ${PROJECT_SOURCE_DIR}/License.txt) set(CPACK_RESOURCE_FILE_WELCOME ${PROJECT_SOURCE_DIR}/cmake/EvoNetPackageResourceWelcomeFile.txt) set(CPACK_RESOURCE_FILE_README ${PROJECT_SOURCE_DIR}/cmake/EvoNetPackageResourceReadme.txt) ########################################################### Fixing dynamic dependencies # Done on Windows via copying external and internal dlls to the install/bin/ folder # Done on Mac via fixup_bundle for the GUI apps (TOPPView, TOPPAS) and via fix_mac_dependencies for the TOPP tools # which recursively gathers dylds, copies them to install/lib/ and sets the install_name of the binaries to @executable_path/../lib # Not done on Linux. Either install systemwide (omit CMAKE_INSTALL_PREFIX or set it to /usr/) or install and add the # install/lib/ folder to the LD_LIBRARY_PATH #install(CODE " # include(BundleUtilities) # GET_BUNDLE_ALL_EXECUTABLES(\${CMAKE_INSTALL_PREFIX}/${INSTALL_BIN_DIR} EXECS) # fixup_bundle(\"${EXECS}\" \"\" \"\${CMAKE_INSTALL_PREFIX}/${INSTALL_LIB_DIR}\") # " COMPONENT applications)<file_sep>Developer Reference Documentation ============================================================================ .. todo:: Various optimisations. Project Layout ---------------------------------------------------------------------------- The brief overview of how the project is constructed: **Core** TODO **Graph** TODO **IO** TODO **ML** TODO **Models** TODO **Simulator** TODO Full Reference ---------------------------------------------------------------------------- .. toctree:: api/library_root<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_WEIGHTINIT_H #define EVONET_WEIGHTINIT_H #include <unsupported/Eigen/CXX11/Tensor> #include <cmath> #include <random> #include <iostream> #include <cereal/access.hpp> // serialiation of private members #undef min // clashes with std::limit on windows in polymorphic.hpp #undef max // clashes with std::limit on windows in polymorphic.hpp #include <cereal/types/polymorphic.hpp> namespace EvoNet { /** @brief Base class for all weight initialization functions */ template<typename TensorT> class WeightInitOp { public: WeightInitOp() = default; ~WeightInitOp() = default; virtual std::string getName() const = 0; virtual TensorT operator()() const = 0; virtual std::string getParamsAsStr() const = 0; virtual WeightInitOp<TensorT>* copy() const = 0; private: friend class cereal::access; template<class Archive> void serialize(Archive& archive){} }; /** @brief Random weight initialization based on the method of He, et al 2015 References: <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2000). Digital selection and analogue amplification coexist in a cortex-inspired silicon circuit. Nature. 405. pp. 947–951. */ template<typename TensorT> class RandWeightInitOp: public WeightInitOp<TensorT> { public: RandWeightInitOp(TensorT n = 1.0, TensorT f = 1.0): n_(n), f_(f){}; std::string getName() const{return "RandWeightInitOp";}; TensorT operator()() const { std::random_device rd{}; std::mt19937 gen{rd()}; std::normal_distribution<> d{0.0, 1.0}; const TensorT init_value = TensorT(d(gen)*std::sqrt(f_ / n_)); return init_value; }; TensorT getN() const { return n_; } TensorT getF() const { return f_; } std::string getParamsAsStr() const { std::string params = "n:" + std::to_string(getN()) + ";f:" + std::to_string(getF()); return params; } WeightInitOp<TensorT>* copy() const { return new RandWeightInitOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<WeightInitOp<TensorT>>(this), n_, f_); } TensorT n_ = 1.0; ///< the denominator (i.e., number of input nodes for He et al, or average input/output nodes for Xavior et al) TensorT f_ = 1.0; ///< the numerator (i.e., 2 for He et al, 1 for Xavior et al) }; /** @brief Constant weight initialization. */ template<typename TensorT> class ConstWeightInitOp: public WeightInitOp<TensorT> { public: ConstWeightInitOp(const TensorT& n):n_(n){}; ConstWeightInitOp(){}; ~ConstWeightInitOp(){}; std::string getName() const{return "ConstWeightInitOp";}; TensorT operator()() const { return n_; }; TensorT getN() const {return n_;}; std::string getParamsAsStr() const { std::string params = "n:" + std::to_string(getN()); return params; } WeightInitOp<TensorT>* copy() const { return new ConstWeightInitOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<WeightInitOp<TensorT>>(this), n_); } TensorT n_ = 1.0; ///< the constant to return }; /** @brief Random weight initialization over a lower and upper bound of values */ template<typename TensorT> class RangeWeightInitOp : public WeightInitOp<TensorT> { public: RangeWeightInitOp(TensorT lb = 0.0, TensorT ub = 1.0) : lb_(lb), ub_(ub) {}; std::string getName() const { return "RangeWeightInitOp"; }; TensorT operator()() const { std::random_device rd{}; std::mt19937 gen{ rd() }; std::uniform_real_distribution<> d(lb_, ub_); // define the range return d(gen); }; TensorT getLB() const { return lb_; } TensorT getUB() const { return ub_; } std::string getParamsAsStr() const { std::string params = "lb:" + std::to_string(getLB()) + ";ub:" + std::to_string(getUB()); return params; } WeightInitOp<TensorT>* copy() const { return new RangeWeightInitOp<TensorT>(*this); } private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<WeightInitOp<TensorT>>(this), lb_, ub_); } TensorT lb_ = 0.0; ///< the lower bound TensorT ub_ = 1.0; ///< the upper bound }; } CEREAL_REGISTER_TYPE(EvoNet::RandWeightInitOp<float>); CEREAL_REGISTER_TYPE(EvoNet::ConstWeightInitOp<float>); CEREAL_REGISTER_TYPE(EvoNet::RangeWeightInitOp<float>); //CEREAL_REGISTER_TYPE(EvoNet::RandWeightInitOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::ConstWeightInitOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::RangeWeightInitOp<double>); //CEREAL_REGISTER_TYPE(EvoNet::RandWeightInitOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::ConstWeightInitOp<int>); //CEREAL_REGISTER_TYPE(EvoNet::RangeWeightInitOp<int>); #endif //EVONET_WEIGHTINIT_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ChromatogramSimulator test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/simulator/ChromatogramSimulator.h> #include <EvoNet/simulator/PeakSimulator.h> #include <EvoNet/simulator/EMGModel.h> #include <iostream> using namespace EvoNet; using namespace std; template<typename TensorT> class ChromatogramSimulatorExt : public ChromatogramSimulator<TensorT> { public: void simulateTrainingData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) override {}; void simulateValidationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) override {}; void simulateEvaluationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 3>& time_steps) override {}; }; BOOST_AUTO_TEST_SUITE(chromatogramsimulator) BOOST_AUTO_TEST_CASE(constructor) { ChromatogramSimulatorExt<double>* ptr = nullptr; ChromatogramSimulatorExt<double>* nullPointer = nullptr; ptr = new ChromatogramSimulatorExt<double>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { ChromatogramSimulatorExt<double>* ptr = nullptr; ptr = new ChromatogramSimulatorExt<double>(); delete ptr; } BOOST_AUTO_TEST_CASE(findPeakOverlap) { ChromatogramSimulatorExt<double> chromsimulator; PeakSimulator<double> peak_left, peak_right; EMGModel<double> emg_left, emg_right; // Overlapping windows; left and right baseline are equal; peak_left = PeakSimulator<double>(1.0, 0.0, 0.0, 12.0, 0.0, 0.0, 1.0, 5.0, //bl, br 15); peak_right = PeakSimulator<double>(1.0, 0.0, 8.0, 20.0, 0.0, 0.0, 5.0, 1.0, //bl, br 15); emg_left = EMGModel<double>(10.0, 0.0, 5.0, 1.0); emg_right = EMGModel<double>(10.0, 0.0, 15.0, 1.0); chromsimulator.findPeakOverlap(peak_left, emg_left, peak_right, emg_right); BOOST_CHECK_EQUAL(chromsimulator.findPeakOverlap(peak_left, emg_left, peak_right, emg_right), 12.0); // Merged peaks: both baselines overlap peak_left = PeakSimulator<double>(1.0, 0.0, 0.0, 15.0, 0.0, 0.0, 1.0, 1.0, //bl, br 15); peak_right = PeakSimulator<double>(1.0, 0.0, 5.0, 20.0, 0.0, 0.0, 1.0, 1.0, //bl, br 15); emg_left = EMGModel<double>(10.0, 0.0, 9.0, 1.0); emg_right = EMGModel<double>(10.0, 0.0, 11.0, 1.0); chromsimulator.findPeakOverlap(peak_left, emg_left, peak_right, emg_right); BOOST_CHECK_EQUAL(chromsimulator.findPeakOverlap(peak_left, emg_left, peak_right, emg_right), 10.0); // Merged peaks: both baselines do not overlap peak_left = PeakSimulator<double>(1.0, 0.0, 0.0, 13.0, 0.0, 0.0, 1.0, 1.0, //bl, br 15); peak_right = PeakSimulator<double>(1.0, 0.0, 5.0, 20.0, 0.0, 0.0, 1.0, 1.0, //bl, br 15); emg_left = EMGModel<double>(10.0, 0.0, 9.0, 1.0); emg_right = EMGModel<double>(10.0, 0.0, 11.0, 1.0); chromsimulator.findPeakOverlap(peak_left, emg_left, peak_right, emg_right); BOOST_CHECK_EQUAL(chromsimulator.findPeakOverlap(peak_left, emg_left, peak_right, emg_right), 10.0); } BOOST_AUTO_TEST_CASE(joinPeakWindows1) { ChromatogramSimulatorExt<double> chromsimulator; PeakSimulator<double> peak_left, peak_right; EMGModel<double> emg_left, emg_right; // Perfect overlap; no differences in baseline peak_left = PeakSimulator<double>(1.0, 0.0, 0.0, 10.0, 0.0, 0.0, 1.0, 1.0, //bl, br 15); peak_right = PeakSimulator<double>(1.0, 0.0, 10.0, 20.0, 0.0, 0.0, 1.0, 1.0, //bl, br 15); emg_left = EMGModel<double>(10.0, 0.0, 5.0, 1.0); emg_right = EMGModel<double>(10.0, 0.0, 15.0, 1.0); chromsimulator.joinPeakWindows(peak_left, emg_left, peak_right, emg_right); BOOST_CHECK_EQUAL(peak_left.getBaselineRight(), 1.0); BOOST_CHECK_EQUAL(peak_right.getBaselineLeft(), 1.0); BOOST_CHECK_EQUAL(peak_left.getWindowEnd(), 10.0); BOOST_CHECK_EQUAL(peak_right.getWindowStart(), 10.0); BOOST_CHECK_EQUAL(emg_left.getMu(), 5.0); BOOST_CHECK_EQUAL(emg_right.getMu(), 15.0); // Perfect overlap; no differences in baseline // swapped peaks peak_right = PeakSimulator<double>(1.0, 0.0, 0.0, 10.0, 0.0, 0.0, 1.0, 1.0, //bl, br 15); peak_left = PeakSimulator<double>(1.0, 0.0, 10.0, 20.0, 0.0, 0.0, 1.0, 1.0, //bl, br 15); emg_right = EMGModel<double>(10.0, 0.0, 5.0, 1.0); emg_left = EMGModel<double>(10.0, 0.0, 15.0, 1.0); chromsimulator.joinPeakWindows(peak_left, emg_left, peak_right, emg_right); BOOST_CHECK_EQUAL(peak_left.getBaselineRight(), 1.0); BOOST_CHECK_EQUAL(peak_right.getBaselineLeft(), 1.0); BOOST_CHECK_EQUAL(peak_left.getWindowStart(), 0.0); BOOST_CHECK_EQUAL(peak_left.getWindowEnd(), 10.0); BOOST_CHECK_EQUAL(peak_right.getWindowStart(), 10.0); BOOST_CHECK_EQUAL(peak_right.getWindowEnd(), 20.0); BOOST_CHECK_EQUAL(emg_left.getMu(), 5.0); BOOST_CHECK_EQUAL(emg_right.getMu(), 15.0); // Non overlapping windows; Left baseline is higher peak_left = PeakSimulator<double>(1.0, 0.0, 0.0, 8.0, 0.0, 0.0, 1.0, 5.0, //bl, br 15); peak_right = PeakSimulator<double>(1.0, 0.0, 12.0, 20.0, 0.0, 0.0, 1.0, 1.0, //bl, br 15); emg_left = EMGModel<double>(10.0, 0.0, 5.0, 1.0); emg_right = EMGModel<double>(10.0, 0.0, 15.0, 1.0); chromsimulator.joinPeakWindows(peak_left, emg_left, peak_right, emg_right); BOOST_CHECK_EQUAL(peak_left.getBaselineRight(), 5.0); BOOST_CHECK_EQUAL(peak_right.getBaselineLeft(), 5.0); BOOST_CHECK_EQUAL(peak_left.getWindowEnd(), 12.0); BOOST_CHECK_EQUAL(peak_right.getWindowStart(), 12.0); BOOST_CHECK_EQUAL(emg_left.getMu(), 5.0); BOOST_CHECK_EQUAL(emg_right.getMu(), 15.0); // Non overlapping windows; Right baseline is higher peak_left = PeakSimulator<double>(1.0, 0.0, 0.0, 8.0, 0.0, 0.0, 1.0, 1.0, //bl, br 15); peak_right = PeakSimulator<double>(1.0, 0.0, 12.0, 20.0, 0.0, 0.0, 5.0, 1.0, //bl, br 15); emg_left = EMGModel<double>(10.0, 0.0, 5.0, 1.0); emg_right = EMGModel<double>(10.0, 0.0, 15.0, 1.0); chromsimulator.joinPeakWindows(peak_left, emg_left, peak_right, emg_right); BOOST_CHECK_EQUAL(peak_left.getBaselineRight(), 5.0); BOOST_CHECK_EQUAL(peak_right.getBaselineLeft(), 5.0); BOOST_CHECK_EQUAL(peak_left.getWindowEnd(), 12.0); BOOST_CHECK_EQUAL(peak_right.getWindowStart(), 12.0); BOOST_CHECK_EQUAL(emg_left.getMu(), 5.0); BOOST_CHECK_EQUAL(emg_right.getMu(), 15.0); // Overlapping windows; Left baseline is higher peak_left = PeakSimulator<double>(1.0, 0.0, 0.0, 12.0, 0.0, 0.0, 1.0, 5.0, //bl, br 15); peak_right = PeakSimulator<double>(1.0, 0.0, 8.0, 20.0, 0.0, 0.0, 1.0, 1.0, //bl, br 15); emg_left = EMGModel<double>(10.0, 0.0, 5.0, 1.0); emg_right = EMGModel<double>(10.0, 0.0, 15.0, 1.0); chromsimulator.joinPeakWindows(peak_left, emg_left, peak_right, emg_right); BOOST_CHECK_EQUAL(peak_left.getBaselineRight(), 5.0); BOOST_CHECK_EQUAL(peak_right.getBaselineLeft(), 5.0); BOOST_CHECK_EQUAL(peak_left.getWindowEnd(), 12.0); BOOST_CHECK_EQUAL(peak_right.getWindowStart(), 12.0); BOOST_CHECK_EQUAL(emg_left.getMu(), 5.0); BOOST_CHECK_EQUAL(emg_right.getMu(), 15.0); // Overlapping windows; Right baseline is higher peak_left = PeakSimulator<double>(1.0, 0.0, 0.0, 12.0, 0.0, 0.0, 1.0, 1.0, //bl, br 15); peak_right = PeakSimulator<double>(1.0, 0.0, 8.0, 20.0, 0.0, 0.0, 5.0, 1.0, //bl, br 15); emg_left = EMGModel<double>(10.0, 0.0, 5.0, 1.0); emg_right = EMGModel<double>(10.0, 0.0, 15.0, 1.0); chromsimulator.joinPeakWindows(peak_left, emg_left, peak_right, emg_right); BOOST_CHECK_EQUAL(peak_left.getBaselineRight(), 5.0); BOOST_CHECK_EQUAL(peak_right.getBaselineLeft(), 5.0); BOOST_CHECK_EQUAL(peak_left.getWindowEnd(), 12.0); BOOST_CHECK_EQUAL(peak_right.getWindowStart(), 12.0); BOOST_CHECK_EQUAL(emg_left.getMu(), 5.0); BOOST_CHECK_EQUAL(emg_right.getMu(), 15.0); } BOOST_AUTO_TEST_CASE(makeChromatogram) { ChromatogramSimulatorExt<double> chromsimulator; PeakSimulator<double> peak1, peak2, peak3; EMGModel<double> emg1, emg2, emg3; std::vector<double> chrom_time, chrom_intensity, x_test, y_test; std::vector<std::pair<double, double>> best_lr, best_lr_test; std::vector<double> peak_apices, peak_apices_test; std::vector<PeakSimulator<double>> peaks; std::vector<EMGModel<double>> emgs; // Perfect gaussian peak peak1 = PeakSimulator<double>(1.0, 0.0, 0.0, 10.0, 0.0, 0.0, 1.0, 1.0, //bl, br 100); emg1 = EMGModel<double>(10.0, 0.0, 5.0, 1.0); peaks = {peak1}; emgs = {emg1}; chromsimulator.makeChromatogram(chrom_time, chrom_intensity, best_lr, peak_apices, peaks, emgs); x_test = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; y_test = {1, 1, 1, 1.35335, 6.06531, 10, 6.06531, 1.35335, 1, 1, 1}; for (int i=0; i<chrom_time.size(); ++i) { BOOST_CHECK_CLOSE(chrom_time[i], x_test[i], 1e-3); BOOST_CHECK_CLOSE(chrom_intensity[i], y_test[i], 1e-3); } best_lr_test = { std::make_pair(2,8) }; for (int i = 0; i < best_lr_test.size(); ++i) { BOOST_CHECK_CLOSE(best_lr[i].first, best_lr_test[i].first, 1e-3); BOOST_CHECK_CLOSE(best_lr[i].second, best_lr_test[i].second, 1e-3); } peak_apices_test = { 5 }; for (int i = 0; i < peak_apices_test.size(); ++i) { BOOST_CHECK_CLOSE(peak_apices[i], peak_apices_test[i], 1e-3); } // Perfect gaussian peaks peak1 = PeakSimulator<double>(1.0, 0.0, 0.0, 10.0, 0.0, 0.0, 1.0, 1.0, //bl, br 100); emg1 = EMGModel<double>(10.0, 0.0, 5.0, 1.0); peak2 = PeakSimulator<double>(1.0, 0.0, 10.0, 20.0, 0.0, 0.0, 1.0, 1.0, //bl, br 100); emg2 = EMGModel<double>(10.0, 0.0, 15.0, 1.0); peak3 = PeakSimulator<double>(1.0, 0.0, 20.0, 30.0, 0.0, 0.0, 1.0, 1.0, //bl, br 100); emg3 = EMGModel<double>(10.0, 0.0, 25.0, 1.0); peaks = {peak1, peak2, peak3}; emgs = {emg1, emg2, emg3}; chromsimulator.makeChromatogram(chrom_time, chrom_intensity, best_lr, peak_apices, peaks, emgs); x_test = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}; y_test = {1, 1, 1, 1.35335, 6.06531, 10, 6.06531, 1.35335, 1, 1, 1, 1, 1, 1, 1.35335, 6.06531, 10, 6.06531, 1.35335, 1, 1, 1, 1, 1, 1, 1.35335, 6.06531, 10, 6.06531, 1.35335, 1, 1, 1}; for (int i=0; i<chrom_time.size(); ++i) { BOOST_CHECK_CLOSE(chrom_time[i], x_test[i], 1e-3); BOOST_CHECK_CLOSE(chrom_intensity[i], y_test[i], 1e-3); } best_lr_test = { std::make_pair(2,8),std::make_pair(12,18),std::make_pair(22,28) }; for (int i = 0; i < best_lr_test.size(); ++i) { BOOST_CHECK_CLOSE(best_lr[i].first, best_lr_test[i].first, 1e-3); BOOST_CHECK_CLOSE(best_lr[i].second, best_lr_test[i].second, 1e-3); } peak_apices_test = { 5, 15, 25 }; for (int i = 0; i < peak_apices_test.size(); ++i) { BOOST_CHECK_CLOSE(peak_apices[i], peak_apices_test[i], 1e-3); } // Increase tailing peak1 = PeakSimulator<double>(1.0, 0.0, 0.0, 10.0, 0.0, 0.0, 1.0, 1.0, //bl, br 100); emg1 = EMGModel<double>(10.0, 0.0, 5.0, 1.0); peak2 = PeakSimulator<double>(1.0, 0.0, 10.0, 20.0, 0.0, 0.0, 1.0, 1.0, //bl, br 100); emg2 = EMGModel<double>(10.0, 0.2, 15.0, 1.0); peak3 = PeakSimulator<double>(1.0, 0.0, 20.0, 30.0, 0.0, 0.0, 1.0, 1.0, //bl, br 100); emg3 = EMGModel<double>(10.0, 1.0, 25.0, 1.0); peaks = {peak1, peak2, peak3}; emgs = {emg1, emg2, emg3}; chromsimulator.makeChromatogram(chrom_time, chrom_intensity, best_lr, peak_apices, peaks, emgs); x_test = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}; y_test = {1, 1, 1, 1.35335, 6.06531, 10, 6.06531, 1.35335, 1, 1, 1, 1, 1, 1, 1, 4.92435, 9.64041, 7.17685, 2.06109, 1, 1, 1, 1, 1, 1, 1, 2.55573, 6.5568, 7.60173, 4.70568, 2.01076, 1, 1}; for (int i=0; i<chrom_time.size(); ++i) { BOOST_CHECK_CLOSE(chrom_time[i], x_test[i], 1e-3); BOOST_CHECK_CLOSE(chrom_intensity[i], y_test[i], 1e-3); } best_lr_test = { std::make_pair(2,8),std::make_pair(13,18),std::make_pair(23,29) }; for (int i = 0; i < best_lr_test.size(); ++i) { BOOST_CHECK_CLOSE(best_lr[i].first, best_lr_test[i].first, 1e-3); BOOST_CHECK_CLOSE(best_lr[i].second, best_lr_test[i].second, 1e-3); } peak_apices_test = { 5, 15, 25 }; for (int i = 0; i < peak_apices_test.size(); ++i) { BOOST_CHECK_CLOSE(peak_apices[i], peak_apices_test[i], 1e-3); } // Overlap and cutoff peak peak1 = PeakSimulator<double>(1.0, 0.0, 0.0, 10.0, 0.0, 0.0, 1.0, 1.0, //bl, br 100); emg1 = EMGModel<double>(10.0, 0.0, 5.0, 1.0); peak2 = PeakSimulator<double>(1.0, 0.0, 10.0, 20.0, 0.0, 0.0, 1.0, 1.0, //bl, br 100); emg2 = EMGModel<double>(10.0, 0.0, 7.0, 1.0); peak3 = PeakSimulator<double>(1.0, 0.0, 20.0, 30.0, 0.0, 0.0, 1.0, 1.0, //bl, br 100); emg3 = EMGModel<double>(10.0, 0.0, 29.0, 1.0); peaks = {peak1, peak2, peak3}; emgs = {emg1, emg2, emg3}; chromsimulator.makeChromatogram(chrom_time, chrom_intensity, best_lr, peak_apices, peaks, emgs); x_test = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}; y_test = {1, 1, 1, 1.35335, 6.06531, 10, 6.06531, 1.35335, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1.35335, 6.06531, 10, 6.06531}; for (int i=0; i<chrom_time.size(); ++i) { BOOST_CHECK_CLOSE(chrom_time[i], x_test[i], 1e-3); BOOST_CHECK_CLOSE(chrom_intensity[i], y_test[i], 1e-3); } best_lr_test = { std::make_pair(2,8),std::make_pair(26,30) }; for (int i = 0; i < best_lr_test.size(); ++i) { BOOST_CHECK_CLOSE(best_lr[i].first, best_lr_test[i].first, 1e-3); BOOST_CHECK_CLOSE(best_lr[i].second, best_lr_test[i].second, 1e-3); } peak_apices_test = { 5, 7, 29 }; for (int i = 0; i < peak_apices_test.size(); ++i) { BOOST_CHECK_CLOSE(peak_apices[i], peak_apices_test[i], 1e-3); } // // UNCOMMENT to print out new test values // for (int i=0; i<chrom_time.size(); ++i) // { // std::cout<< chrom_time[i] << " " << chrom_intensity[i] <<std::endl; // } // std::cout<< ";" <<std::endl; // for (int i=0; i<chrom_time.size(); ++i) // { // std::cout<< chrom_time[i] << ", "; // } // std::cout<< ";" <<std::endl; // for (int i=0; i<chrom_intensity.size(); ++i) // { // std::cout<< chrom_intensity[i] << ", "; // } // std::cout<< ";" <<std::endl; } BOOST_AUTO_TEST_CASE(simulateChromatogram) { ChromatogramSimulatorExt<double> chromsimulator; std::vector<double> chrom_time, chrom_intensity, chrom_time_noise, chrom_intensity_noise, x_test, y_test; std::vector<std::pair<double, double>> best_lr, best_lr_test; std::vector<double> peak_apices, peak_apices_test; std::vector<EMGModel<double>> emgs; chromsimulator.simulateChromatogram(chrom_time, chrom_intensity, chrom_time_noise, chrom_intensity_noise, best_lr, peak_apices, emgs, std::make_pair(1.0, 1.0), std::make_pair(0.1, 0.2), std::make_pair(10.0, 10.0), std::make_pair(0.1, 0.2), std::make_pair(0.1, 0.2), std::make_pair(1.0, 1.0), std::make_pair(1.0, 1.0), std::make_pair(10.0, 10.0), std::make_pair(0.0, 0.0), std::make_pair(0.0, 0.0), std::make_pair(1.0, 1.0) ); x_test = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; y_test = { 1, 1, 1, 1.35335, 6.06531, 10, 6.06531, 1.35335, 1, 1, 1 }; for (int i = 0; i < chrom_time.size(); ++i) { BOOST_CHECK_CLOSE(chrom_time[i], x_test[i], 1e-3); BOOST_CHECK_CLOSE(chrom_intensity[i], y_test[i], 1e-3); } best_lr_test = { std::make_pair(2,8) }; for (int i = 0; i < best_lr_test.size(); ++i) { BOOST_CHECK_CLOSE(best_lr[i].first, best_lr_test[i].first, 1e-3); BOOST_CHECK_CLOSE(best_lr[i].second, best_lr_test[i].second, 1e-3); } peak_apices_test = { 5 }; for (int i = 0; i < peak_apices_test.size(); ++i) { BOOST_CHECK_CLOSE(peak_apices[i], peak_apices_test[i], 1e-3); } BOOST_CHECK_EQUAL(emgs.size(), 1); // Perfect gaussian peaks chromsimulator.simulateChromatogram(chrom_time, chrom_intensity, chrom_time_noise, chrom_intensity_noise, best_lr, peak_apices, emgs, std::make_pair(1.0, 1.0), std::make_pair(0.1, 0.2), std::make_pair(30.0, 30.0), std::make_pair(0.1, 0.2), std::make_pair(0.1, 0.2), std::make_pair(1.0, 1.0), std::make_pair(3.0, 3.0), std::make_pair(10.0, 10.0), std::make_pair(0.0, 0.0), std::make_pair(0.0, 0.0), std::make_pair(1.0, 1.0) ); x_test = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 }; y_test = { 1, 1, 1, 1.35335, 6.06531, 10, 6.06531, 1.35335, 1, 1, 1, 1, 1, 1, 1.35335, 6.06531, 10, 6.06531, 1.35335, 1, 1, 1, 1, 1, 1, 1.35335, 6.06531, 10, 6.06531, 1.35335, 1, 1, 1 }; for (int i = 0; i < chrom_time.size(); ++i) { BOOST_CHECK_CLOSE(chrom_time[i], x_test[i], 1e-3); BOOST_CHECK_CLOSE(chrom_intensity[i], y_test[i], 1e-3); } best_lr_test = { std::make_pair(2,8),std::make_pair(12,18),std::make_pair(22,28) }; for (int i = 0; i < best_lr_test.size(); ++i) { BOOST_CHECK_CLOSE(best_lr[i].first, best_lr_test[i].first, 1e-3); BOOST_CHECK_CLOSE(best_lr[i].second, best_lr_test[i].second, 1e-3); } peak_apices_test = { 5, 15, 25 }; for (int i = 0; i < peak_apices_test.size(); ++i) { BOOST_CHECK_CLOSE(peak_apices[i], peak_apices_test[i], 1e-3); } BOOST_CHECK_EQUAL(emgs.size(), 3); // Random tailing chromsimulator.simulateChromatogram(chrom_time, chrom_intensity, chrom_time_noise, chrom_intensity_noise, best_lr, peak_apices, emgs, std::make_pair(1.0, 1.0), std::make_pair(0.1, 0.2), std::make_pair(30.0, 30.0), std::make_pair(0.1, 0.2), std::make_pair(0.1, 0.2), std::make_pair(1.0, 1.0), std::make_pair(3.0, 3.0), std::make_pair(10.0, 10.0), std::make_pair(0.0, 1.0), std::make_pair(0.0, 0.0), std::make_pair(1.0, 1.0) ); BOOST_CHECK_EQUAL(chrom_time.size(), 33); BOOST_CHECK_EQUAL(chrom_intensity.size(), 33); BOOST_CHECK_EQUAL(best_lr.size(), 3); BOOST_CHECK_EQUAL(emgs.size(), 3); // Random peak offset chromsimulator.simulateChromatogram(chrom_time, chrom_intensity, chrom_time_noise, chrom_intensity_noise, best_lr, peak_apices, emgs, std::make_pair(1.0, 1.0), std::make_pair(0.1, 0.2), std::make_pair(30.0, 30.0), std::make_pair(0.1, 0.2), std::make_pair(0.1, 0.2), std::make_pair(1.0, 1.0), std::make_pair(3.0, 3.0), std::make_pair(10.0, 10.0), std::make_pair(0.0, 0.0), std::make_pair(0.0, 5.0), std::make_pair(1.0, 1.0) ); BOOST_CHECK_EQUAL(chrom_time.size(), 33); BOOST_CHECK_EQUAL(chrom_intensity.size(), 33); BOOST_CHECK_EQUAL(best_lr.size(), 3); BOOST_CHECK_EQUAL(emgs.size(), 3); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/PopulationTrainerDefaultDevice.h> #include <EvoNet/ml/ModelTrainerDefaultDevice.h> #include <EvoNet/ml/ModelReplicator.h> #include <EvoNet/ml/ModelBuilder.h> // Input only #include <EvoNet/ml/ModelBuilderExperimental.h> #include <EvoNet/ml/Model.h> #include <EvoNet/io/PopulationTrainerFile.h> #include <EvoNet/io/ModelInterpreterFileDefaultDevice.h> #include <EvoNet/simulator/BiochemicalReaction.h> #include <unsupported/Eigen/CXX11/Tensor> using namespace EvoNet; template<typename TensorT> class DataSimulatorExt : public DataSimulator<TensorT> { public: void simulateData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = output_data.dimension(2); const int n_epochs = input_data.dimension(3); //node_name conc //13dpg 0.00024 //2pg 0.0113 //3pg 0.0773 //adp 0.29 //amp 0.0867 //atp 1.6 //dhap 0.16 //f6p 0.0198 //fdp 0.0146 //g3p 0.00728 //g6p 0.0486 //glc__D 1 //h 1.00E-03 //h2o 1 //lac__L 1.36 //nad 0.0589 //nadh 0.0301 //pep 0.017 //pi 2.5 //pyr 0.0603 //GAPD_reverse 1 //PGK_reverse 1 //ENO 1 //ADK1 1 //PGM 1 //ADK1_reverse 1 //PGK 1 //ATPh 1 //PGM_reverse 1 //DM_nadh 1 //ENO_reverse 1 //FBA 1 //FBA_reverse 1 //GAPD 1 //HEX1 1 //LDH_L 1 //LDH_L_reverse 1 //PFK 1 //PGI 1 //PGI_reverse 1 //PYK 1 //TPI_reverse 1 //TPI 1 std::vector<std::string> metabolites = { "13dpg","2pg","3pg","adp","amp","atp","dhap","f6p","fdp","g3p","g6p","glc__D","h","h2o","lac__L","nad","nadh","pep","pi","pyr" }; std::vector<std::string> enzymes = { "ADK1","ADK1_reverse","ATPh","DM_nadh","ENO","ENO_reverse","FBA","FBA_reverse","GAPD","GAPD_reverse","HEX1","LDH_L","LDH_L_reverse","PFK","PGI","PGI_reverse","PGK","PGK_reverse","PGM","PGM_reverse","PYK","TPI","TPI_reverse" }; std::vector<TensorT> met_data_stst = { 0.00024,0.0113,0.0773,0.29,0.0867,1.6,0.16,0.0198,0.0146,0.00728,0.0486,1,1.00e-03,1,1.36,0.0589,0.0301,0.017,2.5,0.0603 }; const int n_data = batch_size * n_epochs; Eigen::Tensor<TensorT, 2> glu__D_rand = GaussianSampler<TensorT>(1, n_data); glu__D_rand = (glu__D_rand + glu__D_rand.constant(1)) * glu__D_rand.constant(10); Eigen::Tensor<TensorT, 2> amp_rand = GaussianSampler<TensorT>(1, n_data); amp_rand = (amp_rand + amp_rand.constant(1)) * amp_rand.constant(5); // Generate the input and output data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int epochs_iter = 0; epochs_iter < n_epochs; ++epochs_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_nodes; ++nodes_iter) { if (simulation_type_ == "glucose_pulse") { if (nodes_iter > 19) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 1; // enzymes default else if (nodes_iter != 11 && memory_iter > memory_size - 4) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = met_data_stst[nodes_iter]; else if (nodes_iter == 11 && memory_iter > memory_size - 4) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = glu__D_rand(0, batch_iter*n_epochs + epochs_iter); else input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 0; // metabolites default } else if (simulation_type_ == "amp_sweep") { if (nodes_iter > 19) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 1; // enzymes default else if (nodes_iter != 4 && memory_iter > memory_size - 4) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = met_data_stst[nodes_iter]; else if (nodes_iter == 4 && memory_iter > memory_size - 4) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = amp_rand(0, batch_iter*n_epochs + epochs_iter); else input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 0; // metabolites default } else if (simulation_type_ == "steady_state") { if (nodes_iter > 19 && memory_iter > memory_size - 4) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 1e-6; // enzymes default //else if (nodes_iter != 11 && memory_iter > memory_size - 4) // input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = met_data_stst[nodes_iter]; else if (nodes_iter == 11 && memory_iter > memory_size - 4) input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = met_data_stst[nodes_iter]; else input_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 0; // metabolites default } } for (int nodes_iter = 0; nodes_iter < n_output_nodes; ++nodes_iter) { if (simulation_type_ == "glucose_pulse") { if (memory_iter == 0) output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = met_data_stst[nodes_iter]; else output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 0; // NOTE: TETT of 1 } else if (simulation_type_ == "amp_sweep") { if (memory_iter == 0) output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = met_data_stst[nodes_iter]; else output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 0; // NOTE: TETT of 1 } else if (simulation_type_ == "steady_state") if (memory_iter == 0) output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = met_data_stst[nodes_iter]; else output_data(batch_iter, memory_iter, nodes_iter, epochs_iter) = 0; // NOTE: TETT of 1 } } } } time_steps.setConstant(1.0f); } void simulateTrainingData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { simulateData(input_data, output_data, time_steps); } void simulateValidationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 4>& output_data, Eigen::Tensor<TensorT, 3>& time_steps) { simulateData(input_data, output_data, time_steps); } void simulateEvaluationData(Eigen::Tensor<TensorT, 4>& input_data, Eigen::Tensor<TensorT, 3>& time_steps) {}; // Custom parameters std::string simulation_type_ = "steady_state"; ///< simulation types of steady_state, glucose_pulse, or amp_sweep }; // Extended classes template<typename TensorT> class ModelTrainerExt : public ModelTrainerDefaultDevice<TensorT> { public: void makeRBCGlycolysis(Model<TensorT>& model, const std::string& biochem_rxns_filename, const bool& specify_layers, const bool& specify_output_layers, const bool& preserve_OoO) { model.setId(0); model.setName("RBCGlycolysis"); ModelBuilder<TensorT> model_builder; // Convert the COBRA model to an interaction graph BiochemicalReactionModel<TensorT> biochemical_reaction_model; biochemical_reaction_model.readBiochemicalReactions(biochem_rxns_filename); // Convert the interaction graph to a network model ModelBuilderExperimental<TensorT> model_builder_exp; model_builder_exp.addBiochemicalReactionsSequencialMin(model, biochemical_reaction_model.biochemicalReactions_, "RBC", "RBC", std::shared_ptr<WeightInitOp<float>>(new RangeWeightInitOp<float>(1e-3, 1.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.001, 0.9, 0.999, 1e-8)), 2, specify_layers, true); //// Create biases for exchange reactions //std::vector<std::string> exchange_nodes_neg = { "lac__L", "pyr", "h" }; //model_builder.addBiases(model, "Sinks", exchange_nodes_neg, // std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(-1.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.001, 0.9, 0.999, 1e-8)), // 0.0, specify_layers); //std::vector<std::string> exchange_nodes_pos = { "glc__D", "h2o", "amp" }; //model_builder.addBiases(model, "Sinks", exchange_nodes_pos, // std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::make_shared<AdamOp<float>>(AdamOp<float>(0.001, 0.9, 0.999, 1e-8)), // 0.0, specify_layers); std::set<std::string> exchange_nodes = { "lac__L_e", "pyr_e", "h_e", "glc__D_e", "h2o_e", "amp_e" }; std::set<std::string> output_nodes = { "13dpg_c","2pg_c","3pg_c","adp_c","amp_c","atp_c","dhap_c","f6p_c","fdp_c","g3p_c","g6p_c","glc__D_c","h_c","h2o_c","lac__L_c","nad_c","nadh_c","pep_c","pi_c","pyr_c" }; std::set<std::string> enzymes_f_nodes = { "ENO","FBA","GAPD","HEX1","LDH_L","PFK","PGI","PGK","PGM","PYK","TPI","DM_nadh","ADK1","ATPh" }; std::set<std::string> enzymes_r_nodes; for (const std::string& node : enzymes_f_nodes) { std::string node_r = node + "_reverse"; enzymes_r_nodes.insert(node_r); } // Create a dummy input node for all metabolites and enzymes (OoO) if (preserve_OoO) { std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", 1, specify_layers); for (const std::string& node : output_nodes) { model_builder.addSinglyConnected(model, "Input", node_names, { node }, std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::shared_ptr<SolverOp<float>>(new DummySolverOp<float>()), 0.0, specify_layers); } for (const std::string& node : enzymes_f_nodes) { model_builder.addSinglyConnected(model, "Input", node_names, { node }, std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::shared_ptr<SolverOp<float>>(new DummySolverOp<float>()), 0.0, specify_layers); } for (const std::string& node : enzymes_r_nodes) { if (model.nodes_.count(node)) { model_builder.addSinglyConnected(model, "Input", node_names, { node }, std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)), std::shared_ptr<SolverOp<float>>(new DummySolverOp<float>()), 0.0, specify_layers); } } } // Specify the output layer for all nodes if (specify_output_layers) { // specify metabolite and enzymes for (const std::string& node : output_nodes) { model.nodes_.at(node)->setLayerName("Metabolites"); model.nodes_.at(node)->setType(NodeType::output); } for (const std::string& node : exchange_nodes) { model.nodes_.at(node)->setLayerName("Exchange"); } for (const std::string& node : enzymes_f_nodes) { model.nodes_.at(node)->setLayerName("Enzymes"); } for (const std::string& node : enzymes_r_nodes) { if (model.nodes_.count(node)) { model.nodes_.at(node)->setLayerName("Enzymes"); } } } if (specify_layers) { // Specify the intermediates for (auto& node : model.getNodesMap()) { if (output_nodes.count(node.second->getName()) == 0 && enzymes_f_nodes.count(node.second->getName()) == 0 && enzymes_r_nodes.count(node.second->getName()) == 0) { if (node.second->getLayerName() == "RBC-EnzTmp1") { node.second->setLayerName("EnzTmp1"); } else if (node.second->getLayerName() == "RBC-EnzTmp2") { node.second->setLayerName("EnzTmp2"); } else { node.second->setLayerName("tmpResult"); } } } } } void adaptiveTrainerScheduler( const int& n_generations, const int& n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, const std::vector<float>& model_errors) { // Check point the model every 1000 epochs if (n_epochs % 500 == 0 && n_epochs != 0) { model_interpreter.getModelResults(model, false, true, false, false); ModelFile<TensorT> data; data.storeModelBinary(model.getName() + "_" + std::to_string(n_epochs) + "_model.binary", model); ModelInterpreterFileDefaultDevice<TensorT> interpreter_data; interpreter_data.storeModelInterpreterBinary(model.getName() + "_" + std::to_string(n_epochs) + "_interpreter.binary", model_interpreter); } // Record the nodes/links if (n_epochs % 100 == 0 || n_epochs == 0) { ModelFile<TensorT> data; model_interpreter.getModelResults(model, false, true, false, false); data.storeModelCsv(model.getName() + "_" + std::to_string(n_epochs) + "_nodes.csv", model.getName() + "_" + std::to_string(n_epochs) + "_links.csv", model.getName() + "_" + std::to_string(n_epochs) + "_weights.csv", model, true, true, true); } } void trainingModelLogger(const int & n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const TensorT& model_error) { model_logger.setLogTimeEpoch(true); model_logger.setLogTrainValMetricEpoch(true); model_logger.setLogExpectedEpoch(true); if (n_epochs == 0) { model_logger.initLogs(model); } if (n_epochs % 10 == 0) { if (model_logger.getLogExpectedEpoch()) model_interpreter.getModelResults(model, true, false, false); model_logger.writeLogs(model, n_epochs, { "Error" }, {}, { model_error }, {}, output_nodes, expected_values); } } }; template<typename TensorT> class ModelReplicatorExt : public ModelReplicator<TensorT> { public: void adaptiveReplicatorScheduler( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) { // TODO } }; template<typename TensorT> class PopulationTrainerExt : public PopulationTrainerDefaultDevice<TensorT> { public: void adaptivePopulationScheduler( const int& n_generations, std::vector<Model<TensorT>>& models, std::vector<std::vector<std::tuple<int, std::string, TensorT>>>& models_errors_per_generations) { // TODO } }; void main_KineticModel(const bool& make_model, const bool& train_model, const std::string& simulation_type) { // define the population trainer parameters PopulationTrainerExt<float> population_trainer; population_trainer.setNGenerations(1); population_trainer.setLogging(false); // define the population logger PopulationLogger<float> population_logger(true, true); // define the multithreading parameters const int n_hard_threads = std::thread::hardware_concurrency(); const int n_threads = n_hard_threads; // the number of threads // define the output nodes // TODO: manually specify the tensor index ordering or update for correct tensor ordering std::vector<std::string> output_nodes = { "13dpg_c","2pg_c","3pg_c","adp_c","amp_c","atp_c","dhap_c","f6p_c","fdp_c","g3p_c","g6p_c","glc__D_c","h_c","h2o_c","lac__L_c","nad_c","nadh_c","pep_c","pi_c","pyr_c" }; // define the data simulator DataSimulatorExt<float> data_simulator; data_simulator.simulation_type_ = simulation_type; // define the model trainers and resources for the trainers std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; for (size_t i = 0; i < n_threads; ++i) { ModelResources model_resources = { ModelDevice(0, 1) }; ModelInterpreterDefaultDevice<float> model_interpreter(model_resources); model_interpreters.push_back(model_interpreter); } ModelTrainerExt<float> model_trainer; //model_trainer.setBatchSize(32); //model_trainer.setMemorySize(128); model_trainer.setBatchSize(1); model_trainer.setMemorySize(91); model_trainer.setNEpochsTraining(5001); model_trainer.setNEpochsValidation(25); model_trainer.setNTETTSteps(1); model_trainer.setNTBPTTSteps(15); model_trainer.setVerbosityLevel(1); model_trainer.setLogging(true, false); // NonOoO model_trainer.setFindCycles(false); model_trainer.setFastInterpreter(true); model_trainer.setPreserveOoO(false); // // OoO //model_trainer.setFindCycles(false); // manually specifying the cycles //model_trainer.setFastInterpreter(true); //model_trainer.setPreserveOoO(true); model_trainer.setLossFunctions({ std::make_shared<MSELossOp<float>>(MSELossOp<float>()) }); model_trainer.setLossFunctionGrads({ std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>()) }); model_trainer.setLossOutputNodes({ output_nodes }); // define the model logger //ModelLogger<float> model_logger(true, true, true, false, false, false, false); ModelLogger<float> model_logger(true, true, false, false, false, false, false); // define the model replicator for growth mode ModelReplicatorExt<float> model_replicator; model_replicator.setNodeActivations({ std::make_pair(std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>())), std::make_pair(std::make_shared<SigmoidOp<float>>(SigmoidOp<float>()), std::make_shared<SigmoidGradOp<float>>(SigmoidGradOp<float>())), }); // define the initial population std::cout << "Initializing the population..." << std::endl; Model<float> model; if (make_model) { const std::string data_dir = "C:/Users/dmccloskey/Dropbox (UCSD SBRG)/Project_EvoNet/"; //const std::string data_dir = "C:/Users/domccl/Dropbox (UCSD SBRG)/Project_EvoNet/"; const std::string model_filename = data_dir + "RBCGlycolysis.csv"; ModelTrainerExt<float>().makeRBCGlycolysis(model, model_filename, true, true, false); } else { // read in the trained model std::cout << "Reading in the model..." << std::endl; const std::string data_dir = "C:/Users/domccl/GitHub/EvoNet_cpp/build_win_cuda/bin/Debug/"; const std::string model_filename = data_dir + "0_RBCGlycolysis_model.binary"; const std::string interpreter_filename = data_dir + "0_RBCGlycolysis_interpreter.binary"; ModelFile<float> model_file; model_file.loadModelBinary(model_filename, model); model.setId(1); model.setName("RBCGlycolysis-1"); ModelInterpreterFileDefaultDevice<float> model_interpreter_file; model_interpreter_file.loadModelInterpreterBinary(interpreter_filename, model_interpreters[0]); } std::vector<Model<float>> population = { model }; // define the input nodes std::vector<std::string> input_nodes = { "13dpg_c","2pg_c","3pg_c","adp_c","amp_c","atp_c","dhap_c","f6p_c","fdp_c","g3p_c","g6p_c","glc__D_c","h_c","h2o_c","lac__L_c","nad_c","nadh_c","pep_c","pi_c","pyr_c" }; //std::vector<std::string> enzymes_nodes = { "ADK1","ADK1_reverse","ATPh","DM_nadh","ENO","ENO_reverse","FBA","FBA_reverse","GAPD","GAPD_reverse","HEX1","LDH_L","LDH_L_reverse","PFK","PGI","PGI_reverse","PGK","PGK_reverse","PGM","PGM_reverse","PYK","TPI","TPI_reverse" }; //for (const std::string& node : enzymes_nodes) { // input_nodes.push_back(node); //} for (auto& node : model.nodes_) { if (std::count(input_nodes.begin(), input_nodes.end(), node.second->getName()) == 0) { input_nodes.push_back(node.second->getName()); } } if (train_model) { // Evolve the population std::vector<std::vector<std::tuple<int, std::string, float>>> models_validation_errors_per_generation = population_trainer.evolveModels( population, model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, population_logger, input_nodes); PopulationTrainerFile<float> population_trainer_file; population_trainer_file.storeModels(population, "RBCGlycolysis"); population_trainer_file.storeModelValidations("RBCGlycolysisErrors.csv", models_validation_errors_per_generation); } else { // Evaluate the population population_trainer.evaluateModels( population, model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, input_nodes); } } // Main int main(int argc, char** argv) { main_KineticModel(true, true, "steady_state"); // Constant glucose from T = 0 to N, SS metabolite levels at T = 0 (maintenance of SS metabolite levels) //main_KineticModel(true, true, "glucose_pulse"); // Glucose pulse at T = 0, SS metabolite levels at T = 0 (maintenance of SS metabolite) //main_KineticModel(true, true, "amp_sweep"); // AMP rise/fall at T = 0, SS metabolite levels at T = 0 (maintenance of SS metbolite levels) //main_KineticModel(true, true, "TODO?"); // Glucose pulse at T = 0, SS metabolite levels at T = 0 (maintenance of SS pyr levels) //main_KineticModel(true, true, "TODO?"); // AMP rise/fall at T = 0, SS metabolite levels at T = 0 (maintenance of SS ATP levels) return 0; }<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELTRAINERGPU_H #define EVONET_MODELTRAINERGPU_H #if COMPILE_WITH_CUDA #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include <cuda.h> #include <cuda_runtime.h> // .h #include <EvoNet/ml/ModelTrainer.h> #include <EvoNet/ml/ModelInterpreterGpu.h> // .cpp namespace EvoNet { /** @brief Class to train a network model */ template<typename TensorT> class ModelTrainerGpu : public ModelTrainer<TensorT, ModelInterpreterGpu<TensorT>> { public: ModelTrainerGpu() = default; ///< Default constructor ~ModelTrainerGpu() = default; ///< Default destructor }; } #endif #endif //EVONET_MODELTRAINERGPU_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE PopulationTrainerFile test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/io/PopulationTrainerFile.h> #include <EvoNet/ml/PopulationTrainer.h> #include <EvoNet/ml/Model.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(populationTrainerFile) BOOST_AUTO_TEST_CASE(constructor) { PopulationTrainerFile<float>*ptr = nullptr; PopulationTrainerFile<float>*nullPointer = nullptr; ptr = new PopulationTrainerFile<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { PopulationTrainerFile<float>*ptr = nullptr; ptr = new PopulationTrainerFile<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(sanitizeModelName) { PopulationTrainerFile<float> data; std::string model_name = "model2_0-12-1@:Model_2 "; data.sanitizeModelName(model_name); BOOST_CHECK_EQUAL(model_name, "model2_0-12-1@ Model_2 "); } BOOST_AUTO_TEST_CASE(storeModels) { PopulationTrainerFile<float> data; // make a vector of models to use for testing std::vector<Model<float>> models; for (int i=0; i<4; ++i) { Model<float> model; model.setName(std::to_string(i) + ":" + "." + ";"); model.setId(i); models.push_back(model); } bool success = data.storeModels(models, "PopulationTrainerFileTestStore"); BOOST_CHECK(success); } BOOST_AUTO_TEST_CASE(storeModelValidations) { PopulationTrainerFile<float> data; // make a vector of models to use for testing std::vector<std::vector<std::tuple<int, std::string, float>>> models_validation_errors; for (int g = 0; g < 3; ++g) { std::vector<std::tuple<int, std::string, float>> model_validation_errors; for (int i = 0; i < 4; ++i) { model_validation_errors.push_back(std::make_tuple(i, std::to_string(i), float(i))); } models_validation_errors.push_back(model_validation_errors); } bool success = data.storeModelValidations("StoreModelValidationsTest.csv", models_validation_errors); BOOST_CHECK(success); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ModelInterpreter IG test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/ModelInterpreterDefaultDevice.h> using namespace EvoNet; using namespace std; Model<float> makeModelIG() { /** * Interaction Graph Toy Network Model * Harmonic Oscillator without damping: * F(t) - kx = mx`` * for F(t) = 0 * x(t) = A*cos(w*t + e) * where undamped angular momentum, w = sqrt(k/m) * with amplitude A, and phase e * * Harmonic Oscillator with damping: * F(t) - kx - cx` = mx`` * For F(t) = 0, x`` + 2*l*w*x` + x*w^2 = 0 * where damping ratio, l = c/(2*sqrt(m*k)) and undamped angular momentum, w = sqrt(k/m) * x(t) = Ae^(-l*w*t)*sin(sqrt(1-l^2)*w*t + e) * with amplitude A, and phase e */ Node<float> m1, m2, m3; Link l1_to_l2, l2_to_l1, l2_to_l3, l3_to_l2; Weight<float> w1_to_w2, w2_to_w1, w2_to_w3, w3_to_w2; Model<float> model3; // Toy network: 1 hidden layer, fully connected, DCG m1 = Node<float>("m1", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); m2 = Node<float>("m2", NodeType::hidden, NodeStatus::initialized, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); m3 = Node<float>("m3", NodeType::output, NodeStatus::initialized, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); // weights std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; // weight_init.reset(new RandWeightInitOp(1.0)); // No random init for testing weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w1_to_w2 = Weight<float>("m1_to_m2", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w2_to_w1 = Weight<float>("m2_to_m1", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w2_to_w3 = Weight<float>("m2_to_m3", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w3_to_w2 = Weight<float>("m3_to_m2", weight_init, solver); weight_init.reset(); solver.reset(); // links l1_to_l2 = Link("l1_to_l2", "m1", "m2", "m1_to_m2"); l2_to_l1 = Link("l2_to_l1", "m2", "m1", "m2_to_m1"); l2_to_l3 = Link("l2_to_l3", "m2", "m3", "m2_to_m3"); l3_to_l2 = Link("l3_to_l2", "m3", "m2", "m3_to_m2"); model3.setId(3); model3.addNodes({ m1, m2, m3 }); model3.addWeights({ w1_to_w2, w2_to_w1, w2_to_w3, w3_to_w2 }); model3.addLinks({ l1_to_l2, l2_to_l1, l2_to_l3, l3_to_l2 }); return model3; } BOOST_AUTO_TEST_SUITE(modelInterpreter_IG) /** * Part 2 test suit for the ModelInterpreter class * * The following test methods that are * required of an interaction graph neural network */ Model<float> model_getFPOpsGraph = makeModelIG(); BOOST_AUTO_TEST_CASE(getFPOpsGraph_) { // Toy network: 1 hidden layer, fully connected, DAG ModelInterpreterDefaultDevice<float> model_interpreter; // get the next hidden layer int iter; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getFPOpsGraph_(model_getFPOpsGraph, FP_operations_list, iter); BOOST_CHECK_EQUAL(iter, 1); BOOST_CHECK_EQUAL(FP_operations_list.size(), 4); BOOST_CHECK_EQUAL(FP_operations_list[0].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].result.sink_node->getName(), "m2"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].time_step, 1); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].source_node->getName(), "m1"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].weight->getName(), "m1_to_m2"); BOOST_CHECK_EQUAL(FP_operations_list[1].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[1].result.sink_node->getName(), "m1"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].time_step, 1); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].source_node->getName(), "m2"); BOOST_CHECK_EQUAL(FP_operations_list[1].arguments[0].weight->getName(), "m2_to_m1"); BOOST_CHECK_EQUAL(FP_operations_list[2].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[2].result.sink_node->getName(), "m3"); BOOST_CHECK_EQUAL(FP_operations_list[2].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_list[2].arguments[0].time_step, 1); BOOST_CHECK_EQUAL(FP_operations_list[2].arguments[0].source_node->getName(), "m2"); BOOST_CHECK_EQUAL(FP_operations_list[2].arguments[0].weight->getName(), "m2_to_m3"); BOOST_CHECK_EQUAL(FP_operations_list[3].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[3].result.sink_node->getName(), "m2"); BOOST_CHECK_EQUAL(FP_operations_list[3].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_list[3].arguments[0].time_step, 1); BOOST_CHECK_EQUAL(FP_operations_list[3].arguments[0].source_node->getName(), "m3"); BOOST_CHECK_EQUAL(FP_operations_list[3].arguments[0].weight->getName(), "m3_to_m2"); } Model<float> model_getTensorOperations = makeModelIG(); BOOST_AUTO_TEST_CASE(getTensorOperations) { ModelInterpreterDefaultDevice<float> model_interpreter; int iter; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getFPOpsGraph_(model_getTensorOperations, FP_operations_list, iter); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_list, identified_sink_nodes, false); BOOST_CHECK_EQUAL(identified_sink_nodes.size(), 4); BOOST_CHECK_EQUAL(identified_sink_nodes.count("m1/1"), 1); BOOST_CHECK_EQUAL(identified_sink_nodes.count("m2/0"), 1); BOOST_CHECK_EQUAL(identified_sink_nodes.count("m2/3"), 1); BOOST_CHECK_EQUAL(identified_sink_nodes.count("m3/2"), 1); BOOST_CHECK_EQUAL(tensor_ops.size(), 2); BOOST_CHECK_EQUAL(tensor_ops.at("m1/1")[0], 1); BOOST_CHECK_EQUAL(tensor_ops.at("m1/1")[1], 2); BOOST_CHECK_EQUAL(tensor_ops.at("m2/0")[0], 0); BOOST_CHECK_EQUAL(tensor_ops.at("m2/0")[1], 3); } Model<float> model_getForwardPropogationLayerTensorDimensions = makeModelIG(); BOOST_AUTO_TEST_CASE(getForwardPropogationLayerTensorDimensions) { ModelInterpreterDefaultDevice<float> model_interpreter; int iter; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getFPOpsGraph_(model_getForwardPropogationLayerTensorDimensions, FP_operations_list, iter); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_list, identified_sink_nodes, false); std::map<int, int> max_layer_sizes; std::map<std::string, int> layer_name_pos; std::vector<int> source_layer_sizes, sink_layer_sizes; std::vector<std::vector<std::pair<int, int>>> weight_indices; std::vector<std::map<std::string, std::vector<std::pair<int, int>>>> shared_weight_indices; std::vector<std::vector<float>> weight_values; std::vector<bool> make_source_tensors, make_sink_tensors, make_weight_tensors; std::vector<int> source_layer_pos, sink_layer_pos; int tensor_layers_cnt = 0; int weight_layers_cnt = 0; model_interpreter.getForwardPropogationLayerTensorDimensions(FP_operations_list, tensor_ops, source_layer_sizes, sink_layer_sizes, weight_indices, shared_weight_indices, weight_values, make_source_tensors, make_sink_tensors, make_weight_tensors, source_layer_pos, sink_layer_pos, max_layer_sizes, layer_name_pos, tensor_layers_cnt, weight_layers_cnt); BOOST_CHECK_EQUAL(source_layer_sizes.size(), 2); BOOST_CHECK_EQUAL(source_layer_sizes[0], 1); BOOST_CHECK_EQUAL(source_layer_sizes[1], 2); BOOST_CHECK_EQUAL(sink_layer_sizes.size(), 2); BOOST_CHECK_EQUAL(sink_layer_sizes[0], 2); BOOST_CHECK_EQUAL(sink_layer_sizes[1], 1); BOOST_CHECK_EQUAL(source_layer_pos.size(), 2); BOOST_CHECK_EQUAL(source_layer_pos.at(0), 1); BOOST_CHECK_EQUAL(source_layer_pos.at(1), 0); BOOST_CHECK_EQUAL(sink_layer_pos.size(), 2); BOOST_CHECK_EQUAL(sink_layer_pos.at(0), 0); BOOST_CHECK_EQUAL(sink_layer_pos.at(1), 1); BOOST_CHECK_EQUAL(max_layer_sizes.size(), 2); BOOST_CHECK_EQUAL(max_layer_sizes.at(0), 1); BOOST_CHECK_EQUAL(max_layer_sizes.at(1), 0); BOOST_CHECK_EQUAL(layer_name_pos.size(), 0); BOOST_CHECK_EQUAL(weight_indices.size(), 2); BOOST_CHECK_EQUAL(weight_indices[0].size(), 2); BOOST_CHECK_EQUAL(weight_indices[1].size(), 2); std::vector<std::vector<std::pair<int, int>>> weight_indices_test1 = { {std::make_pair(0,0),std::make_pair(0,1)}, {std::make_pair(0,0),std::make_pair(1,0)} }; for (int tensor_iter = 0; tensor_iter < weight_indices_test1.size(); ++tensor_iter) { for (int i = 0; i < weight_indices_test1[tensor_iter].size(); ++i) { BOOST_CHECK_EQUAL(weight_indices[tensor_iter][i].first, weight_indices_test1[tensor_iter][i].first); BOOST_CHECK_EQUAL(weight_indices[tensor_iter][i].second, weight_indices_test1[tensor_iter][i].second); } } BOOST_CHECK_EQUAL(shared_weight_indices.size(), 2); BOOST_CHECK_EQUAL(shared_weight_indices[0].size(), 0); BOOST_CHECK_EQUAL(shared_weight_indices[1].size(), 0); BOOST_CHECK_EQUAL(weight_values.size(), 2); BOOST_CHECK_EQUAL(weight_values[0].size(), 2); BOOST_CHECK_EQUAL(weight_values[1].size(), 2); std::vector<std::vector<float>> weight_values_test1 = { {1, 1}, {1, 1} }; for (int tensor_iter = 0; tensor_iter < weight_values_test1.size(); ++tensor_iter) { for (int i = 0; i < weight_values_test1[tensor_iter].size(); ++i) { BOOST_CHECK_EQUAL(weight_values[tensor_iter][i], weight_values_test1[tensor_iter][i]); } } BOOST_CHECK_EQUAL(make_source_tensors.size(), 2); BOOST_CHECK(make_source_tensors[0]); BOOST_CHECK(!make_source_tensors[1]); BOOST_CHECK_EQUAL(make_sink_tensors.size(), 2); BOOST_CHECK(make_sink_tensors[0]); BOOST_CHECK(!make_sink_tensors[1]); BOOST_CHECK_EQUAL(make_weight_tensors.size(), 2); BOOST_CHECK(make_weight_tensors[0]); BOOST_CHECK(make_weight_tensors[1]); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_METABOLOMICSLATENTUNSCLASSDATASIMULATOR_H #define EVONET_METABOLOMICSLATENTUNSCLASSDATASIMULATOR_H // .h #include <EvoNet/simulator/BiochemicalDataSimulator.h> namespace EvoNet { template<typename TensorT> class MetabolomicsLatentUnsClassDataSimulator : public BiochemicalDataSimulator<TensorT> { public: void makeTrainingDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_training, const std::vector<std::string>& labels_training, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) override; void makeValidationDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_validation, const std::vector<std::string>& labels_validation, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) override; void readAndProcessMetabolomicsTrainingAndValidationData(int& n_reaction_ids_training, int& n_labels_training, int& n_component_group_names_training, int& n_reaction_ids_validation, int& n_labels_validation, int& n_component_group_names_validation, const std::string& biochem_rxns_filename, const std::string& metabo_data_filename_train, const std::string& meta_data_filename_train, const std::string& metabo_data_filename_test, const std::string& meta_data_filename_test, const bool& use_concentrations, const bool& use_MARs, const bool& sample_values, const bool& iter_values, const bool& fill_sampling, const bool& fill_mean, const bool& fill_zero, const bool& apply_fold_change, const std::string& fold_change_ref, const TensorT& fold_change_log_base, const bool& offline_linear_scale_input, const bool& offline_log_transform_input, const bool& offline_standardize_input, const bool& online_linear_scale_input, const bool& online_log_transform_input, const bool& online_standardize_input, int& n_reps_per_sample, const bool& randomize_sample_group_names, const bool& shuffle_data_and_labels, const int& n_epochs, const int& batch_size, const int& memory_size) override; }; template<typename TensorT> inline void MetabolomicsLatentUnsClassDataSimulator<TensorT>::makeTrainingDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_training, const std::vector<std::string>& labels_training, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) { // infer the input sizes assert(n_input_nodes == data_training.dimension(0)); assert(n_loss_output_nodes == /*2*this->n_encodings_continuous_ + */this->n_encodings_discrete_); assert(n_metric_output_nodes == /*2*this->n_encodings_continuous_ + */this->n_encodings_discrete_); assert(data_training.dimension(0) == features.size()); assert(data_training.dimension(1) == labels_training.size()); assert(this->n_encodings_continuous_ > 0); assert(this->n_encodings_discrete_ == this->labels_training_.size()); assert(batch_size > 0); assert(memory_size == 1); assert(n_epochs == this->n_encodings_discrete_ * this->labels_training_.size()); // Dummy data for the KL divergence losses Eigen::Tensor<TensorT, 4> KL_losses_continuous(batch_size, memory_size, this->n_encodings_continuous_, n_epochs); KL_losses_continuous.setZero(); // initialize the Tensors this->input_data_training_.resize(batch_size, memory_size, n_input_nodes, n_epochs); this->loss_output_data_training_.resize(batch_size, memory_size, n_loss_output_nodes, n_epochs); this->metric_output_data_training_.resize(batch_size, memory_size, n_metric_output_nodes, n_epochs); this->time_steps_training_.resize(batch_size, memory_size, n_epochs); // expand the training data to fit into the requested input size const int expansion_factor = maxFunc(std::ceil(TensorT(batch_size * n_epochs) / TensorT(data_training.dimension(1))), 1); assert(expansion_factor == 1); const int over_expanded = data_training.dimension(1) * expansion_factor - batch_size * n_epochs; assert(over_expanded == 0); assert(batch_size * memory_size * n_epochs == data_training.dimension(1) * expansion_factor - over_expanded); Eigen::Tensor<TensorT, 2> data_training_expanded(data_training.dimension(0), data_training.dimension(1) * expansion_factor); Eigen::Tensor<std::string, 2> labels_training_expanded(data_training.dimension(1) * expansion_factor, 1); for (int i = 0; i < expansion_factor; ++i) { // Slices for the data Eigen::array<Eigen::Index, 2> offset1 = { 0, i * data_training.dimension(1) }; Eigen::array<Eigen::Index, 2> span1 = { data_training.dimension(0), data_training.dimension(1) }; data_training_expanded.slice(offset1, span1) = data_training; // Slices for the labels with a reorder to partition a unique label into each batch int step = 0, stride = labels_training.size()/this->labels_training_.size(), batch_iter = 0, iter = 0; for (int j = 0; j < data_training.dimension(1); ++j) { labels_training_expanded(i * data_training.dimension(1) + j, 0) = labels_training.at(iter); ++batch_iter; ++iter; if (batch_iter >= batch_size) { batch_iter = 0; iter -= batch_size; // subtract out the iterations along the batch iter += stride; // and jump to the next set of labels } if (iter >= data_training.dimension(1)) { ++step; iter = step; } } } // make the one-hot encodings Eigen::Tensor<TensorT, 2> one_hot_vec = OneHotEncoder<std::string, TensorT>(labels_training_expanded, this->labels_training_); // assign the input tensors auto data_training_expanded_4d = data_training_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_training.dimension(0), data_training.dimension(1) * expansion_factor - over_expanded }) ).reshape(Eigen::array<Eigen::Index, 4>({ data_training.dimension(0), batch_size, memory_size, n_epochs }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 1,2,0,3 })); this->input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, n_input_nodes, n_epochs })) = data_training_expanded_4d; // assign the loss tensors auto one_hot_vec_4d = one_hot_vec.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_training.dimension(1) * expansion_factor - over_expanded, one_hot_vec.dimension(1) }) ).reshape(Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, n_epochs, int(labels_training_.size()) }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 0,1,3,2 })); //this->loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), // Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = KL_losses_continuous; //this->loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, this->n_encodings_continuous_, 0 }), // Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = KL_losses_continuous; //this->loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 2 * this->n_encodings_continuous_, 0 }), // Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = one_hot_vec_4d; this->loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = one_hot_vec_4d; // assign the metric tensors //this->metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), // Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = KL_losses_continuous; //this->metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, this->n_encodings_continuous_, 0 }), // Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = KL_losses_continuous; //this->metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 2 * this->n_encodings_continuous_, 0 }), // Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = one_hot_vec_4d; this->metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = one_hot_vec_4d; } template<typename TensorT> inline void MetabolomicsLatentUnsClassDataSimulator<TensorT>::makeValidationDataForCache(const std::vector<std::string>& features, const Eigen::Tensor<TensorT, 2>& data_validation, const std::vector<std::string>& labels_validation, const int& n_epochs, const int& batch_size, const int& memory_size, const int& n_input_nodes, const int& n_loss_output_nodes, const int& n_metric_output_nodes, const bool& shuffle_data_and_labels) { // infer the input sizes assert(n_input_nodes == data_validation.dimension(0)); assert(n_loss_output_nodes == /*2 * this->n_encodings_continuous_ + */this->n_encodings_discrete_); assert(n_metric_output_nodes == /*2 * this->n_encodings_continuous_ + */this->n_encodings_discrete_); assert(data_validation.dimension(0) == features.size()); assert(data_validation.dimension(1) == labels_validation.size()); assert(this->n_encodings_continuous_ > 0); assert(this->n_encodings_discrete_ == this->labels_validation_.size()); assert(batch_size > 0); assert(memory_size == 1); assert(n_epochs == this->n_encodings_discrete_ * this->labels_validation_.size()); // Dummy data for the KL divergence losses Eigen::Tensor<TensorT, 4> KL_losses_continuous(batch_size, memory_size, this->n_encodings_continuous_, n_epochs); KL_losses_continuous.setZero(); // initialize the Tensors this->input_data_validation_.resize(batch_size, memory_size, n_input_nodes, n_epochs); this->loss_output_data_validation_.resize(batch_size, memory_size, n_loss_output_nodes, n_epochs); this->metric_output_data_validation_.resize(batch_size, memory_size, n_metric_output_nodes, n_epochs); this->time_steps_validation_.resize(batch_size, memory_size, n_epochs); // expand the validation data to fit into the requested input size const int expansion_factor = maxFunc(std::ceil(TensorT(batch_size * n_epochs) / TensorT(data_validation.dimension(1))), 1); if (expansion_factor != 1) { std::cout << "validation expansion_factor = " << expansion_factor << "." << std::endl; }; const int over_expanded = data_validation.dimension(1) * expansion_factor - batch_size * n_epochs; if (over_expanded != 0) { std::cout << "validation over_expanded = " << over_expanded << "." << std::endl; } assert(batch_size * memory_size * n_epochs == data_validation.dimension(1) * expansion_factor - over_expanded); Eigen::Tensor<TensorT, 2> data_validation_expanded(data_validation.dimension(0), data_validation.dimension(1) * expansion_factor); Eigen::Tensor<std::string, 2> labels_validation_expanded(data_validation.dimension(1) * expansion_factor, 1); for (int i = 0; i < expansion_factor; ++i) { // Slices for the data Eigen::array<Eigen::Index, 2> offset1 = { 0, i * data_validation.dimension(1) }; Eigen::array<Eigen::Index, 2> span1 = { data_validation.dimension(0), data_validation.dimension(1) }; data_validation_expanded.slice(offset1, span1) = data_validation; // Slices for the labels with a reorder int step = 0, stride = labels_validation.size()/this->labels_validation_.size(), iter = 0; for (int j = 0; j < data_validation.dimension(1); ++j) { labels_validation_expanded(i * data_validation.dimension(1) + j, 0) = labels_validation.at(iter); iter += stride; if (iter >= data_validation.dimension(1)) { ++step; iter = step; } } } // make the one-hot encodings Eigen::Tensor<TensorT, 2> one_hot_vec = OneHotEncoder<std::string, TensorT>(labels_validation_expanded, this->labels_validation_); // assign the input tensors auto data_validation_expanded_4d = data_validation_expanded.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_validation.dimension(0), data_validation.dimension(1) * expansion_factor - over_expanded }) ).reshape(Eigen::array<Eigen::Index, 4>({ data_validation.dimension(0), batch_size, memory_size, n_epochs }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 1,2,0,3 })); this->input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, n_input_nodes, n_epochs })) = data_validation_expanded_4d; // assign the loss tensors auto one_hot_vec_4d = one_hot_vec.slice(Eigen::array<Eigen::Index, 2>({ 0, 0 }), Eigen::array<Eigen::Index, 2>({ data_validation.dimension(1) * expansion_factor - over_expanded, one_hot_vec.dimension(1) }) ).reshape(Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, n_epochs, int(labels_validation_.size()) }) ).shuffle(Eigen::array<Eigen::Index, 4>({ 0,1,3,2 })); //this->loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), // Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = KL_losses_continuous; //this->loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, this->n_encodings_continuous_, 0 }), // Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = KL_losses_continuous; //this->loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 2 * this->n_encodings_continuous_, 0 }), // Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = one_hot_vec_4d; this->loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = one_hot_vec_4d; // assign the metric tensors //this->metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), // Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = KL_losses_continuous; //this->metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, this->n_encodings_continuous_, 0 }), // Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_continuous_, n_epochs })) = KL_losses_continuous; //this->metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 2 * this->n_encodings_continuous_, 0 }), // Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = one_hot_vec_4d; this->metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ batch_size, memory_size, this->n_encodings_discrete_, n_epochs })) = one_hot_vec_4d; } template<typename TensorT> inline void MetabolomicsLatentUnsClassDataSimulator<TensorT>::readAndProcessMetabolomicsTrainingAndValidationData(int& n_reaction_ids_training, int& n_labels_training, int& n_component_group_names_training, int& n_reaction_ids_validation, int& n_labels_validation, int& n_component_group_names_validation, const std::string& biochem_rxns_filename, const std::string& metabo_data_filename_train, const std::string& meta_data_filename_train, const std::string& metabo_data_filename_test, const std::string& meta_data_filename_test, const bool& use_concentrations, const bool& use_MARs, const bool& sample_values, const bool& iter_values, const bool& fill_sampling, const bool& fill_mean, const bool& fill_zero, const bool& apply_fold_change, const std::string& fold_change_ref, const TensorT& fold_change_log_base, const bool& offline_linear_scale_input, const bool& offline_log_transform_input, const bool& offline_standardize_input, const bool& online_linear_scale_input, const bool& online_log_transform_input, const bool& online_standardize_input, int& n_reps_per_sample, const bool& randomize_sample_group_names, const bool& shuffle_data_and_labels, const int& n_epochs, const int& batch_size, const int& memory_size) { // Read in the data and make the data matrices std::vector<std::string> labels_training; std::vector<std::string> features_training; Eigen::Tensor<TensorT, 2> data_training; std::vector<std::string> labels_validation; std::vector<std::string> features_validation; Eigen::Tensor<TensorT, 2> data_validation; this->readAndMakeMetabolomicsTrainingAndValidationDataMatrices(n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, features_training, data_training, labels_training, features_validation, data_validation, labels_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, use_concentrations, use_MARs, sample_values, iter_values, fill_sampling, fill_mean, fill_zero, apply_fold_change, fold_change_ref, fold_change_log_base, n_reps_per_sample, false, //randomize_sample_group_names, n_epochs, batch_size, memory_size); // Make the training and validation data caches after an optional transformation step if (use_concentrations) { // Apply offline transformations this->transformTrainingAndValidationDataOffline(data_training, data_validation, offline_linear_scale_input, offline_log_transform_input, offline_standardize_input, false, -1, -1, false, -1, -1); // Apply online transformations this->transformTrainingAndValidationDataOnline(data_training, data_validation, online_linear_scale_input, online_log_transform_input, online_standardize_input); // Make the training data cache this->makeTrainingDataForCache(features_training, data_training, labels_training, n_epochs, batch_size, memory_size, n_component_group_names_training, /*2 * this->n_encodings_continuous_ + */this->n_encodings_discrete_, /*2 * this->n_encodings_continuous_ + */this->n_encodings_discrete_, shuffle_data_and_labels); this->makeValidationDataForCache(features_validation, data_validation, labels_validation, n_epochs, batch_size, memory_size, n_component_group_names_training, /*2 * this->n_encodings_continuous_ + */this->n_encodings_discrete_, /*2 * this->n_encodings_continuous_ + */this->n_encodings_discrete_, shuffle_data_and_labels); } else if (use_MARs) { // Apply offline transformations TensorT min_value = 1e-3; TensorT max_value = 1e3; if (offline_log_transform_input) { min_value = std::log(min_value); max_value = std::log(max_value); } this->transformTrainingAndValidationDataOffline(data_training, data_validation, offline_linear_scale_input, offline_log_transform_input, offline_standardize_input, true, min_value, max_value, false, -1, -1); // Apply online transformations this->transformTrainingAndValidationDataOnline(data_training, data_validation, online_linear_scale_input, online_log_transform_input, online_standardize_input); // Make the training data cache this->makeTrainingDataForCache(features_training, data_training, labels_training, n_epochs, batch_size, memory_size, n_reaction_ids_validation, /*2 * this->n_encodings_continuous_ + */this->n_encodings_discrete_, /*2 * this->n_encodings_continuous_ + */this->n_encodings_discrete_, shuffle_data_and_labels); this->makeValidationDataForCache(features_validation, data_validation, labels_validation, n_epochs, batch_size, memory_size, n_reaction_ids_validation, /*2 * this->n_encodings_continuous_ + */this->n_encodings_discrete_, /*2 * this->n_encodings_continuous_ + */this->n_encodings_discrete_, shuffle_data_and_labels); } } } #endif //EVONET_METABOLOMICSLATENTUNSCLASSDATASIMULATOR_H<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_BIOCHEMICALREACTION_H #define EVONET_BIOCHEMICALREACTION_H /* @brief A collection of classes and methods for reading, writing, and parsing Biochemical reaction data and models. Please note that the code in this file is a work in progress. Use with caution! */ #include <EvoNet/io/csv.h> #include <EvoNet/io/CSVWriter.h> #include <EvoNet/core/Preprocessing.h> #include <EvoNet/core/StringParsing.h> #include <EvoNet/core/Statistics.h> #include <set> namespace EvoNet { // Data structures struct PWStats { std::string sample_name_1; std::string sample_name_2; std::string feature_name; std::string feature_comment; int n1, n2; std::pair<float, float> confidence_interval_1; std::pair<float, float> confidence_interval_2; float fold_change; float prob; bool is_significant = false; }; typedef std::map<std::string, std::vector<PWStats>> PWData; struct PWSampleSummary { std::string sample_name_1; std::string sample_name_2; int n_significant = 0; }; typedef std::vector<PWSampleSummary> PWSampleSummaries; struct PWFeatureSummary { std::string feature_name; int n_significant = 0; }; typedef std::vector<PWFeatureSummary> PWFeatureSummaries; struct PWTotalSummary { std::set<std::string> significant_pairs; int n_significant_pairs = 0; std::set<std::string> significant_features; int n_significant_features = 0; int n_significant_total = 0; }; struct MetabolomicsDatum { std::string sample_name; std::string sample_group_name; std::string component_name; std::string component_group_name; std::string calculated_concentration_units; float time_point; float calculated_concentration; bool used; std::string print() const; }; std::string MetabolomicsDatum::print() const { std::string met_datum_str = ""; met_datum_str += "sample_name = " + sample_name; met_datum_str += "; sample_group_name = " + sample_group_name; met_datum_str += "; component_name = " + component_name; met_datum_str += "; component_group_name = " + component_group_name; met_datum_str += "; calculated_concentration_units = " + calculated_concentration_units; met_datum_str += "; time_point = " + std::to_string(time_point); met_datum_str += "; calculated_concentration = " + std::to_string(calculated_concentration); met_datum_str += "; used = " + std::to_string(used); return met_datum_str; } typedef std::map<std::string, std::map<std::string, std::vector<MetabolomicsDatum>>> MetabolomicsData; struct BiochemicalReaction { std::string model_id; std::string reaction_id; std::string reaction_name; std::string equation; std::string subsystem; std::string gpr; std::vector<float> reactants_stoichiometry; std::vector<float> products_stoichiometry; std::vector<std::string> reactants_ids; std::vector<std::string> products_ids; // others if needed bool reversibility; bool used; void updateEquation(); std::string print() const; }; std::string BiochemicalReaction::print() const { auto stringifyFloatVec = [](const std::vector<float>& vec) { std::string vec_str = "{"; if (vec.size()) { vec_str += std::to_string(vec.at(0)); for (int i = 1; i < vec.size(); ++i) { vec_str += ", " + std::to_string(vec.at(i)); } } vec_str += "}"; return vec_str; }; auto stringifyStrVec = [](const std::vector<std::string>& vec) { std::string vec_str = "{"; if (vec.size()) { vec_str += vec.at(0); for (int i = 1; i < vec.size(); ++i) { vec_str += ", " + vec.at(i); } } vec_str += "}"; return vec_str; }; std::string react_str = ""; react_str += "model_id = " + model_id; react_str += "; reaction_id = " + reaction_id; react_str += "; reaction_name = " + reaction_name; react_str += "; equation = " + equation; react_str += "; subsystem = " + subsystem; react_str += "; gpr = " + gpr; react_str += "; reactants_stoichiometry = " + stringifyFloatVec(reactants_stoichiometry); react_str += "; products_stoichiometry = " + stringifyFloatVec(products_stoichiometry); react_str += "; reactants_ids = " + stringifyStrVec(reactants_ids); react_str += "; products_ids = " + stringifyStrVec(products_ids); react_str += "; reversibility = " + std::to_string(reversibility); react_str += "; used = " + std::to_string(used); return react_str; } void BiochemicalReaction::updateEquation() { std::string new_equation = ""; for (int i = 0; i < reactants_ids.size(); ++i) { if (i > 0) new_equation += " + "; if (std::abs(reactants_stoichiometry[i]) > 1) new_equation += std::to_string((int)std::abs(reactants_stoichiometry[i])) + " "; new_equation += reactants_ids[i]; } new_equation += " = "; for (int i = 0; i < products_ids.size(); ++i) { if (i > 0) new_equation += " + "; if (products_stoichiometry[i] > 1) new_equation += std::to_string((int)products_stoichiometry[i]) + " "; new_equation += products_ids[i]; } equation = new_equation; } typedef std::map<std::string, BiochemicalReaction> BiochemicalReactions; struct MetaDatum { std::string sample_group_name; std::string condition; std::string time; std::string subject; std::string temperature; std::string print() const; }; std::string MetaDatum::print() const { std::string met_datum_str = ""; met_datum_str += "sample_group_name = " + sample_group_name; met_datum_str += "; condition = " + condition; met_datum_str += "; time = " + time; met_datum_str += "; subject = " + subject; met_datum_str += "; temperature = " + temperature; return met_datum_str; } typedef std::map<std::string, MetaDatum> MetaData; template<typename TensorT> class BiochemicalReactionModel { public: BiochemicalReactionModel() = default; ~BiochemicalReactionModel() = default; /* @brief Read in the metabolomics data from .csv file @param[in] filename @param[in, out] metabolomicsData **/ static void ReadMetabolomicsData(const std::string& filename, MetabolomicsData& metabolomicsData); /* @brief Read in the biochemical reactsion from .csv file @param[in] filename @param[in, out] biochemicalReactions **/ static void ReadBiochemicalReactions(const std::string& filename, BiochemicalReactions& biochemicalReactions, bool remove_compartments = false); /* @brief Read in the meta data from .csv file @param[in] filename @param[in, out] metaData **/ static void ReadMetaData(const std::string& filename, MetaData& metaData); void readMetabolomicsData(const std::string& filename) { ReadMetabolomicsData(filename, metabolomicsData_); } void readBiochemicalReactions(const std::string& filename, bool remove_compartments = false) { ReadBiochemicalReactions(filename, biochemicalReactions_, remove_compartments); } void readMetaData(const std::string& filename) { ReadMetaData(filename, metaData_); } /* @brief Find candidate reactions that can be used to calculate the MAR @param[in] biochemicalReactions @param[in] include_currency_mets Boolean to indicate whether or not to include currency metabolites in the MAR @param[in] exclude_non_currency_mets Boolean to indicate whether or not to include only currency metabolites in the MAR @param[in] threshold Minimal metabolite coverage value @returns a vector of reaction_ids **/ void findMARs(bool exclude_currency_mets = false, bool exclude_non_currency_mets = false, TensorT threshold = 0.75); /* @brief Remove MARs that involve the same set of metabolites @returns a vector of reaction_ids **/ void removeRedundantMARs(); /* @brief Find all unique component group names in the data set @returns a vector of component_group_names **/ void findComponentGroupNames(); /* @brief Find all unique meta data labels in the data set @returns a vector of unique labels, sample_group_names, and a map of sample_group_names to labels **/ void findLabels(const std::string& label = "condition"); /* @brief Generate default reaction concentrations for certain highly connected metabolites (e.g., h, h2o, co2) with units of uM @param[in] filename @param[in, out] metabolomicsData **/ static float makeDefaultMetabolomicsData(const std::string& met_id); /* @brief Calculate the Mass Action Ratio (MAR) MAR = R1^r1 * R2^r2 / (P1^p1 * P2^p2) @param[in] metabolomicsData @param[in] biochemicalReaction **/ static float calculateMAR(const std::map<std::string, std::vector<MetabolomicsDatum>>& metabolomicsData, const BiochemicalReaction& biochemicalReaction); /* @brief Get random concentration @param[in] metabolomicsData @param[in] met_id **/ static float getRandomConcentration(const std::map<std::string, std::vector<MetabolomicsDatum>>& metabolomicsData, const std::string& met_id); /* @brief Get default metabolites including inorganic ions, metals, and salts @return Vector of "default" metabolite strings **/ static std::vector<std::string> getDefaultMets(); /* @brief Get currency metabolites including @return Vector of currency metabolite strings **/ static std::vector<std::string> getCurrencyMets(); /* @brief Convert the metabolomics data structure into a 2D tensor with dim0 = features and dim1 = samples and replicates Use cases for use_concentrations: 1. Tensor with the same number of replicates for each feature equal to the maximum number of replicates for the sample with fill_sampling, fill_mean, or fill_zero = true and iter_values = true 2. Tensor with the same number of replicates for each feature equal to the maximum number of replicates for the sample or a much greater number and randomly sampled concentration values with sample_values = true Use cases for use_MARs: 1.Tensor with the same number of replicates for each feature equal to the maximum number of replicates for the sample or a much greater number and randomly sampled mass action ratios with sample_values = true Assume min of 1e-3 and max of 1e3 when performing any kind of data standardization or transformation @param[out] data The metabolomics data in 2D Tensor form @param[out] labels The labels (i.e., samples/replicates) in 1D Tensor form @return The matrix of metabolomics data **/ void getMetDataAsTensors(Eigen::Tensor<TensorT, 2>& data, std::vector<std::string>& labels, const std::vector<std::string>& sample_group_names, const std::vector<std::string>& component_group_names, const std::map<std::string, std::string>& sample_group_name_to_label, const std::map<std::string, int>& sample_group_name_to_reps, const bool& use_concentrations, const bool& use_MARs, const bool& sample_values, const bool& iter_values, const bool& fill_sampling, const bool& fill_mean, const bool& fill_zero, const bool& apply_fold_change, const std::string& fold_change_ref, const TensorT& fold_change_log_base, const bool& randomize_labels) const; /* @brief Estimate the maximum number of replicates in the data set @param[in] sample_group_name_to_reps A map of replicates per sample group name @param[in] sample_group_names The sample group names @param[in] component_group_names The component group names @return The maximum number of replicates and the total number of labels **/ std::pair<int, int> getMaxReplicatesAndNLabels(std::map<std::string, int>& sample_group_name_to_reps, const std::vector<std::string>& sample_group_names, const std::vector<std::string>& component_group_names) const; /* @brief Clear all data structures */ void clear(); MetabolomicsData metabolomicsData_; BiochemicalReactions biochemicalReactions_; MetaData metaData_; std::vector<std::string> reaction_ids_; // or MAR ids std::vector<std::string> sample_group_names_; std::vector<std::string> labels_; std::vector<std::string> component_group_names_; std::map<std::string, std::string> sample_group_name_to_label_; }; template<typename TensorT> inline void BiochemicalReactionModel<TensorT>::ReadMetabolomicsData(const std::string & filename, MetabolomicsData & metabolomicsData) { io::CSVReader<8, io::trim_chars<' ', '\t'>, io::double_quote_escape<',', '"'>> data_in(filename); data_in.read_header(io::ignore_extra_column, "sample_group_name", "sample_name", "component_group_name", "component_name", "calculated_concentration_units", "used_", "time_point", "calculated_concentration"); std::string sampe_group_name_str, sample_name_str, component_group_name_str, component_name_str, calculated_concentration_units_str, used__str, time_point_str, calculated_concentration_str; while (data_in.read_row(sampe_group_name_str, sample_name_str, component_group_name_str, component_name_str, calculated_concentration_units_str, used__str, time_point_str, calculated_concentration_str)) { // parse the .csv file MetabolomicsDatum row; row.sample_group_name = sampe_group_name_str; row.sample_name = sample_name_str; // metabolite id cleanup if (component_group_name_str == "Pool_2pg_3pg") component_group_name_str = "2pg"; else if (component_group_name_str == "Hexose_Pool_fru_glc-D") component_group_name_str = "glc-D"; // replace "-" with "__" component_group_name_str = ReplaceTokens(component_group_name_str, { "-" }, "__"); row.component_group_name = component_group_name_str; // matches the met_id in the biochemical models row.component_name = component_name_str; row.calculated_concentration_units = calculated_concentration_units_str; row.time_point = std::stof(time_point_str); row.used = (used__str == "t" || used__str == "TRUE") ? true : false; if (calculated_concentration_str != "" && calculated_concentration_str != "NULL") row.calculated_concentration = std::stof(calculated_concentration_str); else row.calculated_concentration = 0.0f; // build up the map std::vector<MetabolomicsDatum> rows = { row }; std::map<std::string, std::vector<MetabolomicsDatum>> replicate; replicate.emplace(component_group_name_str, rows); auto found_in_data = metabolomicsData.emplace(sampe_group_name_str, replicate); if (!found_in_data.second) { auto found_in_component = metabolomicsData.at(sampe_group_name_str).emplace(component_group_name_str, rows); if (!found_in_component.second) { metabolomicsData.at(sampe_group_name_str).at(component_group_name_str).push_back(row); } } if (component_group_name_str == "2pg") { row.component_group_name = "3pg"; rows = { row }; auto found_in_component = metabolomicsData.at(sampe_group_name_str).emplace(row.component_group_name, rows); if (!found_in_component.second) { metabolomicsData.at(sampe_group_name_str).at(row.component_group_name).push_back(row); } } } } template<typename TensorT> inline void BiochemicalReactionModel<TensorT>::ReadBiochemicalReactions(const std::string & filename, BiochemicalReactions & biochemicalReactions, bool remove_compartments) { io::CSVReader<10, io::trim_chars<' ', '\t'>, io::double_quote_escape<',', '"'>> data_in(filename); data_in.read_header(io::ignore_extra_column, "rxn_id", "rxn_name", "equation", "gpr", "used_", "reactants_stoichiometry", "products_stoichiometry", "reactants_ids", "products_ids", "reversibility"); std::string rxn_id_str, rxn_name_str, equation_str, gpr_str, used__str, reactants_stoichiometry_str, products_stoichiometry_str, reactants_ids_str, products_ids_str, reversibility_str; while (data_in.read_row(rxn_id_str, rxn_name_str, equation_str, gpr_str, used__str, reactants_stoichiometry_str, products_stoichiometry_str, reactants_ids_str, products_ids_str, reversibility_str)) { // parse the .csv file BiochemicalReaction row; row.reaction_name = rxn_name_str; row.reaction_id = rxn_id_str; row.equation = equation_str; row.gpr = gpr_str; row.used = (used__str == "t") ? true : false; // parse the reactant and product ids std::vector<std::string> reactants_ids; if (remove_compartments) reactants_ids = SplitString(ReplaceTokens(reactants_ids_str, { "[\{\}\']", "_p", "_c", "_e", "_m", "_r", "\\s+" }, ""), ","); else reactants_ids = SplitString(ReplaceTokens(reactants_ids_str, { "[\{\}\']", "\\s+" }, ""), ","); for (const std::string& met_id : reactants_ids) { if (!met_id.empty()) { row.reactants_ids.push_back(met_id); } } std::vector<std::string> products_ids; if (remove_compartments) products_ids = SplitString(ReplaceTokens(products_ids_str, { "[\{\}\']", "_p", "_c", "_e", "_m", "_r", "\\s+" }, ""), ","); else products_ids = SplitString(ReplaceTokens(products_ids_str, { "[\{\}\']", "\\s+" }, ""), ","); for (const std::string& met_id : products_ids) { if (!met_id.empty()) { row.products_ids.push_back(met_id); } } std::vector<std::string> reactants_stoichiometry_vector = SplitString(ReplaceTokens(reactants_stoichiometry_str, { "[\{\}]", "\\s+" }, ""), ","); for (const std::string& int_str : reactants_stoichiometry_vector) if (int_str != "") row.reactants_stoichiometry.push_back(std::stof(int_str)); std::vector<std::string> products_stoichiometry_vector = SplitString(ReplaceTokens(products_stoichiometry_str, { "[\{\}]", "\\s+" }, ""), ","); for (const std::string& int_str : products_stoichiometry_vector) if (int_str != "") row.products_stoichiometry.push_back(std::stof(int_str)); assert(row.reactants_ids.size() == row.reactants_stoichiometry.size()); assert(row.products_ids.size() == row.products_stoichiometry.size()); // parse the reversibility if (reversibility_str == "t" || reversibility_str == "TRUE") { row.reversibility = true; } else if (reversibility_str == "f" || reversibility_str == "FALSE") { row.reversibility = false; } else { std::cout << "Reversibility text: " << reversibility_str << " is not supported" << std::endl; } // build up the map auto found_in_data = biochemicalReactions.emplace(rxn_id_str, row); if (!found_in_data.second) biochemicalReactions.at(rxn_id_str) = row; } } template<typename TensorT> inline void BiochemicalReactionModel<TensorT>::ReadMetaData(const std::string & filename, MetaData & metaData) { io::CSVReader<5> data_in(filename); data_in.read_header(io::ignore_extra_column, "sample_group_name", "condition", "time", "subject", "temperature"); std::string sample_group_name_str, condition_str, time_str, subject_str, temperature_str; while (data_in.read_row(sample_group_name_str, condition_str, time_str, subject_str, temperature_str)) { // parse the .csv file MetaDatum row; row.sample_group_name = sample_group_name_str; row.condition = condition_str; row.time = time_str; row.subject = subject_str; row.temperature = temperature_str; // build up the map auto found_in_data = metaData.emplace(sample_group_name_str, row); if (!found_in_data.second) metaData.at(sample_group_name_str) = row; } } template<typename TensorT> inline void BiochemicalReactionModel<TensorT>::findMARs(bool exclude_currency_mets, bool exclude_non_currency_mets, TensorT threshold) { std::vector<std::string> component_group_names = component_group_names_; BiochemicalReactions new_reactions; std::vector<std::string> ignore_mets = {}; // set up the ignore list (metabolites not included in the MAR count) std::vector<std::string> exlude_mets = {}; // set up the exclude list (metabolites not included in the MAR met ids list) if (exclude_currency_mets) { // remove currency mets from the component_group_names ignore_mets = getDefaultMets(); exlude_mets = getDefaultMets(); std::vector<std::string> component_group_names_copy = component_group_names; component_group_names.clear(); std::vector<std::string> currency_mets = getCurrencyMets(); for (const std::string& met_id : component_group_names_copy) { if (std::count(currency_mets.begin(), currency_mets.end(), met_id) == 0) { component_group_names.push_back(met_id); } else { exlude_mets.push_back(met_id); ignore_mets.push_back(met_id); } } } else if (exclude_non_currency_mets) { // include only currency mets from the component_group_names std::vector<std::string> component_group_names_copy = component_group_names; component_group_names.clear(); std::vector<std::string> currency_mets = getCurrencyMets(); for (const std::string& met_id : component_group_names_copy) { if (std::count(currency_mets.begin(), currency_mets.end(), met_id) > 0) { component_group_names.push_back(met_id); } else { exlude_mets.push_back(met_id); ignore_mets.push_back(met_id); } } } else { ignore_mets = getDefaultMets(); } for (const auto& biochem_rxn_map : biochemicalReactions_) { std::vector<std::string> products_ids = biochem_rxn_map.second.products_ids; std::vector<std::string> reactants_ids = biochem_rxn_map.second.reactants_ids; // ignore source/sink reactions if (products_ids.size() == 0 || reactants_ids.size() == 0) continue; // ignore transport reactions std::sort(products_ids.begin(), products_ids.end()); std::sort(reactants_ids.begin(), reactants_ids.end()); if (products_ids == reactants_ids) continue; // ignore reactions with less than 50% metabolomics data coverage int total_cnt = 0; int prod_cnt = 0; int react_cnt = 0; std::vector<std::string> prod_ids; std::vector<std::string> react_ids; std::vector<float> prod_stoich; std::vector<float> react_stoich; for (int i = 0; i < biochem_rxn_map.second.products_ids.size(); ++i) { if (std::count(component_group_names.begin(), component_group_names.end(), biochem_rxn_map.second.products_ids[i]) != 0) { ++prod_cnt; } if (std::count(ignore_mets.begin(), ignore_mets.end(), biochem_rxn_map.second.products_ids[i]) == 0) { ++total_cnt; } if (std::count(exlude_mets.begin(), exlude_mets.end(), biochem_rxn_map.second.products_ids[i]) == 0 && std::count(component_group_names.begin(), component_group_names.end(), biochem_rxn_map.second.products_ids[i]) != 0) { prod_ids.push_back(biochem_rxn_map.second.products_ids[i]); prod_stoich.push_back(biochem_rxn_map.second.products_stoichiometry[i]); } } for (int i = 0; i < biochem_rxn_map.second.reactants_ids.size(); ++i) { if (std::count(component_group_names.begin(), component_group_names.end(), biochem_rxn_map.second.reactants_ids[i]) != 0) { ++react_cnt; } if (std::count(ignore_mets.begin(), ignore_mets.end(), biochem_rxn_map.second.reactants_ids[i]) == 0) { ++total_cnt; } if (std::count(exlude_mets.begin(), exlude_mets.end(), biochem_rxn_map.second.reactants_ids[i]) == 0 && std::count(component_group_names.begin(), component_group_names.end(), biochem_rxn_map.second.reactants_ids[i]) != 0) { react_ids.push_back(biochem_rxn_map.second.reactants_ids[i]); react_stoich.push_back(biochem_rxn_map.second.reactants_stoichiometry[i]); } } if (((TensorT)(prod_cnt + react_cnt)) / ((TensorT)total_cnt) < threshold) continue; if (prod_cnt <= 0 || react_cnt <= 0) continue; if (exclude_currency_mets) { std::string rxn_id = biochem_rxn_map.first + "_" + "NoCurrencyMets"; BiochemicalReaction mod_rxn = biochem_rxn_map.second; mod_rxn.products_ids = prod_ids; mod_rxn.products_stoichiometry = prod_stoich; mod_rxn.reactants_ids = react_ids; mod_rxn.reactants_stoichiometry = react_stoich; mod_rxn.updateEquation(); new_reactions.emplace(rxn_id, mod_rxn); reaction_ids_.push_back(rxn_id); } else if (exclude_non_currency_mets) { std::string rxn_id = biochem_rxn_map.first + "_" + "CurrencyOnlyMets"; BiochemicalReaction mod_rxn = biochem_rxn_map.second; mod_rxn.products_ids = prod_ids; mod_rxn.products_stoichiometry = prod_stoich; mod_rxn.reactants_ids = react_ids; mod_rxn.reactants_stoichiometry = react_stoich; mod_rxn.updateEquation(); new_reactions.emplace(rxn_id, mod_rxn); reaction_ids_.push_back(rxn_id); } else { reaction_ids_.push_back(biochem_rxn_map.first); } } if (new_reactions.size() > 0) { for (auto& new_rxn : new_reactions) { biochemicalReactions_.emplace(new_rxn.first, new_rxn.second); } } } template<typename TensorT> inline void BiochemicalReactionModel<TensorT>::removeRedundantMARs() { std::vector<std::string> reaction_ids_copy, unique_reactants_ids; for (const std::string& reaction_id : reaction_ids_) { std::vector<std::string> products_ids = biochemicalReactions_.at(reaction_id).products_ids; std::vector<std::string> reactants_ids = biochemicalReactions_.at(reaction_id).reactants_ids; std::vector<std::string> metabolite_ids; // extract out products and reactants for (const std::string& met_id : products_ids) { if (std::count(component_group_names_.begin(), component_group_names_.end(), met_id) != 0) metabolite_ids.push_back(met_id); } for (const std::string& met_id : reactants_ids) { if (std::count(component_group_names_.begin(), component_group_names_.end(), met_id) != 0) metabolite_ids.push_back(met_id); } // sort the metabolite ids, and concatenate into a string std::sort(metabolite_ids.begin(), metabolite_ids.end()); std::string metabolites; for (auto const& s : metabolite_ids) { metabolites += "/" + s; } // check if the concatenated metabolites exist if (std::count(unique_reactants_ids.begin(), unique_reactants_ids.end(), metabolites) == 0) { reaction_ids_copy.push_back(reaction_id); unique_reactants_ids.push_back(metabolites); } } reaction_ids_ = reaction_ids_copy; } template<typename TensorT> inline void BiochemicalReactionModel<TensorT>::findComponentGroupNames() { // get all of the component_group_names std::set<std::string> component_group_names; for (auto const& met_map1 : metabolomicsData_) for (auto const& met_map_2 : met_map1.second) component_group_names.insert(met_map_2.first); component_group_names_.assign(component_group_names.begin(), component_group_names.end()); } template<typename TensorT> inline void BiochemicalReactionModel<TensorT>::findLabels(const std::string& label) { // get all of the sample group names/labels sample_group_names_.clear(); labels_.clear(); sample_group_name_to_label_.clear(); sample_group_names_.reserve(metaData_.size()); labels_.reserve(metaData_.size()); for (auto const& imap : metaData_) { sample_group_names_.push_back(imap.first); if (label == "condition") { sample_group_name_to_label_.emplace(imap.first, imap.second.condition); if (std::count(labels_.begin(), labels_.end(), imap.second.condition) == 0) labels_.push_back(imap.second.condition); } else if (label == "subject") { sample_group_name_to_label_.emplace(imap.first, imap.second.subject); if (std::count(labels_.begin(), labels_.end(), imap.second.subject) == 0) labels_.push_back(imap.second.subject); } } } template<typename TensorT> inline float BiochemicalReactionModel<TensorT>::makeDefaultMetabolomicsData(const std::string & met_id) { if (met_id == "pi") return 1.0; else if (met_id == "h2o") return 55.0e-3; else if (met_id == "h2") return 34.0; else if (met_id == "o2") return 55.0; else if (met_id == "co2") return 1.4; else if (met_id == "h") return 1.0; else return 1.0; } template<typename TensorT> inline float BiochemicalReactionModel<TensorT>::calculateMAR(const std::map<std::string, std::vector<MetabolomicsDatum>>& metabolomicsData, const BiochemicalReaction & biochemicalReaction) { // calculate MAR TensorT mar = 1; for (int i = 0; i < biochemicalReaction.products_ids.size(); ++i) { std::string met_id = biochemicalReaction.products_ids[i]; int met_stoich = biochemicalReaction.products_stoichiometry[i]; TensorT met_conc = 1; if (metabolomicsData.count(met_id) > 0) { MetabolomicsDatum metabolomics_datum = selectRandomElement(metabolomicsData.at(met_id)); met_conc = metabolomics_datum.calculated_concentration; } else met_conc = makeDefaultMetabolomicsData(met_id); mar *= pow(met_conc, met_stoich); } for (int i = 0; i < biochemicalReaction.reactants_ids.size(); ++i) { std::string met_id = biochemicalReaction.reactants_ids[i]; int met_stoich = biochemicalReaction.reactants_stoichiometry[i]; TensorT met_conc = 1; if (metabolomicsData.count(met_id) > 0) { MetabolomicsDatum metabolomics_datum = selectRandomElement(metabolomicsData.at(met_id)); met_conc = metabolomics_datum.calculated_concentration; } else met_conc = makeDefaultMetabolomicsData(met_id); mar *= pow(met_conc, met_stoich); } // check for the upper/lower limits if (mar >= 1e3) mar = 1e3; else if (mar <= 1e-3) mar = 1e-3; return mar; } template<typename TensorT> inline float BiochemicalReactionModel<TensorT>::getRandomConcentration(const std::map<std::string, std::vector<MetabolomicsDatum>>& metabolomicsData, const std::string & met_id) { if (metabolomicsData.count(met_id) > 0) { MetabolomicsDatum metabolomics_datum = selectRandomElement(metabolomicsData.at(met_id)); return metabolomics_datum.calculated_concentration; } else { return 0.0f; } } template<typename TensorT> inline std::vector<std::string> BiochemicalReactionModel<TensorT>::getDefaultMets() { std::vector<std::string> default_mets = { "pi", "h", "h2", "h2o", "co2", "o2", "so4", "so3", "o2s", "no", "nh3", "nh4", "na1", "fe2", "fe3", "hco3", "h2o2", "ca2", "co", "k", "cl" }; return default_mets; } template<typename TensorT> inline std::vector<std::string> BiochemicalReactionModel<TensorT>::getCurrencyMets() { std::vector<std::string> currency_mets = { // charged/uncharged nucleotides "atp", "adp", "amp", "itp", "idp", "imp", "gtp", "gdp", "gmp", "utp", "udp", "ump", "ctp", "cdp", "cmp", "xtp", "xdp", "xmp", "ttp", "tdp", "tmp", // redox metabolites "nad", "nadh", "nadp", "nadph", "gthox", "gthrd", // COA moieties "accoa", "coa", // charged/uncharged nitrogen metabolites "glu__L", "akg", "gln__L" }; return currency_mets; } template<typename TensorT> inline void BiochemicalReactionModel<TensorT>::getMetDataAsTensors(Eigen::Tensor<TensorT, 2>& data, std::vector<std::string>& labels, const std::vector<std::string>& sample_group_names, const std::vector<std::string>& component_group_names, const std::map<std::string, std::string>& sample_group_name_to_label, const std::map<std::string, int>& sample_group_name_to_reps, const bool& use_concentrations, const bool& use_MARs, const bool& sample_values, const bool& iter_values, const bool& fill_sampling, const bool& fill_mean, const bool& fill_zero, const bool& apply_fold_change, const std::string& fold_change_ref, const TensorT& fold_change_log_base, const bool& randomize_sample_group_names) const { // clear the data structures data.setZero(); labels.clear(); // initialize needed helper auto calcMean = [](const std::vector<MetabolomicsDatum>& met_data) { TensorT sum = 0; for (const MetabolomicsDatum& met_datum : met_data) { sum += met_datum.calculated_concentration; } TensorT mean = sum / met_data.size(); return mean; }; auto calcFC = [](const TensorT& value, const TensorT& ref, const TensorT& fold_change_log_base) { TensorT fc = 0; if (value > 0 && ref > 0) { fc = minFunc(maxFunc(std::log(value / ref) / std::log(fold_change_log_base), -1), 1); } return fc; }; // determine the number of total samples int total_samples = 0; int samples_cnt = 0; std::vector<int> sample_indices; for (const std::string& sample_group_name : sample_group_names) { total_samples += sample_group_name_to_reps.at(sample_group_name); for (int rep_iter = 0; rep_iter < sample_group_name_to_reps.at(sample_group_name); ++rep_iter) { sample_indices.push_back(samples_cnt); ++samples_cnt; } } labels.resize(total_samples); // Shuffle the indices for the labels if (randomize_sample_group_names) { auto rng = std::default_random_engine{}; std::shuffle(std::begin(sample_indices), std::end(sample_indices), rng); } // optimization: create a cache for the means // create the data matrix int sample_iter = -1; // track the number of samples samples_cnt = 0; for (const std::string& sample_group_name : sample_group_names) { ++sample_iter; // Check for missing sample_group_names if (metabolomicsData_.count(sample_group_name) <= 0) { //std::cout << "sample_group_name " << sample_group_name << " is missing." << std::endl; continue; } int feature_iter = -1; // track the number of features for (const std::string& component_group_name : component_group_names) { ++feature_iter; // Check for missing component_group_names if (use_concentrations && metabolomicsData_.at(sample_group_name).count(component_group_name) <= 0) { //std::cout << "component_group_name " << component_group_name << " is missing from sample_group_name " << sample_group_name << "." << std::endl; continue; } // Iterate through each replicate and add the data to the data matrix int samples_cnt_prev = samples_cnt; for (int rep_iter = 0; rep_iter < sample_group_name_to_reps.at(sample_group_name); ++rep_iter) { TensorT value = 0; if (use_concentrations && sample_values) { // Assign the value for each replicate through random sampling of the replicates MetabolomicsDatum random_met = selectRandomElement(metabolomicsData_.at(sample_group_name).at(component_group_name)); value = random_met.calculated_concentration; if (apply_fold_change) { if (metabolomicsData_.at(fold_change_ref).count(component_group_name) > 0) value = calcFC(value, selectRandomElement(metabolomicsData_.at(fold_change_ref).at(component_group_name)).calculated_concentration, fold_change_log_base); else value = 0; } } else if (use_concentrations && iter_values) { // Or by iterating through the replicates filling in missing values as needed if (rep_iter >= metabolomicsData_.at(sample_group_name).at(component_group_name).size() && fill_sampling) { MetabolomicsDatum random_met = selectRandomElement(metabolomicsData_.at(sample_group_name).at(component_group_name)); value = random_met.calculated_concentration; } else if (rep_iter >= metabolomicsData_.at(sample_group_name).at(component_group_name).size() && fill_mean) { value = calcMean(metabolomicsData_.at(sample_group_name).at(component_group_name)); } else if (rep_iter >= metabolomicsData_.at(sample_group_name).at(component_group_name).size() && fill_zero) { value = 1e-6; } else { value = metabolomicsData_.at(sample_group_name).at(component_group_name).at(rep_iter).calculated_concentration; } if (apply_fold_change) { if (metabolomicsData_.at(fold_change_ref).count(component_group_name) > 0 && rep_iter >= metabolomicsData_.at(fold_change_ref).at(component_group_name).size() && fill_sampling) { value = calcFC(value, selectRandomElement(metabolomicsData_.at(fold_change_ref).at(component_group_name)).calculated_concentration, fold_change_log_base); } else if (metabolomicsData_.at(fold_change_ref).count(component_group_name) > 0 && rep_iter >= metabolomicsData_.at(fold_change_ref).at(component_group_name).size() && fill_mean) { value = calcFC(value, calcMean(metabolomicsData_.at(fold_change_ref).at(component_group_name)), fold_change_log_base); } else if (metabolomicsData_.at(fold_change_ref).count(component_group_name) > 0 && rep_iter >= metabolomicsData_.at(fold_change_ref).at(component_group_name).size() && fill_zero) { value = 0; } else if (metabolomicsData_.at(fold_change_ref).count(component_group_name) == 0) { value = 0; } else { value = calcFC(value, metabolomicsData_.at(fold_change_ref).at(component_group_name).at(rep_iter).calculated_concentration, fold_change_log_base); } } } else if (use_MARs && sample_values) { // OR by sampling mass action ratios value = calculateMAR(metabolomicsData_.at(sample_group_name), biochemicalReactions_.at(component_group_name)); if (apply_fold_change) { value = calcFC(value, calculateMAR(metabolomicsData_.at(fold_change_ref), biochemicalReactions_.at(component_group_name)), fold_change_log_base); } } data(feature_iter, sample_indices.at(samples_cnt_prev)) = value; ++samples_cnt_prev; } } // Iterate through each replicate and add the label to the label matrix for (int rep_iter = 0; rep_iter < sample_group_name_to_reps.at(sample_group_name); ++rep_iter) { labels.at(sample_indices.at(samples_cnt)) = sample_group_name_to_label.at(sample_group_name); ++samples_cnt; } } } template<typename TensorT> inline std::pair<int, int> BiochemicalReactionModel<TensorT>::getMaxReplicatesAndNLabels(std::map<std::string, int>& sample_group_name_to_reps, const std::vector<std::string>& sample_group_names, const std::vector<std::string>& component_group_names) const { int max_reps = 0; // count of the global max replicates int n_labels = 0; sample_group_name_to_reps.clear(); for (const std::string& sample_group_name : sample_group_names) { if (metabolomicsData_.count(sample_group_name) <= 0) continue; int max_reps_per_sample = 0; // count of the per sample max replicates for (const std::string& component_group_name : component_group_names) { if (metabolomicsData_.at(sample_group_name).count(component_group_name) <= 0) continue; int n_reps = metabolomicsData_.at(sample_group_name).at(component_group_name).size(); if (max_reps < n_reps) max_reps = n_reps; // update the global max_reps if (max_reps_per_sample < n_reps) max_reps_per_sample = n_reps; // update the per sample max reps } n_labels += max_reps_per_sample; // estimate the number of replicates per sample based on the per sample max reps sample_group_name_to_reps.emplace(sample_group_name, max_reps_per_sample); } return std::make_pair(max_reps, n_labels); } template<typename TensorT> inline void BiochemicalReactionModel<TensorT>::clear() { metabolomicsData_.clear(); biochemicalReactions_.clear(); metaData_.clear(); reaction_ids_.clear(); sample_group_names_.clear(); labels_.clear(); component_group_names_.clear(); } } #endif //EVONET_BIOCHEMICALREACTION_H<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE LinkFile test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/io/LinkFile.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(LinkFile1) BOOST_AUTO_TEST_CASE(constructor) { LinkFile* ptr = nullptr; LinkFile* nullPointer = nullptr; ptr = new LinkFile(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { LinkFile* ptr = nullptr; ptr = new LinkFile(); delete ptr; } BOOST_AUTO_TEST_CASE(storeAndLoadCsv) { LinkFile data; std::string filename = "LinkFileTest.csv"; // create list of dummy links std::map<std::string, std::shared_ptr<Link>> links; for (int i=0; i<3; ++i) { std::shared_ptr<Link> link(new Link( "Link_" + std::to_string(i), "Source_" + std::to_string(i), "Sink_" + std::to_string(i), "Weight_" + std::to_string(i))); link->setModuleName(std::to_string(i)); links.emplace("Link_" + std::to_string(i), link); } data.storeLinksCsv(filename, links); std::map<std::string, std::shared_ptr<Link>> links_test; data.loadLinksCsv(filename, links_test); int i = 0; for (auto& link_map: links_test) { BOOST_CHECK_EQUAL(link_map.second->getName(), "Link_" + std::to_string(i)); BOOST_CHECK_EQUAL(link_map.second->getSourceNodeName(), "Source_" + std::to_string(i)); BOOST_CHECK_EQUAL(link_map.second->getSinkNodeName(), "Sink_" + std::to_string(i)); BOOST_CHECK_EQUAL(link_map.second->getWeightName(), "Weight_" + std::to_string(i)); BOOST_CHECK_EQUAL(link_map.second->getModuleName(), std::to_string(i)); ++i; } } BOOST_AUTO_TEST_CASE(storeAndLoadBinary) { LinkFile data; std::string filename = "LinkFileTest.bin"; // create list of dummy links std::map<std::string, std::shared_ptr<Link>> links; for (int i = 0; i < 3; ++i) { std::shared_ptr<Link> link(new Link( "Link_" + std::to_string(i), "Source_" + std::to_string(i), "Sink_" + std::to_string(i), "Weight_" + std::to_string(i))); link->setModuleName(std::to_string(i)); links.emplace("Link_" + std::to_string(i), link); } data.storeLinksBinary(filename, links); std::map<std::string, std::shared_ptr<Link>> links_test; data.loadLinksBinary(filename, links_test); int i = 0; for (auto& link_map : links_test) { BOOST_CHECK_EQUAL(link_map.second->getName(), "Link_" + std::to_string(i)); BOOST_CHECK_EQUAL(link_map.second->getSourceNodeName(), "Source_" + std::to_string(i)); BOOST_CHECK_EQUAL(link_map.second->getSinkNodeName(), "Sink_" + std::to_string(i)); BOOST_CHECK_EQUAL(link_map.second->getWeightName(), "Weight_" + std::to_string(i)); BOOST_CHECK_EQUAL(link_map.second->getModuleName(), std::to_string(i)); ++i; } } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE Parameters test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/io/Parameters.h> #include <EvoNet/test_config.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(Parameters) BOOST_AUTO_TEST_CASE(sizeOfParametersTest) { // Make the test tuple EvoNetParameters::General::ID id("id", -1); EvoNetParameters::General::DataDir data_dir("data_dir", std::string("")); EvoNetParameters::ModelTrainer::BatchSize batch_size("batch_size", 32); EvoNetParameters::ModelTrainer::MemorySize memory_size("memory_size", 64); auto parameters = std::make_tuple(id, data_dir, batch_size, memory_size); // Test the size size_t my_tuple_size = sizeOfParameters(parameters); BOOST_CHECK_EQUAL(my_tuple_size, 4); } BOOST_AUTO_TEST_CASE(loadParametersFromCsvTest) { // Make the test tuple EvoNetParameters::General::ID id("id", -1); EvoNetParameters::General::DataDir data_dir("data_dir", std::string("")); EvoNetParameters::ModelTrainer::BatchSize batch_size("batch_size", 0); EvoNetParameters::ModelTrainer::MemorySize memory_size("memory_size", 0); auto parameters = std::make_tuple(id, data_dir, batch_size, memory_size); // Test reading in the parameters file const int id_int = -1; const std::string parameters_filename = EVONET_GET_TEST_DATA_PATH("Parameters.csv"); LoadParametersFromCsv loadParametersFromCsv(id_int, parameters_filename); parameters = std::apply([&loadParametersFromCsv](auto&& ...args) { return loadParametersFromCsv(args...); }, parameters); BOOST_CHECK_EQUAL(std::get<EvoNetParameters::ModelTrainer::BatchSize>(parameters).get(), 32); BOOST_CHECK_EQUAL(std::get<EvoNetParameters::ModelTrainer::MemorySize>(parameters).get(), 64); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE MetabolomicsClassificationDataSimulator test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/simulator/MetabolomicsClassificationDataSimulator.h> #include <EvoNet/test_config.h> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(metabolomicsClasificationDataSimulator) BOOST_AUTO_TEST_CASE(constructor) { MetabolomicsClassificationDataSimulator<float>* ptr = nullptr; MetabolomicsClassificationDataSimulator<float>* nullPointer = nullptr; ptr = new MetabolomicsClassificationDataSimulator<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { MetabolomicsClassificationDataSimulator<float>* ptr = nullptr; ptr = new MetabolomicsClassificationDataSimulator<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(readAndProcessMetabolomicsTrainingAndValidationData) { // parameters for testing std::string biochem_rxns_filename = EVONET_GET_TEST_DATA_PATH("RBCGlycolysis.csv"); std::string metabo_data_filename_train = EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_Metabolomics_train.csv"); std::string meta_data_filename_train = EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_MetaData_train.csv"); std::string metabo_data_filename_test = EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_Metabolomics_test.csv"); std::string meta_data_filename_test = EVONET_GET_TEST_DATA_PATH("PLT_timeCourse_MetaData_test.csv"); const int n_epochs = 12; const int batch_size = 64; const int memory_size = 1; int n_reps_per_sample = n_epochs * batch_size / 4; // data structures needed for testing Eigen::Tensor<float, 1> metabo_data_expected; Eigen::Tensor<float, 1> metabo_labels_expected; Eigen::Tensor<float, 1> input_test; Eigen::Tensor<float, 1> loss_output_test; Eigen::Tensor<float, 1> metric_output_test; // define the data simulator MetabolomicsClassificationDataSimulator<float> metabolomics_data; int n_reaction_ids_training, n_labels_training, n_component_group_names_training; int n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation; // Test with use_concentrations, iter_values, fill_zero, w/o fold change, w/o offline transformation, w/o online transformation metabolomics_data.readAndProcessMetabolomicsTrainingAndValidationData( n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, true, false, false, true, false, false, true, false, "S01_D01_PLT_25C_0hr", 10, false, false, false, false, false, false, n_reps_per_sample, false, false, n_epochs, batch_size, memory_size); BOOST_CHECK_EQUAL(n_reaction_ids_training, 0); BOOST_CHECK_EQUAL(n_labels_training, 1); BOOST_CHECK_EQUAL(n_component_group_names_training, 81); BOOST_CHECK_EQUAL(n_reaction_ids_validation, 0); BOOST_CHECK_EQUAL(n_labels_validation, 1); BOOST_CHECK_EQUAL(n_component_group_names_validation, 81); BOOST_CHECK_EQUAL(metabolomics_data.labels_training_.at(0), "D01"); BOOST_CHECK_EQUAL(metabolomics_data.labels_validation_.at(0), "D01"); // Test the head of the training data metabo_data_expected.resize(n_component_group_names_training); metabo_data_expected.setValues({ 0.926902,0.926902,0.528539,0.160019,1.12682,1.22998,0.0190169,0.450247,13.7926,0.00209141,0.340882,10.1745,23.9011,2.14484,4.50086,168.859,0.0182855,21.4171,1.06531,0.0819526,0.340459,0.643785,0.111617,0.00248486,0.0121332,17.836,0.00217249,0.0259041,7.11653,0.290879,3.44139,1.57565,0.961545,3.38213,0.100865,13.1692,50.2542,130.873,2.07786,19.1111,1.53861,1.19125,13.8566,0.0490362,13.8038,11.4394,4.06357,0.235487,8.97541,0.0716525,0.352367,3.36852,358.106,1.63892,1.92487,0.182818,4.8659,0.346883,0.0258523,10.3065,18.0953,0.218174,2.96289,0.000862999,2.56502,0.371797,0.903806,0.758988,4.29996,3.665,6.52141,2.26217,2.5102,1.05417,1.39991,0.644587,0.536492,0.0300802,46.5647,1.00421,2.60011 }); metabo_labels_expected.resize(n_labels_training); metabo_labels_expected.setValues({ 1 }); input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); for (int i = 0; i < n_component_group_names_training; ++i) { BOOST_CHECK_CLOSE(input_test(i), metabo_data_expected(i), 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the training data metabo_data_expected.resize(n_component_group_names_training); metabo_data_expected.setValues({ 0.265556,0.265556,0.791721,0.472251,0.432787,2.2351,0.00944648,0.247813,12.418,0.00141348,0.55053,16.8187,36.9833,2.13488,5.49174,179.439,0.0153601,28.1836,1.26304,0.0579334,0.290531,0.768753,0.102008,0.00386558,0.00689083,8.13605,0.00115203,0.00151532,5.04463,0,0.679528,0.831631,0.880628,2.99608,0.0236374,4.88753,51.9047,45.1772,1.48239,12.7094,1.05689,1.85818,22.8213,0.0334685,6.07156,7.07805,3.22018,0.0865703,10.317,0.0204963,2.79232,4.65322,171.598,0.95634,1.76564,0.100189,2.95791,0.189656,0.00894318,15.2019,21.9901,0.0690577,3.59603,0.00207443,7.39086,0.152056,0.299171,1.11869,5.06563,5.21786,8.57755,2.12757,2.87938,0.667493,0.930508,0.5112,0.283961,0.00564798,101.789,0.762531,2.10564 }); metabo_labels_expected.resize(n_labels_training); metabo_labels_expected.setValues({ 1 }); input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); for (int i = 0; i < n_component_group_names_training; ++i) { BOOST_CHECK_CLOSE(input_test(i), metabo_data_expected(i), 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the head of the validation data metabo_data_expected.resize(n_component_group_names_validation); metabo_data_expected.setValues({ 0.926902,0.926902,0.001,0.160019,1.12682,1.22998,0.0190169,0.450247,13.7926,0.00209141,0.340882,10.1745,23.9011,2.14484,4.50086,168.859,0.0182855,21.4171,1.06531,0.0819526,0.340459,0.643785,0.111617,0.00248486,0.0121332,17.836,0.00217249,0.0259041,7.11653,0.290879,3.44139,1.57565,0.961545,3.38213,0.100865,13.1692,50.2542,130.873,2.07786,19.1111,1.53861,1.19125,13.8566,0.0490362,13.8038,11.4394,4.06357,0.235487,8.97541,0.0716525,0.352367,3.36852,358.106,1.63892,1.92487,0.182818,4.8659,0.346883,0.0258523,10.3065,18.0953,0.218174,2.96289,0.000862999,2.56502,0.371797,0.903806,0.758988,4.29996,3.665,6.52141,2.26217,2.5102,1.05417,1.39991,0.644587,0.536492,0.0300802,46.5647,1.00421,2.60011, }); metabo_labels_expected.resize(n_labels_validation); metabo_labels_expected.setValues({ 1 }); input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); for (int i = 0; i < n_component_group_names_validation; ++i) { BOOST_CHECK_CLOSE(input_test(i), metabo_data_expected(i), 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the validation data metabo_data_expected.resize(n_component_group_names_validation); metabo_data_expected.setValues({ 0.165039,0.165039,0.001,0.622863,0.21407,9.03963,0.0114481,0.0558456,10.517,0.00114876,0.717408,11.6496,32.6992,1.57155,5.59629,39.0179,0.0128469,42.2866,1.2933,0.0223656,0.87308,0.3571,0.122049,0.00296956,0,7.70971,0.0010124,0.00450194,2.64212,0,0.504629,0.959799,0.104165,2.34459,0.0660267,6.86989,40.1772,32.413,2.95238,24.9857,0.62782,2.14593,19.5942,0.00573079,2.62684,4.63538,2.15791,0.207469,13.4438,0.021178,4.38389,4.26276,371.016,1.20303,1.2614,0.127565,2.22657,0.166088,0.00586264,19.8376,19.7377,0.073393,3.39037,0.00272942,11.0637,0.115039,0.340026,0.435979,5.21338,5.7024,8.28379,2.38046,3.00911,0.614063,0.366475,0.3324,0.21275,0.0288914,168.286,0.752142,1.45487 }); metabo_labels_expected.resize(n_labels_validation); metabo_labels_expected.setValues({ 1 }); input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); for (int i = 0; i < n_component_group_names_validation; ++i) { BOOST_CHECK_CLOSE(input_test(i), metabo_data_expected(i), 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test with use_concentrations, iter_values, fill_zero, w/o fold change, offline_linear_scale, w/o online transformation metabolomics_data.readAndProcessMetabolomicsTrainingAndValidationData( n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, true, false, false, true, false, false, true, false, "S01_D01_PLT_25C_0hr", 10, true, false, false, false, false, false, n_reps_per_sample, false, false, n_epochs, batch_size, memory_size); BOOST_CHECK_EQUAL(n_reaction_ids_training, 0); BOOST_CHECK_EQUAL(n_labels_training, 1); BOOST_CHECK_EQUAL(n_component_group_names_training, 81); BOOST_CHECK_EQUAL(n_reaction_ids_validation, 0); BOOST_CHECK_EQUAL(n_labels_validation, 1); BOOST_CHECK_EQUAL(n_component_group_names_validation, 81); BOOST_CHECK_EQUAL(metabolomics_data.labels_training_.at(0), "D01"); BOOST_CHECK_EQUAL(metabolomics_data.labels_validation_.at(0), "D01"); // Test the head of the training data metabo_data_expected.resize(n_component_group_names_training); metabo_data_expected.setValues({ 0.0018235,0.0018235,0.0010398,0.000314806,0.00221681,0.00241974,3.74122e-05,0.000885776,0.0271343,4.11445e-06,0.000670621,0.0200164,0.0470209,0.00421956,0.00885458,0.332198,3.59732e-05,0.042134,0.00209579,0.000161226,0.000669789,0.00126652,0.000219585,4.88849e-06,2.38698e-05,0.0350889,4.27396e-06,5.09615e-05,0.0140004,0.000572249,0.00677028,0.0030998,0.00189166,0.00665369,0.000198433,0.0259078,0.0988657,0.257468,0.00408779,0.0375975,0.00302693,0.00234356,0.0272602,9.64694e-05,0.0271563,0.0225049,0.00799431,0.000463277,0.0176574,0.000140963,0.000693216,0.00662692,0.704506,0.00322427,0.00378682,0.00035966,0.00957273,0.000682427,5.08595e-05,0.020276,0.0355991,0.000429215,0.00582892,1.69779e-06,0.00504618,0.00073144,0.00177807,0.00149317,0.00845936,0.00721019,0.0128296,0.00445039,0.00493834,0.00207388,0.00275405,0.0012681,0.00105545,5.91771e-05,0.0916072,0.00197559,0.00511523 }); metabo_labels_expected.resize(n_labels_training); metabo_labels_expected.setValues({ 1 }); input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); for (int i = 0; i < n_component_group_names_training; ++i) { BOOST_CHECK_CLOSE(input_test(i), metabo_data_expected(i), 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the training data metabo_data_expected.resize(n_component_group_names_training); metabo_data_expected.setValues({ 0.000522431,0.000522431,0.00155756,0.000929064,0.000851426,0.00439713,1.85842e-05,0.000487525,0.0244301,2.78076e-06,0.00108306,0.0330876,0.0727577,0.00419997,0.010804,0.353011,3.0218e-05,0.055446,0.00248479,0.000113973,0.000571566,0.00151238,0.000200681,7.6048e-06,1.35564e-05,0.0160061,2.2664e-06,2.9811e-06,0.00992435,0,0.00133684,0.00163608,0.00173247,0.00589422,4.65022e-05,0.0096153,0.102113,0.0888775,0.00291632,0.0250034,0.00207923,0.00365562,0.0448965,6.58429e-05,0.0119447,0.0139247,0.00633509,0.000170311,0.0202967,4.03227e-05,0.00549336,0.00915433,0.337587,0.00188142,0.00347356,0.000197102,0.00581913,0.000373112,1.7594e-05,0.0299069,0.0432613,0.000135858,0.00707451,4.08106e-06,0.0145401,0.000299141,0.000588563,0.00220082,0.00996567,0.0102652,0.0168747,0.00418559,0.00566463,0.00131317,0.0018306,0.00100569,0.00055864,1.11113e-05,0.20025,0.00150013,0.00414245 }); metabo_labels_expected.resize(n_labels_training); metabo_labels_expected.setValues({ 1 }); input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); for (int i = 0; i < n_component_group_names_training; ++i) { BOOST_CHECK_CLOSE(input_test(i), metabo_data_expected(i), 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the head of the validation data metabo_data_expected.resize(n_component_group_names_validation); metabo_data_expected.setValues({ 0.0018235,0.0018235,1.96731e-06,0.000314806,0.00221681,0.00241974,3.74122e-05,0.000885776,0.0271343,4.11445e-06,0.000670621,0.0200164,0.0470209,0.00421956,0.00885458,0.332198,3.59732e-05,0.042134,0.00209579,0.000161226,0.000669789,0.00126652,0.000219585,4.88849e-06,2.38698e-05,0.0350889,4.27396e-06,5.09615e-05,0.0140004,0.000572249,0.00677028,0.0030998,0.00189166,0.00665369,0.000198433,0.0259078,0.0988657,0.257468,0.00408779,0.0375975,0.00302693,0.00234356,0.0272602,9.64694e-05,0.0271563,0.0225049,0.00799431,0.000463277,0.0176574,0.000140963,0.000693216,0.00662692,0.704506,0.00322427,0.00378682,0.00035966,0.00957273,0.000682427,5.08595e-05,0.020276,0.0355991,0.000429215,0.00582892,1.69779e-06,0.00504618,0.00073144,0.00177807,0.00149317,0.00845936,0.00721019,0.0128296,0.00445039,0.00493834,0.00207388,0.00275405,0.0012681,0.00105545,5.91771e-05,0.0916072,0.00197559,0.00511523 }); metabo_labels_expected.resize(n_labels_validation); metabo_labels_expected.setValues({ 1 }); input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); for (int i = 0; i < n_component_group_names_validation; ++i) { BOOST_CHECK_CLOSE(input_test(i), metabo_data_expected(i), 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the validation data metabo_data_expected.resize(n_component_group_names_validation); metabo_data_expected.setValues({ 0.000324683,0.000324683,1.96731e-06,0.00122536,0.000421143,0.0177838,2.2522e-05,0.000109866,0.0206903,2.25998e-06,0.00141136,0.0229183,0.0643295,0.00309173,0.0110096,0.0767604,2.52738e-05,0.0831909,0.00254433,4.40002e-05,0.00171762,0.000702526,0.000240109,5.84204e-06,0,0.0151674,1.9917e-06,8.85672e-06,0.00519788,0,0.000992761,0.00188822,0.000204924,0.00461253,0.000129895,0.0135152,0.0790411,0.0637664,0.00580824,0.0491546,0.00123512,0.00422171,0.038548,1.12742e-05,0.00516782,0.00911924,0.00424527,0.000408156,0.026448,4.16637e-05,0.00862448,0.00838618,0.729904,0.00236673,0.00248156,0.00025096,0.00438035,0.000326747,1.15336e-05,0.0390268,0.0388302,0.000144387,0.0066699,5.36963e-06,0.0217657,0.000226318,0.000668937,0.000857705,0.0102563,0.0112184,0.0162968,0.0046831,0.00591985,0.00120805,0.00072097,0.000653933,0.000418545,5.68384e-05,0.331071,0.0014797,0.00286219 }); metabo_labels_expected.resize(n_labels_validation); metabo_labels_expected.setValues({ 1 }); input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); for (int i = 0; i < n_component_group_names_validation; ++i) { BOOST_CHECK_CLOSE(input_test(i), metabo_data_expected(i), 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test with use_concentrations, iter_values, fill_zero, apply_fold_change, w/o offline transformation, w/o online transformation metabolomics_data.readAndProcessMetabolomicsTrainingAndValidationData( n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, true, false, false, true, false, false, true, true, "S01_D01_PLT_25C_0hr", 10, false, false, false, false, false, false, n_reps_per_sample, false, false, n_epochs, batch_size, memory_size); BOOST_CHECK_EQUAL(n_reaction_ids_training, 0); BOOST_CHECK_EQUAL(n_labels_training, 1); BOOST_CHECK_EQUAL(n_component_group_names_training, 81); BOOST_CHECK_EQUAL(n_reaction_ids_validation, 0); BOOST_CHECK_EQUAL(n_labels_validation, 1); BOOST_CHECK_EQUAL(n_component_group_names_validation, 81); BOOST_CHECK_EQUAL(metabolomics_data.labels_training_.at(0), "D01"); BOOST_CHECK_EQUAL(metabolomics_data.labels_validation_.at(0), "D01"); // Test the head of the training data metabo_data_expected.resize(n_component_group_names_training); metabo_data_expected.setValues({ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 }); metabo_labels_expected.resize(n_labels_training); metabo_labels_expected.setValues({ 1 }); input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); for (int i = 0; i < n_component_group_names_training; ++i) { BOOST_CHECK_CLOSE(input_test(i), metabo_data_expected(i), 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the training data metabo_data_expected.resize(n_component_group_names_training); metabo_data_expected.setValues({ -0.546827,-0.546827,-0.101116,0.539373,-0.360525,0.321447,-0.39306,-0.210436,-0.060354,-0.130022,0.156553,0.254379,0.0286196,-0.0688724,0.0828516,-0.0289225,-0.0575143,-0.190464,0.23294,-0.139982,0.0126946,0.0446472,0.00337126,0.288224,-0.185271,-0.373936,-0.309864,-1,0.237554,0,-0.54831,-0.202118,-0.508552,-0.0364478,-0.402288,-0.272806,0.0109204,-0.534155,0.0556125,-0.134117,0.0587907,0.292802,0.0109027,-0.128265,-0.330514,-0.220343,-0.252261,-0.453414,0.129335,-0.544705,0.861516,0.0694954,-0.346926,-0.190808,0.0321057,-0.15278,-0.226965,-0.221589,-0.276213,0.303512,0.0749598,-0.636405,0.238492,0.322603,0.529939,-0.641168,-0.712761,0.178753,0.0489525,0.119797,0.0813833,0.0924567,0.164172,-0.21426,-0.196875,-0.143367,-0.376417,-0.59582,0.159936,-0.0493322,-0.0343665 }); metabo_labels_expected.resize(n_labels_training); metabo_labels_expected.setValues({ 1 }); input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); for (int i = 0; i < n_component_group_names_training; ++i) { BOOST_CHECK_CLOSE(input_test(i), metabo_data_expected(i), 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the head of the validation data metabo_data_expected.resize(n_component_group_names_validation); metabo_data_expected.setValues({ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 }); metabo_labels_expected.resize(n_labels_validation); metabo_labels_expected.setValues({ 1 }); input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); for (int i = 0; i < n_component_group_names_validation; ++i) { BOOST_CHECK_CLOSE(input_test(i), metabo_data_expected(i), 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the validation data metabo_data_expected.resize(n_component_group_names_validation); metabo_data_expected.setValues({ -0.762389,-0.762389,0,0.584782,-0.714069,0.911599,-0.247092,-0.568022,-0.12364,-0.107453,0.325132,0.197282,0.130092,-0.194354,0.112712,-0.75685,-0.147338,-0.216109,0.142816,-0.604204,0.444315,-0.280296,0.0786749,-0.116181,0,-0.393656,-0.419955,-0.404676,-0.053696,0,-0.877782,-0.171411,-1,-0.457568,-0.142088,-0.143169,-0.0683435,-0.635325,0.247625,0.1502,-0.217976,0.20326,0.0802466,-0.906257,-0.732069,-0.411958,-0.441715,-0.18427,0.0478741,-0.557619,1,0.14009,-0.0129507,-0.129144,-0.134047,-0.0362979,-0.320869,-0.291483,-0.591754,-0.0525188,-0.0157663,-0.530236,0.233763,0.181443,0.577785,-0.410605,-0.492643,-0.1118,0.089699,0.182599,0.0761641,0.0340422,0.0408017,-0.235639,-0.609576,-0.297319,-0.421356,-0.0330014,0.3002,-0.183784,-0.279151 }); metabo_labels_expected.resize(n_labels_validation); metabo_labels_expected.setValues({ 1 }); input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); for (int i = 0; i < n_component_group_names_validation; ++i) { BOOST_CHECK_CLOSE(input_test(i), metabo_data_expected(i), 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test with use_concentrations, sample_values, w/o fold change, w/o offline transformation, w/o online transformation metabolomics_data.readAndProcessMetabolomicsTrainingAndValidationData( n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, true, false, true, false, false, false, false, false, "S01_D01_PLT_25C_0hr", 10, false, false, false, false, false, false, n_reps_per_sample, false, false, n_epochs, batch_size, memory_size); BOOST_CHECK_EQUAL(n_reaction_ids_training, 0); BOOST_CHECK_EQUAL(n_labels_training, 1); BOOST_CHECK_EQUAL(n_component_group_names_training, 81); BOOST_CHECK_EQUAL(n_reaction_ids_validation, 0); BOOST_CHECK_EQUAL(n_labels_validation, 1); BOOST_CHECK_EQUAL(n_component_group_names_validation, 81); BOOST_CHECK_EQUAL(metabolomics_data.labels_training_.at(0), "D01"); BOOST_CHECK_EQUAL(metabolomics_data.labels_validation_.at(0), "D01"); // Test the head of the training data input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); for (int i = 0; i < n_component_group_names_training; ++i) { BOOST_CHECK_GE(input_test(i), 0.00054280, 1e-6); BOOST_CHECK_LE(input_test(i), 508.3080903, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the training data input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); for (int i = 0; i < n_component_group_names_training; ++i) { BOOST_CHECK_GE(input_test(i), 0, 1e-6); BOOST_CHECK_LE(input_test(i), 508.3080903, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the head of the validation data input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); for (int i = 0; i < n_component_group_names_validation; ++i) { BOOST_CHECK_GE(input_test(i), 0.00054280, 1e-6); BOOST_CHECK_LE(input_test(i), 508.3080903, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the validation data input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); for (int i = 0; i < n_component_group_names_validation; ++i) { BOOST_CHECK_GE(input_test(i), 0, 1e-6); BOOST_CHECK_LE(input_test(i), 508.3080903, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test with use_concentrations, sample_values, w/o fold change, offline_linear_scale, w/o online transformation metabolomics_data.readAndProcessMetabolomicsTrainingAndValidationData( n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, true, false, true, false, false, false, false, false, "S01_D01_PLT_25C_0hr", 10, true, false, false, false, false, false, n_reps_per_sample, false, false, n_epochs, batch_size, memory_size); BOOST_CHECK_EQUAL(n_reaction_ids_training, 0); BOOST_CHECK_EQUAL(n_labels_training, 1); BOOST_CHECK_EQUAL(n_component_group_names_training, 81); BOOST_CHECK_EQUAL(n_reaction_ids_validation, 0); BOOST_CHECK_EQUAL(n_labels_validation, 1); BOOST_CHECK_EQUAL(n_component_group_names_validation, 81); BOOST_CHECK_EQUAL(metabolomics_data.labels_training_.at(0), "D01"); BOOST_CHECK_EQUAL(metabolomics_data.labels_validation_.at(0), "D01"); // Test the head of the training data input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); for (int i = 0; i < n_component_group_names_training; ++i) { BOOST_CHECK_GE(input_test(i), 0, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the training data input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); for (int i = 0; i < n_component_group_names_training; ++i) { BOOST_CHECK_GE(input_test(i), 0, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the head of the validation data input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); for (int i = 0; i < n_component_group_names_validation; ++i) { BOOST_CHECK_GE(input_test(i), 0, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the validation data input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); for (int i = 0; i < n_component_group_names_validation; ++i) { BOOST_CHECK_GE(input_test(i), 0, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test with use_concentrations, sample_values, apply_fold_change, w/o offline transformation, w/o online transformation metabolomics_data.readAndProcessMetabolomicsTrainingAndValidationData( n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, true, false, true, false, false, false, false, true, "S01_D01_PLT_25C_0hr", 10, false, false, false, false, false, false, n_reps_per_sample, false, false, n_epochs, batch_size, memory_size); BOOST_CHECK_EQUAL(n_reaction_ids_training, 0); BOOST_CHECK_EQUAL(n_labels_training, 1); BOOST_CHECK_EQUAL(n_component_group_names_training, 81); BOOST_CHECK_EQUAL(n_reaction_ids_validation, 0); BOOST_CHECK_EQUAL(n_labels_validation, 1); BOOST_CHECK_EQUAL(n_component_group_names_validation, 81); BOOST_CHECK_EQUAL(metabolomics_data.labels_training_.at(0), "D01"); BOOST_CHECK_EQUAL(metabolomics_data.labels_validation_.at(0), "D01"); // Test the head of the training data input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); for (int i = 0; i < n_component_group_names_training; ++i) { BOOST_CHECK_GE(input_test(i), -1, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the training data input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_training })); for (int i = 0; i < n_component_group_names_training; ++i) { BOOST_CHECK_GE(input_test(i), -1, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the head of the validation data input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); for (int i = 0; i < n_component_group_names_validation; ++i) { BOOST_CHECK_GE(input_test(i), -1, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the validation data input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_component_group_names_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_component_group_names_validation })); for (int i = 0; i < n_component_group_names_validation; ++i) { BOOST_CHECK_GE(input_test(i), -1, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test with use_MARs, sample_values, w/o fold change, w/o offline transformation, w/o online transformation metabolomics_data.readAndProcessMetabolomicsTrainingAndValidationData( n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, false, true, true, false, false, false, false, false, "S01_D01_PLT_25C_0hr", 10, false, false, false, false, false, false, n_reps_per_sample, false, false, n_epochs, batch_size, memory_size); BOOST_CHECK_EQUAL(n_reaction_ids_training, 11); BOOST_CHECK_EQUAL(n_labels_training, 1); BOOST_CHECK_EQUAL(n_component_group_names_training, 81); BOOST_CHECK_EQUAL(n_reaction_ids_validation, 11); BOOST_CHECK_EQUAL(n_labels_validation, 1); BOOST_CHECK_EQUAL(n_component_group_names_validation, 81); BOOST_CHECK_EQUAL(metabolomics_data.labels_training_.at(0), "D01"); BOOST_CHECK_EQUAL(metabolomics_data.labels_validation_.at(0), "D01"); // Test the head of the training data input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_reaction_ids_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_reaction_ids_training })); for (int i = 0; i < n_reaction_ids_training; ++i) { BOOST_CHECK_GE(input_test(i), 1e-3, 1e-3); BOOST_CHECK_LE(input_test(i), 1e3, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the training data input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_reaction_ids_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_reaction_ids_training })); for (int i = 0; i < n_reaction_ids_training; ++i) { BOOST_CHECK_GE(input_test(i), 1e-3, 1e-3); BOOST_CHECK_LE(input_test(i), 1e3, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the head of the validation data input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_reaction_ids_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_reaction_ids_validation })); for (int i = 0; i < n_reaction_ids_validation; ++i) { BOOST_CHECK_GE(input_test(i), 1e-3, 1e-3); BOOST_CHECK_LE(input_test(i), 1e3, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the validation data input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_reaction_ids_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_reaction_ids_validation })); for (int i = 0; i < n_reaction_ids_validation; ++i) { BOOST_CHECK_GE(input_test(i), 1e-3, 1e-3); BOOST_CHECK_LE(input_test(i), 1e3, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test with use_MARs, sample_values, w/o fold change, offline_linear_scale, w/o online transformation metabolomics_data.readAndProcessMetabolomicsTrainingAndValidationData( n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, false, true, true, false, false, false, false, false, "S01_D01_PLT_25C_0hr", 10, true, false, false, false, false, false, n_reps_per_sample, false, false, n_epochs, batch_size, memory_size); BOOST_CHECK_EQUAL(n_reaction_ids_training, 11); BOOST_CHECK_EQUAL(n_labels_training, 1); BOOST_CHECK_EQUAL(n_component_group_names_training, 81); BOOST_CHECK_EQUAL(n_reaction_ids_validation, 11); BOOST_CHECK_EQUAL(n_labels_validation, 1); BOOST_CHECK_EQUAL(n_component_group_names_validation, 81); BOOST_CHECK_EQUAL(metabolomics_data.labels_training_.at(0), "D01"); BOOST_CHECK_EQUAL(metabolomics_data.labels_validation_.at(0), "D01"); // Test the head of the training data input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_reaction_ids_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_reaction_ids_training })); for (int i = 0; i < n_reaction_ids_training; ++i) { BOOST_CHECK_GE(input_test(i), 0, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the training data input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_reaction_ids_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_reaction_ids_training })); for (int i = 0; i < n_reaction_ids_training; ++i) { BOOST_CHECK_GE(input_test(i), 0, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the head of the validation data input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_reaction_ids_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_reaction_ids_validation })); for (int i = 0; i < n_reaction_ids_validation; ++i) { BOOST_CHECK_GE(input_test(i), 0, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the validation data input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_reaction_ids_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_reaction_ids_validation })); for (int i = 0; i < n_reaction_ids_validation; ++i) { BOOST_CHECK_GE(input_test(i), 0, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test with use_MARs, sample_values, apply_fold_change, w/o offline transformation, w/o online transformation metabolomics_data.readAndProcessMetabolomicsTrainingAndValidationData( n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, false, true, true, false, false, false, false, true, "S01_D01_PLT_25C_0hr", 10, false, false, false, false, false, false, n_reps_per_sample, false, false, n_epochs, batch_size, memory_size); BOOST_CHECK_EQUAL(n_reaction_ids_training, 11); BOOST_CHECK_EQUAL(n_labels_training, 1); BOOST_CHECK_EQUAL(n_component_group_names_training, 81); BOOST_CHECK_EQUAL(n_reaction_ids_validation, 11); BOOST_CHECK_EQUAL(n_labels_validation, 1); BOOST_CHECK_EQUAL(n_component_group_names_validation, 81); BOOST_CHECK_EQUAL(metabolomics_data.labels_training_.at(0), "D01"); BOOST_CHECK_EQUAL(metabolomics_data.labels_validation_.at(0), "D01"); // Test the head of the training data input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_reaction_ids_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_reaction_ids_training })); for (int i = 0; i < n_reaction_ids_training; ++i) { BOOST_CHECK_GE(input_test(i), -1, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the training data input_test = metabolomics_data.input_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_reaction_ids_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_reaction_ids_training })); for (int i = 0; i < n_reaction_ids_training; ++i) { BOOST_CHECK_GE(input_test(i), -1, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); metric_output_test = metabolomics_data.metric_output_data_training_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_training, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_training })); for (int i = 0; i < n_labels_training; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the head of the validation data input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_reaction_ids_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_reaction_ids_validation })); for (int i = 0; i < n_reaction_ids_validation; ++i) { BOOST_CHECK_GE(input_test(i), -1, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ 0, 0, 0, 0 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test the tail of the validation data input_test = metabolomics_data.input_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_reaction_ids_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_reaction_ids_validation })); for (int i = 0; i < n_reaction_ids_validation; ++i) { BOOST_CHECK_GE(input_test(i), -1, 1e-3); BOOST_CHECK_LE(input_test(i), 1, 1e-3); } loss_output_test = metabolomics_data.loss_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); metric_output_test = metabolomics_data.metric_output_data_validation_.slice(Eigen::array<Eigen::Index, 4>({ batch_size - 1, memory_size - 1, 0, n_epochs - 1 }), Eigen::array<Eigen::Index, 4>({ 1, 1, n_labels_validation, 1 }) ).reshape(Eigen::array<Eigen::Index, 1>({ n_labels_validation })); for (int i = 0; i < n_labels_validation; ++i) { BOOST_CHECK_CLOSE(loss_output_test(i), metabo_labels_expected(i), 1e-4); BOOST_CHECK_CLOSE(metric_output_test(i), metabo_labels_expected(i), 1e-4); } // Test with use_concentrations, iter_values, fill_zero, w/o fold change, w/o offline transformation, w/o online transformation, and shuffle_data_and_labels metabolomics_data.readAndProcessMetabolomicsTrainingAndValidationData( n_reaction_ids_training, n_labels_training, n_component_group_names_training, n_reaction_ids_validation, n_labels_validation, n_component_group_names_validation, biochem_rxns_filename, metabo_data_filename_train, meta_data_filename_train, metabo_data_filename_test, meta_data_filename_test, true, false, false, true, false, false, true, false, "S01_D01_PLT_25C_0hr", 10, false, false, false, false, false, false, n_reps_per_sample, false, true, n_epochs, batch_size, memory_size); BOOST_CHECK_EQUAL(n_reaction_ids_training, 0); BOOST_CHECK_EQUAL(n_labels_training, 1); BOOST_CHECK_EQUAL(n_component_group_names_training, 81); BOOST_CHECK_EQUAL(n_reaction_ids_validation, 0); BOOST_CHECK_EQUAL(n_labels_validation, 1); BOOST_CHECK_EQUAL(n_component_group_names_validation, 81); BOOST_CHECK_EQUAL(metabolomics_data.labels_training_.at(0), "D01"); BOOST_CHECK_EQUAL(metabolomics_data.labels_validation_.at(0), "D01"); } BOOST_AUTO_TEST_SUITE_END()<file_sep> cmake_minimum_required(VERSION 3.8.2 FATAL_ERROR) project("EvoNet_class_tests_evonet") #------------------------------------------------------------------------------ # Configure test file to get the TEST_DATA_PATH into the tests set(CF_EVONET_TEST_DATA_PATH "${PROJECT_SOURCE_DIR}/data/") set (CONFIGURED_TEST_CONFIG_H "${PROJECT_BINARY_DIR}/include/EvoNet/test_config.h") configure_file(${PROJECT_SOURCE_DIR}/include/EvoNet/test_config.h.in ${CONFIGURED_TEST_CONFIG_H}) #------------------------------------------------------------------------------ # Boost #------------------------------------------------------------------------------ find_package(boost 1.64 COMPONENTS unit_test_framework) if(Boost_FOUND) message(STATUS "Found Boost version ${Boost_MAJOR_VERSION}.${Boost_MINOR_VERSION}.${Boost_SUBMINOR_VERSION}" ) set(CF_EVONET_BOOST_VERSION_MAJOR ${Boost_MAJOR_VERSION}) set(CF_EVONET_BOOST_VERSION_MINOR ${Boost_MINOR_VERSION}) set(CF_EVONET_BOOST_VERSION_SUBMINOR ${Boost_SUBMINOR_VERSION}) set(CF_EVONET_BOOST_VERSION ${Boost_VERSION}) else() message(FATAL_ERROR "Boost or one of its components not found!") endif() #------------------------------------------------------------------------------ # get the test executables include(executables.cmake) #------------------------------------------------------------------------------ # Include directories for tests set(EVONET_CLASS_TESTS_INTERNAL_INCLUDE_DIRECTORIES "${PROJECT_BINARY_DIR}/include/") # add EvoNet directories set(EVONET_CLASS_TESTS_EXTERNAL_INCLUDE_DIRECTORIES "${EvoNet_INCLUDE_DIRECTORIES};${Boost_INCLUDE_DIRS}") include_directories(${EVONET_CLASS_TESTS_INTERNAL_INCLUDE_DIRECTORIES}) include_directories(SYSTEM ${EVONET_CLASS_TESTS_EXTERNAL_INCLUDE_DIRECTORIES}) #------------------------------------------------------------------------------ # disable optimization for tests for gcc like compilers if (CMAKE_COMPILER_IS_INTELCXX OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG) set(_TMP_CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE}) set(CMAKE_CXX_FLAGS_RELEASE "-O0") endif() #------------------------------------------------------------------------------ # core add_custom_target(CORE_TEST) add_dependencies(CORE_TEST ${core_executables_list}) # io add_custom_target(IO_TEST) add_dependencies(IO_TEST ${io_executables_list}) # ml add_custom_target(ML_TEST) add_dependencies(ML_TEST ${ml_executables_list}) # ml add_custom_target(MODELS_TEST) add_dependencies(MODELS_TEST ${models_executables_list}) # algorithm add_custom_target(SIMULATOR_TEST) add_dependencies(SIMULATOR_TEST ${simulator_executables_list}) # algorithm add_custom_target(GRAPH_TEST) add_dependencies(GRAPH_TEST ${graph_executables_list}) #------------------------------------------------------------------------------ # Add the actual tests foreach(_class_test ${TEST_executables}) add_executable(${_class_test} source/${_class_test}) target_link_libraries(${_class_test} ${EvoNet_LIBRARIES} ${Boost_LIBRARIES}) add_test(${_class_test} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${_class_test}) # only add OPENMP flags to gcc linker (execpt Mac OS X, due to compiler bug # see https://sourceforge.net/apps/trac/open-ms/ticket/280 for details) if (OPENMP_FOUND AND NOT MSVC AND NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin") set_target_properties(${_class_test} PROPERTIES LINK_FLAGS ${OpenMP_CXX_FLAGS}) endif() if (EVONET_CUDA) target_link_libraries(${_class_test} ${CUDA_LIBRARIES}) set_property(TARGET ${_class_test} PROPERTY CUDA_STANDARD 11) endif() endforeach(_class_test) #------------------------------------------------------------------------------ # restore old compiler flags if (CMAKE_COMPILER_IS_INTELCXX OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG) set(${CMAKE_CXX_FLAGS_RELEASE} ${_TMP_CMAKE_CXX_FLAGS_RELEASE}) endif() #------------------------------------------------------------------------------ # add filenames to Visual Studio solution tree set(sources_VS) foreach(i ${TEST_executables}) list(APPEND sources_VS "${i}") endforeach(i) source_group("" FILES ${sources_VS}) <file_sep>include(${CMAKE_ROOT}/Modules/ExternalProject.cmake) set_property (DIRECTORY PROPERTY EP_BASE Dependencies) set (DEPENDENCIES) set (EIGEN_VERSION master) message (STATUS "Adding Eigen ${EIGEN_VERSION} as an external project.") #if (WIN32) # set (EIGEN_INSTALL_CMD "move Eigen unsupported <INSTALL_DIR>/") #elseif (UNIX) # set (EIGEN_INSTALL_CMD "mv Eigen unsupported <INSTALL_DIR>/") #endif () ExternalProject_Add(eigen GIT_REPOSITORY "https://github.com/eigenteam/eigen-git-mirror.git" #GIT_TAG ${EIGEN_VERSION} # Need the dev branch to compile use MSVC UPDATE_COMMAND "" CONFIGURE_COMMAND "" BUILD_IN_SOURCE 1 BUILD_COMMAND "" INSTALL_COMMAND "" #INSTALL_COMMAND ${EIGEN_INSTALL_CMD} #INSTALL_DIR include )<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE WeightTensorData test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/WeightTensorData.h> #include <iostream> using namespace EvoNet; using namespace std; BOOST_AUTO_TEST_SUITE(weightTensorData) BOOST_AUTO_TEST_CASE(constructor) { WeightTensorDataCpu<float>* ptr = nullptr; WeightTensorDataCpu<float>* nullPointer = nullptr; ptr = new WeightTensorDataCpu<float>(); BOOST_CHECK_NE(ptr, nullPointer); delete ptr; } BOOST_AUTO_TEST_CASE(destructor) { WeightTensorDataCpu<float>* ptr = nullptr; ptr = new WeightTensorDataCpu<float>(); delete ptr; } BOOST_AUTO_TEST_CASE(comparison) { WeightTensorDataCpu<float> weight, weight_test; BOOST_CHECK(weight == weight_test); } // TODO copy test! #if COMPILE_WITH_CUDA BOOST_AUTO_TEST_CASE(gettersAndSetters2) { WeightTensorDataGpu<float> weight; weight.setLayer1Size(2); weight.setLayer2Size(3); weight.setNSolverParams(4); weight.setNSharedWeights(2); Eigen::Tensor<float, 2> weight_tensor(2, 3), error_tensor(2, 3); Eigen::Tensor<float, 3> solver_params_tensor(2, 3, 4), shared_weights_tensor(2, 3, 2); weight_tensor.setConstant(0.5); error_tensor.setConstant(1); solver_params_tensor.setConstant(2); shared_weights_tensor.setConstant(3); weight.setWeight(weight_tensor); weight.setError(error_tensor); weight.setSolverParams(solver_params_tensor); weight.setSharedWeights(shared_weights_tensor); BOOST_CHECK_EQUAL(weight.getLayer1Size(), 2); BOOST_CHECK_EQUAL(weight.getLayer2Size(), 3); BOOST_CHECK_EQUAL(weight.getNSolverParams(), 4); BOOST_CHECK_EQUAL(weight.getNSharedWeights(), 2); BOOST_CHECK_EQUAL(weight.getWeight()(1, 2), 0.5); BOOST_CHECK_EQUAL(weight.getError()(0, 0), 1); BOOST_CHECK_EQUAL(weight.getSolverParams()(0, 0, 0), 2); BOOST_CHECK_EQUAL(weight.getSharedWeights()(0, 0, 0), 3); // Test mutability weight.getWeight()(0, 0) = 5; weight.getError()(0, 0) = 6; weight.getSolverParams()(0, 0, 0) = 7; weight.getSharedWeights()(0, 0, 0) = 8; BOOST_CHECK_EQUAL(weight.getWeight()(0, 0), 5); BOOST_CHECK_EQUAL(weight.getError()(0, 0), 6); BOOST_CHECK_EQUAL(weight.getSolverParams()(0, 0, 0), 7); BOOST_CHECK_EQUAL(weight.getSharedWeights()(0, 0, 0), 8); } #endif BOOST_AUTO_TEST_CASE(gettersAndSetters) { WeightTensorDataCpu<float> weight; weight.setLayer1Size(2); weight.setLayer2Size(3); weight.setNSolverParams(4); weight.setNSharedWeights(2); BOOST_CHECK_EQUAL(weight.getTensorSize(), 2 * 3 * sizeof(float)); BOOST_CHECK_EQUAL(weight.getSolverParamsSize(), 2 * 3 * 4 * sizeof(float)); BOOST_CHECK_EQUAL(weight.getSharedWeightsSize(), 2 * 3 * 2 * sizeof(float)); weight.setSinkLayerIntegration("SumOp"); BOOST_CHECK_EQUAL(weight.getSinkLayerIntegration(), "SumOp"); } BOOST_AUTO_TEST_CASE(gettersAndSetters1) { WeightTensorDataCpu<float> weight; weight.setLayer1Size(2); weight.setLayer2Size(3); weight.setNSolverParams(4); weight.setNSharedWeights(2); Eigen::Tensor<float, 2> weight_tensor(2, 3), error_tensor(2, 3); Eigen::Tensor<float, 3> solver_params_tensor(2, 3, 4), shared_weights_tensor(2, 3, 2); weight_tensor.setConstant(0.5); error_tensor.setConstant(1); solver_params_tensor.setConstant(2); shared_weights_tensor.setConstant(3); weight.setWeight(weight_tensor); weight.setError(error_tensor); weight.setSolverParams(solver_params_tensor); weight.setSharedWeights(shared_weights_tensor); BOOST_CHECK_EQUAL(weight.getLayer1Size(), 2); BOOST_CHECK_EQUAL(weight.getLayer2Size(), 3); BOOST_CHECK_EQUAL(weight.getNSolverParams(), 4); BOOST_CHECK_EQUAL(weight.getNSharedWeights(), 2); BOOST_CHECK_EQUAL(weight.getWeight()(1, 2), 0.5); BOOST_CHECK_EQUAL(weight.getError()(0, 0), 1); BOOST_CHECK_EQUAL(weight.getSolverParams()(0, 0, 0), 2); BOOST_CHECK_EQUAL(weight.getSharedWeights()(0, 0, 0), 3); // Test mutability weight.getWeight()(0, 0) = 5; weight.getError()(0, 0) = 6; weight.getSolverParams()(0, 0, 0) = 7; weight.getSharedWeights()(0, 0, 0) = 8; BOOST_CHECK_EQUAL(weight.getWeight()(0, 0), 5); BOOST_CHECK_EQUAL(weight.getError()(0, 0), 6); BOOST_CHECK_EQUAL(weight.getSolverParams()(0, 0, 0), 7); BOOST_CHECK_EQUAL(weight.getSharedWeights()(0, 0, 0), 8); } BOOST_AUTO_TEST_CASE(initWeightTensorData) { WeightTensorDataCpu<float> weight; std::vector<std::pair<int, int>> weight_indices = { std::make_pair(0, 0), std::make_pair(1, 0), std::make_pair(0, 1), std::make_pair(1, 1), std::make_pair(0, 2), std::make_pair(1, 2) }; std::map<std::string, std::vector<std::pair<int, int>>> shared_weight_indices = { {"0", {std::make_pair(0, 0), std::make_pair(1, 0)}}, {"1", {std::make_pair(0, 1), std::make_pair(1, 1)}}, {"2", {std::make_pair(0, 2), std::make_pair(1, 2)}} }; std::vector<float> weight_values = { 1, 1, 2, 2, 3, 3 }; std::vector<float> solver_params = {1, 2, 3, 4}; weight.initWeightTensorData(2, 3, weight_indices, shared_weight_indices, weight_values, true, solver_params, "SumOp"); // Test the layer and param sizes BOOST_CHECK_EQUAL(weight.getLayer1Size(), 2); BOOST_CHECK_EQUAL(weight.getLayer2Size(), 3); BOOST_CHECK_EQUAL(weight.getNSolverParams(), 4); BOOST_CHECK_EQUAL(weight.getNSharedWeights(), 3); for (int j = 0; j < 3; ++j) { for (int i = 0; i < 2; ++i) { BOOST_CHECK_EQUAL(weight.getWeight()(i, j), weight_values[i + j * 2]); } } for (int j = 0; j < 3; ++j) { for (int i = 0; i < 2; ++i) { if (std::to_string(j) == "0") BOOST_CHECK_EQUAL(weight.getSharedWeights()(i, j, 0), 1); else BOOST_CHECK_EQUAL(weight.getSharedWeights()(i, j, 0), 0); if (std::to_string(j) == "1") BOOST_CHECK_EQUAL(weight.getSharedWeights()(i, j, 1), 1); else BOOST_CHECK_EQUAL(weight.getSharedWeights()(i, j, 1), 0); if (std::to_string(j) == "2") BOOST_CHECK_EQUAL(weight.getSharedWeights()(i, j, 2), 1); else BOOST_CHECK_EQUAL(weight.getSharedWeights()(i, j, 2), 0); } } BOOST_CHECK_EQUAL(weight.getError()(0, 0), 0.0); BOOST_CHECK_EQUAL(weight.getError()(1, 2), 0.0); BOOST_CHECK_EQUAL(weight.getSolverParams()(0, 0, 0), 1); BOOST_CHECK_EQUAL(weight.getSolverParams()(0, 0, 1), 2); BOOST_CHECK_EQUAL(weight.getSolverParams()(0, 0, 2), 3); BOOST_CHECK_EQUAL(weight.getSolverParams()(0, 0, 3), 4); } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #define BOOST_TEST_MODULE ModelInterpreter DCG test suite #include <boost/test/included/unit_test.hpp> #include <EvoNet/ml/ModelInterpreterDefaultDevice.h> #include <EvoNet/ml/ModelBuilder.h> // comprehensive architecture tests using namespace EvoNet; using namespace std; Model<float> makeModelFCSum() { /** * Directed Cyclic Graph Toy Network Model */ Node<float> i1, h1, o1, b1, b2; Link l1, l2, l3, lb1, lb2; Weight<float> w1, w2, w3, wb1, wb2; Model<float> model2; // Toy network: 1 hidden layer, fully connected, DCG i1 = Node<float>("0", NodeType::input, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); h1 = Node<float>("1", NodeType::hidden, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); o1 = Node<float>("2", NodeType::output, NodeStatus::initialized, std::make_shared<ReLUOp<float>>(ReLUOp<float>()), std::make_shared<ReLUGradOp<float>>(ReLUGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b1 = Node<float>("3", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); b2 = Node<float>("4", NodeType::bias, NodeStatus::activated, std::make_shared<LinearOp<float>>(LinearOp<float>()), std::make_shared<LinearGradOp<float>>(LinearGradOp<float>()), std::make_shared<SumOp<float>>(SumOp<float>()), std::make_shared<SumErrorOp<float>>(SumErrorOp<float>()), std::make_shared<SumWeightGradOp<float>>(SumWeightGradOp<float>())); // weights std::shared_ptr<WeightInitOp<float>> weight_init; std::shared_ptr<SolverOp<float>> solver; // weight_init.reset(new RandWeightInitOp(1.0)); // No random init for testing weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w1 = Weight<float>("0", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w2 = Weight<float>("1", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); w3 = Weight<float>("2", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb1 = Weight<float>("3", weight_init, solver); weight_init = std::make_shared<ConstWeightInitOp<float>>(ConstWeightInitOp<float>(1.0)); solver = std::make_shared<SGDOp<float>>(SGDOp<float>(0.01, 0.9)); wb2 = Weight<float>("4", weight_init, solver); weight_init.reset(); solver.reset(); // links l1 = Link("0", "0", "1", "0"); l2 = Link("1", "1", "2", "1"); l3 = Link("2", "1", "1", "2"); // cycle lb1 = Link("3", "3", "1", "3"); lb2 = Link("4", "4", "2", "4"); model2.setId(2); model2.addNodes({ i1, h1, o1, b1, b2 }); model2.addWeights({ w1, w2, w3, wb1, wb2 }); model2.addLinks({ l1, l2, l3, lb1, lb2 }); model2.findCycles(); return model2; } BOOST_AUTO_TEST_SUITE(modelInterpreter_DCG) BOOST_AUTO_TEST_CASE(constructor) { ModelInterpreterDefaultDevice<float>* ptr = nullptr; ModelInterpreterDefaultDevice<float>* nullPointer = nullptr; ptr = new ModelInterpreterDefaultDevice<float>(); BOOST_CHECK_NE(ptr, nullPointer); } BOOST_AUTO_TEST_CASE(destructor) { ModelInterpreterDefaultDevice<float>* ptr = nullptr; ptr = new ModelInterpreterDefaultDevice<float>(); delete ptr; } /** * Part 2 test suit for the ModelInterpreter class * * The following test methods that are * required of a standard recurrent neural network */ Model<float> model_getNextInactiveLayer = makeModelFCSum(); BOOST_AUTO_TEST_CASE(getNextInactiveLayerWOBiases) { // Toy network: 1 hidden layer, fully connected, DAG ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created // get the next hidden layer std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_getNextInactiveLayer, FP_operations_map, FP_operations_list); BOOST_CHECK_EQUAL(FP_operations_map.size(), 1); BOOST_CHECK_EQUAL(FP_operations_map.at("1"), 0); BOOST_CHECK_EQUAL(FP_operations_list.size(), 1); BOOST_CHECK_EQUAL(FP_operations_list[0].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].result.sink_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].weight->getName(), "0"); } Model<float> model_getNextInactiveLayerBiases = makeModelFCSum(); BOOST_AUTO_TEST_CASE(getNextInactiveLayerBiases) { // Toy network: 1 hidden layer, fully connected, DAG // Model<float> model_FC_Sum = makeModelFCSum(); ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created // get the next hidden layer std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_getNextInactiveLayerBiases, FP_operations_map, FP_operations_list); std::vector<std::string> sink_nodes_with_biases2; model_interpreter.getNextInactiveLayerBiases(model_getNextInactiveLayerBiases, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); BOOST_CHECK_EQUAL(FP_operations_map.size(), 1); BOOST_CHECK_EQUAL(FP_operations_map.at("1"), 0); BOOST_CHECK_EQUAL(FP_operations_list.size(), 1); BOOST_CHECK_EQUAL(FP_operations_list[0].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].result.sink_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments.size(), 2); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].weight->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].source_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].weight->getName(), "3"); BOOST_CHECK_EQUAL(sink_nodes_with_biases2.size(), 1); BOOST_CHECK_EQUAL(sink_nodes_with_biases2[0], "1"); } Model<float> model_getNextInactiveLayerCycles = makeModelFCSum(); BOOST_AUTO_TEST_CASE(getNextInactiveLayerCycles) { // Toy network: 1 hidden layer, fully connected, DAG // Model<float> model_FC_Sum = makeModelFCSum(); ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created // get the next hidden layer std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_getNextInactiveLayerCycles, FP_operations_map, FP_operations_list); std::vector<std::string> sink_nodes_with_biases2; model_interpreter.getNextInactiveLayerBiases(model_getNextInactiveLayerCycles, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); std::set<std::string> sink_nodes_with_cycles; model_interpreter.getNextInactiveLayerCycles(model_getNextInactiveLayerCycles, FP_operations_map, FP_operations_list, sink_nodes_with_cycles); BOOST_CHECK_EQUAL(FP_operations_map.size(), 1); BOOST_CHECK_EQUAL(FP_operations_map.at("1"), 0); BOOST_CHECK_EQUAL(FP_operations_list.size(), 1); BOOST_CHECK_EQUAL(FP_operations_list[0].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].result.sink_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments.size(), 3); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].weight->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].source_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].weight->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].time_step, 1); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].weight->getName(), "2"); BOOST_CHECK_EQUAL(sink_nodes_with_cycles.size(), 1); BOOST_CHECK_EQUAL(sink_nodes_with_cycles.count("1"), 1); } Model<float> model_pruneInactiveLayerCycles = makeModelFCSum(); BOOST_AUTO_TEST_CASE(pruneInactiveLayerCycles) { // Toy network: 1 hidden layer, fully connected, DAG // Model<float> model_FC_Sum = makeModelFCSum(); ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created // get the next hidden layer std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_pruneInactiveLayerCycles, FP_operations_map, FP_operations_list); std::vector<std::string> sink_nodes_with_biases2; model_interpreter.getNextInactiveLayerBiases(model_pruneInactiveLayerCycles, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); std::set<std::string> sink_nodes_with_cycles; std::map<std::string, int> FP_operations_map_cycles = FP_operations_map; std::vector<OperationList<float>> FP_operations_list_cycles = FP_operations_list; model_interpreter.getNextInactiveLayerCycles(model_pruneInactiveLayerCycles, FP_operations_map_cycles, FP_operations_list_cycles, sink_nodes_with_cycles); model_interpreter.pruneInactiveLayerCycles(model_pruneInactiveLayerCycles, FP_operations_map, FP_operations_map_cycles, FP_operations_list, FP_operations_list_cycles, sink_nodes_with_cycles); BOOST_CHECK_EQUAL(FP_operations_map.size(), 1); BOOST_CHECK_EQUAL(FP_operations_map.at("1"), 0); BOOST_CHECK_EQUAL(FP_operations_list.size(), 1); BOOST_CHECK_EQUAL(FP_operations_list[0].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].result.sink_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments.size(), 3); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[0].weight->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].source_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[1].weight->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].time_step, 1); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_list[0].arguments[2].weight->getName(), "2"); BOOST_CHECK_EQUAL(sink_nodes_with_cycles.size(), 1); BOOST_CHECK_EQUAL(sink_nodes_with_cycles.count("1"), 1); } Model<float> model_expandAllForwardPropogationOperations = makeModelFCSum(); BOOST_AUTO_TEST_CASE(expandAllForwardPropogationOperations) { ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_expandAllForwardPropogationOperations, FP_operations_map, FP_operations_list); std::vector<std::string> sink_nodes_with_biases2; model_interpreter.getNextInactiveLayerBiases(model_expandAllForwardPropogationOperations, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); std::set<std::string> sink_nodes_with_cycles; std::map<std::string, int> FP_operations_map_cycles = FP_operations_map; std::vector<OperationList<float>> FP_operations_list_cycles = FP_operations_list; model_interpreter.getNextInactiveLayerCycles(model_expandAllForwardPropogationOperations, FP_operations_map_cycles, FP_operations_list_cycles, sink_nodes_with_cycles); model_interpreter.pruneInactiveLayerCycles(model_expandAllForwardPropogationOperations, FP_operations_map, FP_operations_map_cycles, FP_operations_list, FP_operations_list_cycles, sink_nodes_with_cycles); std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.expandAllForwardPropogationOperations(FP_operations_list, FP_operations_expanded); BOOST_CHECK_EQUAL(FP_operations_expanded.size(), 3); BOOST_CHECK_EQUAL(FP_operations_expanded[0].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[0].result.sink_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_expanded[0].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[0].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[0].arguments[0].source_node->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_expanded[0].arguments[0].weight->getName(), "0"); BOOST_CHECK_EQUAL(FP_operations_expanded[1].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[1].result.sink_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_expanded[1].arguments[0].time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[1].arguments[0].source_node->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_expanded[1].arguments[0].weight->getName(), "3"); BOOST_CHECK_EQUAL(FP_operations_expanded[2].result.time_step, 0); BOOST_CHECK_EQUAL(FP_operations_expanded[2].result.sink_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_expanded[2].arguments.size(), 1); BOOST_CHECK_EQUAL(FP_operations_expanded[2].arguments[0].time_step, 1); BOOST_CHECK_EQUAL(FP_operations_expanded[2].arguments[0].source_node->getName(), "1"); BOOST_CHECK_EQUAL(FP_operations_expanded[2].arguments[0].weight->getName(), "2"); } Model<float> model_getTensorOperations = makeModelFCSum(); BOOST_AUTO_TEST_CASE(getTensorOperations) { ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_getTensorOperations, FP_operations_map, FP_operations_list); std::vector<std::string> sink_nodes_with_biases2; model_interpreter.getNextInactiveLayerBiases(model_getTensorOperations, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); std::set<std::string> sink_nodes_with_cycles; std::map<std::string, int> FP_operations_map_cycles = FP_operations_map; std::vector<OperationList<float>> FP_operations_list_cycles = FP_operations_list; model_interpreter.getNextInactiveLayerCycles(model_getTensorOperations, FP_operations_map_cycles, FP_operations_list_cycles, sink_nodes_with_cycles); model_interpreter.pruneInactiveLayerCycles(model_getTensorOperations, FP_operations_map, FP_operations_map_cycles, FP_operations_list, FP_operations_list_cycles, sink_nodes_with_cycles); std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.expandAllForwardPropogationOperations(FP_operations_list, FP_operations_expanded); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); BOOST_CHECK_EQUAL(identified_sink_nodes.size(), 3); BOOST_CHECK_EQUAL(identified_sink_nodes.count("1/0"), 1); BOOST_CHECK_EQUAL(identified_sink_nodes.count("1/1"), 1); BOOST_CHECK_EQUAL(identified_sink_nodes.count("1/2"), 1); BOOST_CHECK_EQUAL(tensor_ops.size(), 2); BOOST_CHECK_EQUAL(tensor_ops.at("1/0")[0], 0); BOOST_CHECK_EQUAL(tensor_ops.at("1/0")[1], 1); BOOST_CHECK_EQUAL(tensor_ops.at("1/2")[0], 2); } Model<float> model_getForwardPropogationLayerTensorDimensions = makeModelFCSum(); BOOST_AUTO_TEST_CASE(getForwardPropogationLayerTensorDimensions) { ModelInterpreterDefaultDevice<float> model_interpreter; // initialize nodes // NOTE: input and biases have been activated when the model was created // Check iteration one with no source/sink/weight tensors already allocated std::map<std::string, int> FP_operations_map; std::vector<OperationList<float>> FP_operations_list; model_interpreter.getNextInactiveLayerWOBiases(model_getForwardPropogationLayerTensorDimensions, FP_operations_map, FP_operations_list); std::vector<std::string> sink_nodes_with_biases2; model_interpreter.getNextInactiveLayerBiases(model_getForwardPropogationLayerTensorDimensions, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); std::set<std::string> sink_nodes_with_cycles; std::map<std::string, int> FP_operations_map_cycles = FP_operations_map; std::vector<OperationList<float>> FP_operations_list_cycles = FP_operations_list; model_interpreter.getNextInactiveLayerCycles(model_getForwardPropogationLayerTensorDimensions, FP_operations_map_cycles, FP_operations_list_cycles, sink_nodes_with_cycles); model_interpreter.pruneInactiveLayerCycles(model_getForwardPropogationLayerTensorDimensions, FP_operations_map, FP_operations_map_cycles, FP_operations_list, FP_operations_list_cycles, sink_nodes_with_cycles); std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.expandAllForwardPropogationOperations(FP_operations_list, FP_operations_expanded); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); std::map<int, int> max_layer_sizes; std::map<std::string, int> layer_name_pos; std::vector<int> source_layer_sizes, sink_layer_sizes; std::vector<std::vector<std::pair<int, int>>> weight_indices; std::vector<std::map<std::string, std::vector<std::pair<int, int>>>> shared_weight_indices; std::vector<std::vector<float>> weight_values; std::vector<bool> make_source_tensors, make_sink_tensors, make_weight_tensors; std::vector<int> source_layer_pos, sink_layer_pos; int tensor_layers_cnt = 0; int weight_layers_cnt = 0; model_interpreter.getForwardPropogationLayerTensorDimensions(FP_operations_expanded, tensor_ops, source_layer_sizes, sink_layer_sizes, weight_indices, shared_weight_indices, weight_values, make_source_tensors, make_sink_tensors, make_weight_tensors, source_layer_pos, sink_layer_pos, max_layer_sizes, layer_name_pos, tensor_layers_cnt, weight_layers_cnt); BOOST_CHECK_EQUAL(source_layer_sizes.size(), 2); BOOST_CHECK_EQUAL(source_layer_sizes[0], 2); BOOST_CHECK_EQUAL(source_layer_sizes[1], 1); BOOST_CHECK_EQUAL(sink_layer_sizes.size(), 2); BOOST_CHECK_EQUAL(sink_layer_sizes[0], 1); BOOST_CHECK_EQUAL(sink_layer_sizes[1], 1); BOOST_CHECK_EQUAL(source_layer_pos.size(), 2); BOOST_CHECK_EQUAL(source_layer_pos.at(0), 1); BOOST_CHECK_EQUAL(source_layer_pos.at(1), 0); BOOST_CHECK_EQUAL(sink_layer_pos.size(), 2); BOOST_CHECK_EQUAL(sink_layer_pos.at(0), 0); BOOST_CHECK_EQUAL(sink_layer_pos.at(1), 0); BOOST_CHECK_EQUAL(max_layer_sizes.size(), 2); BOOST_CHECK_EQUAL(max_layer_sizes.at(0), 0); BOOST_CHECK_EQUAL(max_layer_sizes.at(1), 1); BOOST_CHECK_EQUAL(layer_name_pos.size(), 0); BOOST_CHECK_EQUAL(weight_indices.size(), 2); BOOST_CHECK_EQUAL(weight_indices[0].size(), 2); BOOST_CHECK_EQUAL(weight_indices[1].size(), 1); std::vector<std::vector<std::pair<int, int>>> weight_indices_test1 = { {std::make_pair(0,0),std::make_pair(1,0)}, {std::make_pair(0,0)} }; for (int tensor_iter = 0; tensor_iter < weight_indices_test1.size(); ++tensor_iter) { for (int i = 0; i < weight_indices_test1[tensor_iter].size(); ++i) { BOOST_CHECK_EQUAL(weight_indices[tensor_iter][i].first, weight_indices_test1[tensor_iter][i].first); BOOST_CHECK_EQUAL(weight_indices[tensor_iter][i].second, weight_indices_test1[tensor_iter][i].second); } } BOOST_CHECK_EQUAL(shared_weight_indices.size(), 2); BOOST_CHECK_EQUAL(shared_weight_indices[0].size(), 0); BOOST_CHECK_EQUAL(shared_weight_indices[1].size(), 0); BOOST_CHECK_EQUAL(weight_values.size(), 2); BOOST_CHECK_EQUAL(weight_values[0].size(), 2); BOOST_CHECK_EQUAL(weight_values[1].size(), 1); std::vector<std::vector<float>> weight_values_test1 = { {1, 1}, {1} }; for (int tensor_iter = 0; tensor_iter < weight_values_test1.size(); ++tensor_iter) { for (int i = 0; i < weight_values_test1[tensor_iter].size(); ++i) { BOOST_CHECK_EQUAL(weight_values[tensor_iter][i], weight_values_test1[tensor_iter][i]); } } BOOST_CHECK_EQUAL(make_source_tensors.size(), 2); BOOST_CHECK(make_source_tensors[0]); BOOST_CHECK(!make_source_tensors[1]); BOOST_CHECK_EQUAL(make_sink_tensors.size(), 2); BOOST_CHECK(make_sink_tensors[0]); BOOST_CHECK(!make_sink_tensors[1]); BOOST_CHECK_EQUAL(make_weight_tensors.size(), 2); BOOST_CHECK(make_weight_tensors[0]); BOOST_CHECK(make_weight_tensors[1]); // Check iteration two model_getForwardPropogationLayerTensorDimensions.getNodesMap().at("1")->setStatus(NodeStatus::activated); FP_operations_map.clear(); FP_operations_list.clear(); model_interpreter.getNextInactiveLayerWOBiases(model_getForwardPropogationLayerTensorDimensions, FP_operations_map, FP_operations_list); sink_nodes_with_biases2.clear(); model_interpreter.getNextInactiveLayerBiases(model_getForwardPropogationLayerTensorDimensions, FP_operations_map, FP_operations_list, sink_nodes_with_biases2); sink_nodes_with_cycles.clear(); FP_operations_map_cycles = FP_operations_map; FP_operations_list_cycles = FP_operations_list; model_interpreter.getNextInactiveLayerCycles(model_getForwardPropogationLayerTensorDimensions, FP_operations_map_cycles, FP_operations_list_cycles, sink_nodes_with_cycles); model_interpreter.pruneInactiveLayerCycles(model_getForwardPropogationLayerTensorDimensions, FP_operations_map, FP_operations_map_cycles, FP_operations_list, FP_operations_list_cycles, sink_nodes_with_cycles); FP_operations_expanded.clear(); model_interpreter.expandAllForwardPropogationOperations(FP_operations_list, FP_operations_expanded); identified_sink_nodes.clear(); tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); max_layer_sizes.clear(); layer_name_pos.clear(); source_layer_sizes.clear(); sink_layer_sizes.clear(); weight_indices.clear(); shared_weight_indices.clear(); weight_values.clear(); make_source_tensors.clear(); make_sink_tensors.clear(); make_weight_tensors.clear(); source_layer_pos.clear(); sink_layer_pos.clear(); tensor_layers_cnt = 0; weight_layers_cnt = 0; model_interpreter.getForwardPropogationLayerTensorDimensions(FP_operations_expanded, tensor_ops, source_layer_sizes, sink_layer_sizes, weight_indices, shared_weight_indices, weight_values, make_source_tensors, make_sink_tensors, make_weight_tensors, source_layer_pos, sink_layer_pos, max_layer_sizes, layer_name_pos, tensor_layers_cnt, weight_layers_cnt); BOOST_CHECK_EQUAL(source_layer_sizes.size(), 2); BOOST_CHECK_EQUAL(source_layer_sizes[0], 1); BOOST_CHECK_EQUAL(source_layer_sizes[1], 1); BOOST_CHECK_EQUAL(sink_layer_sizes.size(), 2); BOOST_CHECK_EQUAL(sink_layer_sizes[0], 1); BOOST_CHECK_EQUAL(sink_layer_sizes[1], 1); BOOST_CHECK_EQUAL(source_layer_pos.size(), 2); BOOST_CHECK_EQUAL(source_layer_pos.at(0), 0); BOOST_CHECK_EQUAL(source_layer_pos.at(1), 1); BOOST_CHECK_EQUAL(sink_layer_pos.size(), 2); BOOST_CHECK_EQUAL(sink_layer_pos.at(0), 0); BOOST_CHECK_EQUAL(sink_layer_pos.at(1), 0); BOOST_CHECK_EQUAL(max_layer_sizes.size(), 2); BOOST_CHECK_EQUAL(max_layer_sizes.at(0), 0); BOOST_CHECK_EQUAL(max_layer_sizes.at(1), 0); BOOST_CHECK_EQUAL(layer_name_pos.size(), 0); BOOST_CHECK_EQUAL(weight_indices.size(), 2); BOOST_CHECK_EQUAL(weight_indices[0].size(), 1); BOOST_CHECK_EQUAL(weight_indices[1].size(), 1); std::vector<std::vector<std::pair<int,int>>> weight_indices_test2 = { {std::make_pair(0,0)},{std::make_pair(0,0) } }; for (int tensor_iter = 0; tensor_iter < weight_indices_test2.size(); ++tensor_iter) { for (int i = 0; i < weight_indices_test2[tensor_iter].size(); ++i) { BOOST_CHECK_EQUAL(weight_indices[tensor_iter][i].first, weight_indices_test2[tensor_iter][i].first); BOOST_CHECK_EQUAL(weight_indices[tensor_iter][i].second, weight_indices_test2[tensor_iter][i].second); } } BOOST_CHECK_EQUAL(shared_weight_indices.size(), 2); BOOST_CHECK_EQUAL(shared_weight_indices[0].size(), 0); BOOST_CHECK_EQUAL(shared_weight_indices[1].size(), 0); BOOST_CHECK_EQUAL(weight_values.size(), 2); BOOST_CHECK_EQUAL(weight_values[0].size(), 1); BOOST_CHECK_EQUAL(weight_values[1].size(), 1); std::vector<std::vector<float>> weight_values_test2 = { {1}, {1} }; for (int tensor_iter = 0; tensor_iter < weight_values_test2.size(); ++tensor_iter) { for (int i = 0; i < weight_values_test2[tensor_iter].size(); ++i) { BOOST_CHECK_EQUAL(weight_values[tensor_iter][i], weight_values_test2[tensor_iter][i]); } } BOOST_CHECK_EQUAL(make_source_tensors.size(), 2); BOOST_CHECK(!make_source_tensors[0]); BOOST_CHECK(make_source_tensors[1]); BOOST_CHECK_EQUAL(make_sink_tensors.size(), 2); BOOST_CHECK(make_sink_tensors[0]); BOOST_CHECK(!make_sink_tensors[1]); BOOST_CHECK_EQUAL(make_weight_tensors.size(), 2); BOOST_CHECK(make_weight_tensors[0]); BOOST_CHECK(make_weight_tensors[1]); } /* The following test the expected `tensor_ops_steps` and `FP_operations` for more complicated model structures */ template<typename TensorT> void makeModelLSTM(Model<TensorT>& model, const int& n_inputs, int n_blocks = 2, int n_cells = 2, bool specify_layers = false) { model.setId(0); model.setName("LSTM"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layers); // Add the LSTM layer std::vector<std::string> node_names = model_builder.addLSTM(model, "LSTM", "LSTM", node_names_input, n_blocks, n_cells, std::shared_ptr<ActivationOp<TensorT>>(new ReLUOp<float>()), std::shared_ptr<ActivationOp<TensorT>>(new ReLUGradOp<float>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), //std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(0.4)), std::shared_ptr<WeightInitOp<TensorT>>(new RangeWeightInitOp<TensorT>(0.0, 1.0)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.0005, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, true, true, 1, specify_layers); // Add a final output layer (Specify the layer name to ensure the output is always on its own tensor!!!) node_names = model_builder.addFullyConnected(model, "Output", "Output", node_names, 1, std::make_shared<LinearOp<TensorT>>(LinearOp<float>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<float>()), std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()), std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()), std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()), std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 2)), std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(0.001, 0.9, 0.999, 1e-8)), 0.0f, 0.0f, true, true); for (const std::string& node_name : node_names) model.getNodesMap().at(node_name)->setType(NodeType::output); if (!model.checkCompleteInputToOutput()) std::cout << "Model input and output are not fully connected!" << std::endl; } BOOST_AUTO_TEST_CASE(makeModelLSTM1) { ModelInterpreterDefaultDevice<float> model_interpreter; // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model_test; model_test.findCycles(); makeModelLSTM(model_test, 2, 1, 2, true); int iter_test = 0; std::vector<OperationList<float>> FP_operations_expanded_test; model_interpreter.getFPOpsOoO_(model_test, FP_operations_expanded_test, iter_test); std::set<std::string> identified_sink_nodes_test; std::map<std::string, std::vector<int>> tensor_ops_test = model_interpreter.getTensorOperations(FP_operations_expanded_test, identified_sink_nodes_test, true); // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model; model.findCycles(); makeModelLSTM(model, 2, 1, 2, false); int iter = 0; std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.getFPOpsOoO_(model, FP_operations_expanded, iter); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); BOOST_CHECK_EQUAL(iter_test, iter); BOOST_CHECK(tensor_ops_test == tensor_ops); BOOST_CHECK(identified_sink_nodes_test == identified_sink_nodes); BOOST_CHECK_EQUAL(FP_operations_expanded_test.size(), FP_operations_expanded.size()); if (tensor_ops_test == tensor_ops && identified_sink_nodes_test == identified_sink_nodes && FP_operations_expanded_test.size() == FP_operations_expanded.size()) { for (int i = 0; i < FP_operations_expanded_test.size(); ++i) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.sink_node->getName(), FP_operations_expanded[i].result.sink_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.time_step, FP_operations_expanded[i].result.time_step); for (int j = 0; j < FP_operations_expanded_test[i].arguments.size(); ++j) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].source_node->getName(), FP_operations_expanded[i].arguments[j].source_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].weight->getName(), FP_operations_expanded[i].arguments[j].weight->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].time_step, FP_operations_expanded[i].arguments[j].time_step); } } } } BOOST_AUTO_TEST_CASE(makeModelLSTM2) { ModelInterpreterDefaultDevice<float> model_interpreter; // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model_test; model_test.findCycles(); makeModelLSTM(model_test, 2, 4, 2, true); int iter_test = 0; std::vector<OperationList<float>> FP_operations_expanded_test; model_interpreter.getFPOpsOoO_(model_test, FP_operations_expanded_test, iter_test); std::set<std::string> identified_sink_nodes_test; std::map<std::string, std::vector<int>> tensor_ops_test = model_interpreter.getTensorOperations(FP_operations_expanded_test, identified_sink_nodes_test, true); // Determine the tensor_ops_steps and FP_operations for the manually specified layer case Model<float> model; model.findCycles(); makeModelLSTM(model, 2, 4, 2, false); int iter = 0; std::vector<OperationList<float>> FP_operations_expanded; model_interpreter.getFPOpsOoO_(model, FP_operations_expanded, iter); std::set<std::string> identified_sink_nodes; std::map<std::string, std::vector<int>> tensor_ops = model_interpreter.getTensorOperations(FP_operations_expanded, identified_sink_nodes, false); BOOST_CHECK_EQUAL(iter_test, iter); BOOST_CHECK(tensor_ops_test == tensor_ops); BOOST_CHECK(identified_sink_nodes_test == identified_sink_nodes); BOOST_CHECK_EQUAL(FP_operations_expanded_test.size(), FP_operations_expanded.size()); if (tensor_ops_test == tensor_ops && identified_sink_nodes_test == identified_sink_nodes && FP_operations_expanded_test.size() == FP_operations_expanded.size()) { for (int i = 0; i < FP_operations_expanded_test.size(); ++i) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.sink_node->getName(), FP_operations_expanded[i].result.sink_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].result.time_step, FP_operations_expanded[i].result.time_step); for (int j = 0; j < FP_operations_expanded_test[i].arguments.size(); ++j) { BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].source_node->getName(), FP_operations_expanded[i].arguments[j].source_node->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].weight->getName(), FP_operations_expanded[i].arguments[j].weight->getName()); BOOST_CHECK_EQUAL(FP_operations_expanded_test[i].arguments[j].time_step, FP_operations_expanded[i].arguments[j].time_step); } } } } BOOST_AUTO_TEST_SUITE_END()<file_sep>/**TODO: Add copyright*/ #include <EvoNet/ml/PopulationTrainerDefaultDevice.h> #include <EvoNet/ml/ModelTrainerDefaultDevice.h> #include <EvoNet/ml/ModelReplicator.h> #include <EvoNet/ml/ModelBuilder.h> #include <EvoNet/ml/Model.h> #include <EvoNet/io/PopulationTrainerFile.h> #include <EvoNet/io/ModelInterpreterFileDefaultDevice.h> #include <EvoNet/io/ModelFile.h> #include <EvoNet/simulator/MNISTSimulator.h> #include <unsupported/Eigen/CXX11/Tensor> using namespace EvoNet; // Extended template<typename TensorT> class ModelTrainerExt : public ModelTrainerDefaultDevice<TensorT> { public: /* @brief Basic VAE with Xavier-like initialization References: Based on Kingma et al, 2014: https://arxiv.org/pdf/1312.6114 https://github.com/pytorch/examples/blob/master/vae/main.py @param[in, out] model The network model @param[in] n_inputs The number of input pixels @param[in] n_encodings The length of the encodings layer @param[in] n_hidden The length of the hidden layers @param[in] specify_layers Whether to give the `ModelInterpreter` "hints" as to the correct network structure during graph to tensor compilation */ void makeVAEFullyConn(Model<TensorT>& model, const int& n_inputs = 784, const int& n_encodings = 64, const int& n_hidden_0 = 512, const int& n_hidden_1 = 256, const int& n_hidden_2 = 64, const bool& add_feature_norm = true, const bool& add_batch_norm = true, const bool& add_bias = true, const bool& specify_layers = false) { model.setId(0); model.setName("VAE"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layers); // Define the activation based on `add_feature_norm` std::shared_ptr<ActivationOp<TensorT>> activation, activation_grad; if (add_feature_norm || add_batch_norm) { activation = std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()); activation_grad = std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()); } else { activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); //activation = std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>(1e-24, 0, 1)); //activation_grad = std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()); } std::shared_ptr<ActivationOp<TensorT>> activation_norm = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); std::shared_ptr<ActivationOp<TensorT>> activation_norm_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); //std::shared_ptr<ActivationOp<TensorT>> activation_norm = std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>(1e-24, 0, 1)); //std::shared_ptr<ActivationOp<TensorT>> activation_norm_grad = std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()); // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-5, 0.9, 0.999, 1e-8, 10)); // Add the Endocer FC layers std::vector<std::string> node_names_mu, node_names_logvar; if (n_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "EN0", "EN0", node_names, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); if (add_batch_norm) { node_names = model_builder.addSinglyConnected(model, "EN0-BatchNorm", "EN0-BatchNorm", node_names, node_names.size(), std::make_shared<BatchNormOp<TensorT>>(BatchNormOp<TensorT>()), std::make_shared<BatchNormGradOp<TensorT>>(BatchNormGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); node_names = model_builder.addSinglyConnected(model, "EN0-BatchNorm-gain", "EN0-BatchNorm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } if (add_feature_norm) { node_names = model_builder.addNormalization(model, "EN0-FeatureNorm", "EN0-FeatureNorm", node_names, true); node_names = model_builder.addSinglyConnected(model, "EN0-FeatureNorm-gain", "EN0-FeatureNorm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); //std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, 0.0, false, specify_layers); } } if (n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "EN1", "EN1", node_names, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); if (add_batch_norm) { node_names = model_builder.addSinglyConnected(model, "EN1-BatchNorm", "EN1-BatchNorm", node_names, node_names.size(), std::make_shared<BatchNormOp<TensorT>>(BatchNormOp<TensorT>()), std::make_shared<BatchNormGradOp<TensorT>>(BatchNormGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); node_names = model_builder.addSinglyConnected(model, "EN1-BatchNorm-gain", "EN1-BatchNorm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } if (add_feature_norm) { node_names = model_builder.addNormalization(model, "EN1-FeatureNorm", "EN1-FeatureNorm", node_names, true); node_names = model_builder.addSinglyConnected(model, "EN1-FeatureNorm-gain", "EN1-FeatureNorm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); //std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, 0.0, false, specify_layers); } } if (n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "EN2", "EN2", node_names, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); if (add_batch_norm) { node_names = model_builder.addSinglyConnected(model, "EN2-BatchNorm", "EN2-BatchNorm", node_names, node_names.size(), std::make_shared<BatchNormOp<TensorT>>(BatchNormOp<TensorT>()), std::make_shared<BatchNormGradOp<TensorT>>(BatchNormGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); node_names = model_builder.addSinglyConnected(model, "EN2-BatchNorm-gain", "EN2-BatchNorm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } if (add_feature_norm) { node_names = model_builder.addNormalization(model, "EN2-FeatureNorm", "EN2-FeatureNorm", node_names, true); node_names = model_builder.addSinglyConnected(model, "EN2-FeatureNorm-gain", "EN2-FeatureNorm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); //std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, 0.0, false, specify_layers); } } node_names_mu = model_builder.addFullyConnected(model, "MuEnc", "MuEnc", node_names, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_encodings) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_logvar = model_builder.addFullyConnected(model, "LogVarEnc", "LogVarEnc", node_names, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_encodings) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); // Add the Encoding layers node_names = model_builder.addGaussianEncoding(model, "Encoding", "Encoding", node_names_mu, node_names_logvar, specify_layers); node_names = node_names_mu; // FIXME // Add the Decoder FC layers if (n_hidden_2 > 0) { node_names = model_builder.addFullyConnected(model, "DE2", "DE2", node_names, n_hidden_2, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_2) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); if (add_feature_norm) { node_names = model_builder.addNormalization(model, "DE2-FeatureNorm", "DE2-FeatureNorm", node_names, true); node_names = model_builder.addSinglyConnected(model, "DE2-FeatureNorm-gain", "DE2-FeatureNorm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); //std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, 0.0, false, specify_layers); } } if (n_hidden_1 > 0) { node_names = model_builder.addFullyConnected(model, "DE1", "DE1", node_names, n_hidden_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_1) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); if (add_batch_norm) { node_names = model_builder.addSinglyConnected(model, "DE1-BatchNorm", "DE1-BatchNorm", node_names, node_names.size(), std::make_shared<BatchNormOp<TensorT>>(BatchNormOp<TensorT>()), std::make_shared<BatchNormGradOp<TensorT>>(BatchNormGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); node_names = model_builder.addSinglyConnected(model, "DE1-BatchNorm-gain", "DE1-BatchNorm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } if (add_feature_norm) { node_names = model_builder.addNormalization(model, "DE1-FeatureNorm", "DE1-FeatureNorm", node_names, true); node_names = model_builder.addSinglyConnected(model, "DE1-FeatureNorm-gain", "DE1-FeatureNorm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); //std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, 0.0, false, specify_layers); } } if (n_hidden_0 > 0) { node_names = model_builder.addFullyConnected(model, "DE0", "DE0", node_names, n_hidden_0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_hidden_0) / 2, 1)), solver_op, 0.0f, 0.0f, add_bias, specify_layers); if (add_batch_norm) { node_names = model_builder.addSinglyConnected(model, "DE2-BatchNorm", "DE2-BatchNorm", node_names, node_names.size(), std::make_shared<BatchNormOp<TensorT>>(BatchNormOp<TensorT>()), std::make_shared<BatchNormGradOp<TensorT>>(BatchNormGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); node_names = model_builder.addSinglyConnected(model, "DE2-BatchNorm-gain", "DE2-BatchNorm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } if (add_feature_norm) { node_names = model_builder.addNormalization(model, "DE0-FeatureNorm", "DE0-FeatureNorm", node_names, true); node_names = model_builder.addSinglyConnected(model, "DE0-FeatureNorm-gain", "DE0-FeatureNorm-gain", node_names, node_names.size(), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); //std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0, 0.0, false, specify_layers); } } node_names = model_builder.addFullyConnected(model, "DE-Output", "DE-Output", node_names, n_inputs, //std::make_shared<SigmoidOp<TensorT>>(SigmoidOp<TensorT>()), //std::make_shared<SigmoidGradOp<TensorT>>(SigmoidGradOp<TensorT>()), activation_norm, activation_norm_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names.size(), 1)), solver_op, 0.0f, 0.0f, add_bias, true); // Add the actual output nodes node_names_mu = model_builder.addSinglyConnected(model, "Mu", "Mu", node_names_mu, node_names_mu.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); node_names_logvar = model_builder.addSinglyConnected(model, "LogVar", "LogVar", node_names_logvar, node_names_logvar.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); node_names = model_builder.addSinglyConnected(model, "Output", "Output", node_names, n_inputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names_mu) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_logvar) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names) model.nodes_.at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } /* @brief Convolution Variational Autoencoder References: Inspired by: https://github.com/pytorch/examples/blob/master/mnist/main.py Based on Dupont et al, 2018: arXiv:1804.00104 https://github.com/Schlumberger/joint-vae */ void makeVAECovNet(Model<TensorT>& model, const int& n_inputs, const int& n_encodings, const int& n_enc_depth_1 = 32, const int& n_enc_depth_2 = 2, const int& n_enc_depth_3 = 2, const int& n_dec_depth_1 = 2, const int& n_dec_depth_2 = 2, const int& n_dec_depth_3 = 1, const int& n_enc_fc_1 = 128, const int& n_dec_fc_1 = 126, const int& filter_size = 4, const int& stride_size = 2, const bool& add_feature_norm = false, const bool& specify_layers = false) { model.setId(0); model.setName("VAE"); ModelBuilder<TensorT> model_builder; // Add the inputs std::vector<std::string> node_names_input = model_builder.addInputNodes(model, "Input", "Input", n_inputs, specify_layers); // Define the activation based on `add_feature_norm` std::shared_ptr<ActivationOp<TensorT>> activation, activation_grad; if (add_feature_norm) { activation = std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()); activation_grad = std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()); } else { activation = std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()); activation_grad = std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()); } // Define the node integration auto integration_op = std::make_shared<SumOp<TensorT>>(SumOp<TensorT>()); auto integration_error_op = std::make_shared<SumErrorOp<TensorT>>(SumErrorOp<TensorT>()); auto integration_weight_grad_op = std::make_shared<SumWeightGradOp<TensorT>>(SumWeightGradOp<TensorT>()); // Define the solver auto solver_op = std::make_shared<AdamOp<TensorT>>(AdamOp<TensorT>(5e-4, 0.9, 0.999, 1e-8, 10)); // Add the first convolution -> ReLU layers std::vector<std::vector<std::string>> node_names_l0; for (size_t d = 0; d < n_enc_depth_1; ++d) { std::vector<std::string> node_names; std::string conv_name = "Enc-Conv0-" + std::to_string(d); node_names = model_builder.addConvolution(model, conv_name, conv_name, node_names_input, sqrt(node_names_input.size()), sqrt(node_names_input.size()), 0, 0, filter_size, filter_size, stride_size, 0, 0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(filter_size * filter_size, 2)), solver_op, 0.0f, 0.0f, false, specify_layers); if (add_feature_norm) { std::string norm_name = "Enc-FeatureNorm0-" + std::to_string(d); node_names = model_builder.addNormalization(model, norm_name, norm_name, node_names, specify_layers); std::string gain_name = "Enc-Gain0-" + std::to_string(d); node_names = model_builder.addSinglyConnected(model, gain_name, gain_name, node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } node_names_l0.push_back(node_names); } // Add the second convolution -> ReLU layers std::vector<std::vector<std::string>> node_names_l1; int l_cnt = 0; for (const std::vector<std::string>& node_names_l : node_names_l0) { for (size_t d = 0; d < n_enc_depth_2; ++d) { std::vector<std::string> node_names; std::string conv_name = "Enc-Conv1-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addConvolution(model, conv_name, conv_name, node_names_l, sqrt(node_names_l.size()), sqrt(node_names_l.size()), 0, 0, filter_size, filter_size, stride_size, 0, 0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(filter_size * filter_size, 2)), solver_op, 0.0f, 0.0f, false, specify_layers); if (add_feature_norm) { std::string norm_name = "Enc-FeatureNorm1-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addNormalization(model, norm_name, norm_name, node_names, specify_layers); std::string gain_name = "Enc-Gain1-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addSinglyConnected(model, gain_name, gain_name, node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } node_names_l1.push_back(node_names); } ++l_cnt; } // Add the third convolution -> ReLU layers std::vector<std::vector<std::string>> node_names_l2; l_cnt = 0; for (const std::vector<std::string>& node_names_l : node_names_l1) { for (size_t d = 0; d < n_enc_depth_3; ++d) { std::vector<std::string> node_names; std::string conv_name = "Enc-Conv2-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addConvolution(model, conv_name, conv_name, node_names_l, sqrt(node_names_l.size()), sqrt(node_names_l.size()), 0, 0, filter_size, filter_size, stride_size, 0, 0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(filter_size * filter_size, 2)), solver_op, 0.0f, 0.0f, false, specify_layers); if (add_feature_norm) { std::string norm_name = "Enc-FeatureNorm2-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addNormalization(model, norm_name, norm_name, node_names, specify_layers); std::string gain_name = "Enc-Gain2-" + std::to_string(l_cnt) + "-" + std::to_string(d); node_names = model_builder.addSinglyConnected(model, gain_name, gain_name, node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } node_names_l2.push_back(node_names); } ++l_cnt; } // Linearize the node names std::vector<std::string> node_names_conv_linearized; int last_conv_depth; if (node_names_l2.size()) { for (const std::vector<std::string>& node_names_l : node_names_l2) { for (const std::string& node_name : node_names_l) { node_names_conv_linearized.push_back(node_name); } } last_conv_depth = n_enc_depth_3 * n_enc_depth_2 * n_enc_depth_1; } else if (node_names_l1.size()) { for (const std::vector<std::string>& node_names_l : node_names_l1) { for (const std::string& node_name : node_names_l) { node_names_conv_linearized.push_back(node_name); } } last_conv_depth = n_enc_depth_2 * n_enc_depth_1; } else { for (const std::vector<std::string>& node_names_l : node_names_l0) { for (const std::string& node_name : node_names_l) { node_names_conv_linearized.push_back(node_name); } } last_conv_depth = n_enc_depth_1; } // Add the FC layers std::vector<std::string> node_names_enc_fc = model_builder.addFullyConnected(model, "Enc-FC0", "Enc-FC0", node_names_conv_linearized, n_enc_fc_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(node_names_conv_linearized.size() + n_enc_fc_1, 2)), solver_op, 0.0f, 0.0f, false, specify_layers); if (add_feature_norm) { node_names_enc_fc = model_builder.addNormalization(model, "Enc-FC0-FeatureNorm", "Enc-FC0-FeatureNorm", node_names_enc_fc, true); node_names_enc_fc = model_builder.addSinglyConnected(model, "Enc-FC0-FeatureNorm-gain", "Enc-FC0-FeatureNorm-gain", node_names_enc_fc, node_names_enc_fc.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } // Add the Encoding layers std::vector<std::string> node_names_mu = model_builder.addFullyConnected(model, "MuEnc", "MuEnc", node_names_enc_fc, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_enc_fc.size() + n_encodings) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); std::vector<std::string> node_names_logvar = model_builder.addFullyConnected(model, "LogVarEnc", "LogVarEnc", node_names_enc_fc, n_encodings, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_enc_fc.size() + n_encodings) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); // Add the Encoding layers std::vector<std::string> node_names_encoder = model_builder.addGaussianEncoding(model, "Encoding", "Encoding", node_names_mu, node_names_logvar, specify_layers); // Add the Decoder FC layers std::vector<std::string> node_names_dec_fc0 = model_builder.addFullyConnected(model, "Dec-FC0", "Dec-FC0", node_names_encoder, n_dec_fc_1, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names_encoder.size() + n_dec_fc_1) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); if (add_feature_norm) { node_names_dec_fc0 = model_builder.addNormalization(model, "Dec-FC0-FeatureNorm", "Dec-FC0-FeatureNorm", node_names_dec_fc0, true); node_names_dec_fc0 = model_builder.addSinglyConnected(model, "Dec-FC0-FeatureNorm-gain", "Dec-FC0-FeatureNorm-gain", node_names_dec_fc0, node_names_dec_fc0.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } // Add the Decoder FC layer to begin the transpose convolutions const int n_dec_fc = node_names_conv_linearized.size() / last_conv_depth; int node_iter = 0; std::vector<std::vector<std::string>> node_names_dec_fc1; for (size_t d = 0; d < last_conv_depth; ++d) { std::vector<std::string> node_names; std::string fc_name = "Dec-FC1-" + std::to_string(d); node_names = model_builder.addFullyConnected(model, fc_name, fc_name, node_names_dec_fc0, n_dec_fc, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>((TensorT)(node_names.size() + n_dec_fc) / 2, 1)), solver_op, 0.0f, 0.0f, false, specify_layers); if (add_feature_norm) { std::string norm_name = "Dec-FC1-FeatureNorm-" + std::to_string(d); node_names = model_builder.addNormalization(model, norm_name, norm_name, node_names, true); std::string gain_name = "Dec-FC1-FeatureNorm-gain-" + std::to_string(d); node_names = model_builder.addSinglyConnected(model, gain_name, gain_name, node_names, node_names.size(), std::make_shared<LeakyReLUOp<TensorT>>(LeakyReLUOp<TensorT>()), // Nonlinearity occures after the normalization std::make_shared<LeakyReLUGradOp<TensorT>>(LeakyReLUGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), solver_op, 0.0, 0.0, true, specify_layers); } node_names_dec_fc1.push_back(node_names); node_iter += n_dec_fc; } // Add the first transpose convolution -> ReLU layers node_names_l0.clear(); l_cnt = 0; for (const std::vector<std::string>& node_names_l : node_names_dec_fc1) { for (size_t d = 0; d < n_dec_depth_1; ++d) { std::vector<std::string> node_names; std::string conv_module = "Dec-Conv0-" + std::to_string(l_cnt) + "-" + std::to_string(d); std::string conv_name = "Dec-Conv0-" + std::to_string(d); if (l_cnt == 0) { node_names = model_builder.addConvolution(model, conv_name, conv_module, node_names_l, sqrt(node_names_l.size()), sqrt(node_names_l.size()), filter_size - 1, filter_size - 1, filter_size, filter_size, stride_size, 0, 0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(filter_size * filter_size, 2)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_l0.push_back(node_names); } else { model_builder.addConvolution(model, conv_name, conv_module, node_names_l, node_names_l0.at(d), sqrt(node_names_l.size()), sqrt(node_names_l.size()), filter_size - 1, filter_size - 1, filter_size, filter_size, stride_size, 0, 0, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(filter_size * filter_size, 2)), solver_op, 0.0f, 0.0f, specify_layers); } // TODO: Norms } ++l_cnt; } // Add the second transpose convolution -> ReLU layers node_names_l1.clear(); l_cnt = 0; for (const std::vector<std::string>& node_names_l : node_names_l0) { for (size_t d = 0; d < n_dec_depth_2; ++d) { std::vector<std::string> node_names; std::string conv_module = "Dec-Conv1-" + std::to_string(l_cnt) + "-" + std::to_string(d); std::string conv_name = "Dec-Conv1-" + std::to_string(d); if (l_cnt == 0) { node_names = model_builder.addConvolution(model, conv_name, conv_module, node_names_l, sqrt(node_names_l.size()), sqrt(node_names_l.size()), filter_size - 1, filter_size - 1, filter_size, filter_size, stride_size, 0, 0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(filter_size * filter_size, 2)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_l1.push_back(node_names); } else { model_builder.addConvolution(model, conv_name, conv_module, node_names_l, node_names_l1.at(d), sqrt(node_names_l.size()), sqrt(node_names_l.size()), filter_size - 1, filter_size - 1, filter_size, filter_size, stride_size, 0, 0, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(filter_size * filter_size, 2)), solver_op, 0.0f, 0.0f, specify_layers); } } ++l_cnt; } // Add the third transpose convolution -> ReLU layers node_names_l2.clear(); l_cnt = 0; for (const std::vector<std::string>& node_names_l : node_names_l1) { for (size_t d = 0; d < n_dec_depth_3; ++d) { std::vector<std::string> node_names; std::string conv_module = "Dec-Conv2-" + std::to_string(l_cnt) + "-" + std::to_string(d); std::string conv_name = "Dec-Conv2-" + std::to_string(d); if (l_cnt == 0) { node_names = model_builder.addConvolution(model, conv_name, conv_module, node_names_l, sqrt(node_names_l.size()), sqrt(node_names_l.size()), filter_size - 1, filter_size - 1, filter_size, filter_size, stride_size, 0, 0, activation, activation_grad, integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(filter_size * filter_size, 2)), solver_op, 0.0f, 0.0f, false, specify_layers); node_names_l2.push_back(node_names); } else { model_builder.addConvolution(model, conv_name, conv_module, node_names_l, node_names_l2.at(d), sqrt(node_names_l.size()), sqrt(node_names_l.size()), filter_size - 1, filter_size - 1, filter_size, filter_size, stride_size, 0, 0, std::make_shared<RandWeightInitOp<TensorT>>(RandWeightInitOp<TensorT>(filter_size * filter_size, 2)), solver_op, 0.0f, 0.0f, specify_layers); } } ++l_cnt; } // TODO: last layer should be //std::make_shared<SigmoidOp<TensorT>>(SigmoidOp<TensorT>()), //std::make_shared<SigmoidGradOp<TensorT>>(SigmoidGradOp<TensorT>()), node_names_conv_linearized.clear(); if (node_names_l2.size() > 0) { for (const std::vector<std::string>& node_names_l : node_names_l2) { for (const std::string& node_name : node_names_l) { node_names_conv_linearized.push_back(node_name); } } } else if (node_names_l1.size() > 0) { for (const std::vector<std::string>& node_names_l : node_names_l1) { for (const std::string& node_name : node_names_l) { node_names_conv_linearized.push_back(node_name); } } } else if (node_names_l0.size() > 0) { for (const std::vector<std::string>& node_names_l : node_names_l0) { for (const std::string& node_name : node_names_l) { node_names_conv_linearized.push_back(node_name); } } } else { for (const std::vector<std::string>& node_names_l : node_names_dec_fc1) { for (const std::string& node_name : node_names_l) { node_names_conv_linearized.push_back(node_name); } } } assert(node_names_conv_linearized.size() == n_inputs); // Add the actual output nodes node_names_mu = model_builder.addSinglyConnected(model, "Mu", "Mu", node_names_mu, node_names_mu.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); node_names_logvar = model_builder.addSinglyConnected(model, "LogVar", "LogVar", node_names_logvar, node_names_logvar.size(), std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); std::vector<std::string> node_names_output = model_builder.addSinglyConnected(model, "Output", "Output", node_names_conv_linearized, n_inputs, std::make_shared<LinearOp<TensorT>>(LinearOp<TensorT>()), std::make_shared<LinearGradOp<TensorT>>(LinearGradOp<TensorT>()), integration_op, integration_error_op, integration_weight_grad_op, std::make_shared<ConstWeightInitOp<TensorT>>(ConstWeightInitOp<TensorT>(1)), std::make_shared<DummySolverOp<TensorT>>(DummySolverOp<TensorT>()), 0.0f, 0.0f, false, true); // Specify the output node types manually for (const std::string& node_name : node_names_mu) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_logvar) model.nodes_.at(node_name)->setType(NodeType::output); for (const std::string& node_name : node_names_output) model.nodes_.at(node_name)->setType(NodeType::output); model.setInputAndOutputNodes(); } void adaptiveTrainerScheduler( const int& n_generations, const int& n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, const std::vector<float>& model_errors) { //if (n_epochs % 1000 == 0 && n_epochs > 5000) { // // anneal the learning rate by half on each plateau // TensorT lr_new = this->reduceLROnPlateau(model_errors, 0.5, 100, 10, 0.05); // if (lr_new < 1.0) { // model_interpreter.updateSolverParams(0, lr_new); // std::cout << "The learning rate has been annealed by a factor of " << lr_new << std::endl; // } //} //// Increase the KL divergence capacity //TensorT capacity_z = 5 / 2.5e4 * n_epochs; //if (capacity_z > 5) capacity_z = 5; //this->getLossFunctions().at(1) = std::make_shared<KLDivergenceMuLossOp<float>>(KLDivergenceMuLossOp<float>(1e-6, 30.0, capacity_z)); //this->getLossFunctions().at(2) = std::make_shared<KLDivergenceLogVarLossOp<float>>(KLDivergenceLogVarLossOp<float>(1e-6, 30.0, capacity_z)); //this->getLossFunctionGrads().at(1) = std::make_shared<KLDivergenceMuLossGradOp<float>>(KLDivergenceMuLossGradOp<float>(1e-6, 30.0, capacity_z)); //this->getLossFunctionGrads().at(2) = std::make_shared<KLDivergenceLogVarLossGradOp<float>>(KLDivergenceLogVarLossGradOp<float>(1e-6, 30.0, capacity_z)); //// Increase the KL divergence beta //TensorT beta = 30 / 2.5e4 * n_epochs; //if (beta > 30) beta = 30; //this->getLossFunctions().at(1) = std::make_shared<KLDivergenceMuLossOp<float>>(KLDivergenceMuLossOp<float>(1e-6, beta, 0.0)); //this->getLossFunctions().at(2) = std::make_shared<KLDivergenceLogVarLossOp<float>>(KLDivergenceLogVarLossOp<float>(1e-6, beta, 0.0)); //this->getLossFunctionGrads().at(1) = std::make_shared<KLDivergenceMuLossGradOp<float>>(KLDivergenceMuLossGradOp<float>(1e-6, beta, 0.0)); //this->getLossFunctionGrads().at(2) = std::make_shared<KLDivergenceLogVarLossGradOp<float>>(KLDivergenceLogVarLossGradOp<float>(1e-6, beta, 0.0)); if (n_epochs % 1000 == 0 && n_epochs != 0) { // save the model every 1000 epochs model_interpreter.getModelResults(model, false, true, false, false); // save the model and tensors to binary ModelFile<TensorT> data; data.storeModelBinary(model.getName() + "_" + std::to_string(n_epochs) + "_model.binary", model); ModelInterpreterFileDefaultDevice<TensorT> interpreter_data; interpreter_data.storeModelInterpreterBinary(model.getName() + "_" + std::to_string(n_epochs) + "_interpreter.binary", model_interpreter); } } void trainingModelLogger(const int& n_epochs, Model<TensorT>& model, ModelInterpreterDefaultDevice<TensorT>& model_interpreter, ModelLogger<TensorT>& model_logger, const Eigen::Tensor<TensorT, 3>& expected_values, const std::vector<std::string>& output_nodes, const std::vector<std::string>& input_nodes, const TensorT& model_error_train, const TensorT& model_error_test, const Eigen::Tensor<TensorT, 1>& model_metrics_train, const Eigen::Tensor<TensorT, 1>& model_metrics_test) override { // Set the defaults model_logger.setLogTimeEpoch(true); model_logger.setLogTrainValMetricEpoch(true); model_logger.setLogExpectedEpoch(false); model_logger.setLogNodeOutputsEpoch(false); model_logger.setLogNodeInputsEpoch(false); // initialize all logs if (n_epochs == 0) { model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_logger.setLogNodeInputsEpoch(true); model_logger.initLogs(model); } // Per n epoch logging if (n_epochs % 1000 == 0) { model_logger.setLogExpectedEpoch(true); model_logger.setLogNodeOutputsEpoch(true); model_logger.setLogNodeInputsEpoch(true); model_interpreter.getModelResults(model, true, false, false, true); } // Create the metric headers and data arrays std::vector<std::string> log_train_headers = { "Train_Error" }; std::vector<std::string> log_test_headers = { "Test_Error" }; std::vector<TensorT> log_train_values = { model_error_train }; std::vector<TensorT> log_test_values = { model_error_test }; int metric_iter = 0; for (const std::string& metric_name : this->metric_names_) { log_train_headers.push_back(metric_name); log_test_headers.push_back(metric_name); log_train_values.push_back(model_metrics_train(metric_iter)); log_test_values.push_back(model_metrics_test(metric_iter)); ++metric_iter; } model_logger.writeLogs(model, n_epochs, log_train_headers, log_test_headers, log_train_values, log_test_values, output_nodes, expected_values, {}, output_nodes, {}, input_nodes, {}); } }; template<typename TensorT> class DataSimulatorExt : public MNISTSimulator<TensorT> { public: int n_encodings_; int perc_corruption_ = 0; void simulateTrainingData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); const int n_input_pixels = this->validation_data.dimension(1); assert(n_output_nodes == n_input_pixels + 2 * n_encodings_); assert(n_metric_output_nodes == n_metric_output_nodes); assert(n_input_nodes == n_input_pixels + n_encodings_); // make the start and end sample indices Eigen::Tensor<int, 1> sample_indices = this->getTrainingIndices(batch_size, 1); // Reformat the input data for training for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { // Gaussian Sampler Eigen::Tensor<TensorT, 2> gaussian_samples = GaussianSampler<TensorT>(1, n_encodings_); for (int nodes_iter = 0; nodes_iter < n_input_pixels; ++nodes_iter) { std::random_device rd; std::uniform_int_distribution<int> distribution(1, 100); std::mt19937 engine(rd()); if (distribution(engine) > perc_corruption_) input_data(batch_iter, memory_iter, nodes_iter) = this->training_data(sample_indices[batch_iter], nodes_iter); else input_data(batch_iter, memory_iter, nodes_iter) = TensorT(0); loss_output_data(batch_iter, memory_iter, nodes_iter) = this->training_data(sample_indices[batch_iter], nodes_iter); metric_output_data(batch_iter, memory_iter, nodes_iter) = this->training_data(sample_indices[batch_iter], nodes_iter); if (nodes_iter < n_encodings_) { input_data(batch_iter, memory_iter, nodes_iter + n_input_pixels) = gaussian_samples(0, nodes_iter); // sample from a normal distribution loss_output_data(batch_iter, memory_iter, nodes_iter + n_input_pixels) = 0; // Dummy data for KL divergence mu loss_output_data(batch_iter, memory_iter, nodes_iter + n_input_pixels + n_encodings_) = 0; // Dummy data for KL divergence logvar } } } } } void simulateValidationData(Eigen::Tensor<TensorT, 3>& input_data, Eigen::Tensor<TensorT, 3>& loss_output_data, Eigen::Tensor<TensorT, 3>& metric_output_data, Eigen::Tensor<TensorT, 2>& time_steps) { // infer data dimensions based on the input tensors const int batch_size = input_data.dimension(0); const int memory_size = input_data.dimension(1); const int n_input_nodes = input_data.dimension(2); const int n_output_nodes = loss_output_data.dimension(2); const int n_metric_output_nodes = metric_output_data.dimension(2); const int n_input_pixels = this->validation_data.dimension(1); assert(n_output_nodes == n_input_pixels + 2 * n_encodings_); assert(n_metric_output_nodes == n_metric_output_nodes); assert(n_input_nodes == n_input_pixels + n_encodings_); // make the start and end sample indices Eigen::Tensor<int, 1> sample_indices = this->getValidationIndices(batch_size, 1); // Reformat the input data for validation for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) { for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) { for (int nodes_iter = 0; nodes_iter < n_input_pixels; ++nodes_iter) { input_data(batch_iter, memory_iter, nodes_iter) = this->validation_data(sample_indices[batch_iter], nodes_iter); loss_output_data(batch_iter, memory_iter, nodes_iter) = this->validation_data(sample_indices[batch_iter], nodes_iter); metric_output_data(batch_iter, memory_iter, nodes_iter) = this->validation_data(sample_indices[batch_iter], nodes_iter); if (nodes_iter < n_encodings_) { input_data(batch_iter, memory_iter, nodes_iter + n_input_pixels) = 0; // sample from a normal distribution loss_output_data(batch_iter, memory_iter, nodes_iter + n_input_pixels) = 0; // Dummy data for KL divergence mu loss_output_data(batch_iter, memory_iter, nodes_iter + n_input_pixels + n_encodings_) = 0; // Dummy data for KL divergence logvar } } } } } }; template<typename TensorT> class ModelReplicatorExt : public ModelReplicator<TensorT> {}; template<typename TensorT> class PopulationTrainerExt : public PopulationTrainerDefaultDevice<TensorT> {}; /** @brief Pixel reconstruction MNIST example whereby all pixels are linearized and read into the model. The model then attempts to reconstruction the pixels using a Variational Auto Encoder network Data processing: - whole image pixels (linearized) 28x28 normalized to 0 to 1 */ void main_MNIST(const std::string& data_dir, const bool& make_model, const bool& train_model) { const int n_hard_threads = std::thread::hardware_concurrency(); const int n_threads = 1; // define the populatin trainer PopulationTrainerExt<float> population_trainer; population_trainer.setNGenerations(1); population_trainer.setLogging(false); // define the population logger PopulationLogger<float> population_logger(true, true); // define the model logger ModelLogger<float> model_logger(true, true, false, false, false, false, false); // define the data simulator const std::size_t input_size = 784; const std::size_t encoding_size = 16; const std::size_t training_data_size = 60000; //60000; const std::size_t validation_data_size = 10000; //10000; DataSimulatorExt<float> data_simulator; // read in the training data std::string training_data_filename = data_dir + "train-images.idx3-ubyte"; std::string training_labels_filename = data_dir + "train-labels.idx1-ubyte"; data_simulator.readData(training_data_filename, training_labels_filename, true, training_data_size, input_size); // read in the validation data std::string validation_data_filename = data_dir + "t10k-images.idx3-ubyte"; std::string validation_labels_filename = data_dir + "t10k-labels.idx1-ubyte"; data_simulator.readData(validation_data_filename, validation_labels_filename, false, validation_data_size, input_size); data_simulator.unitScaleData(); data_simulator.n_encodings_ = encoding_size; data_simulator.perc_corruption_ = 50; // Make the input nodes std::vector<std::string> input_nodes; for (int i = 0; i < input_size; ++i) { char name_char[512]; sprintf(name_char, "Input_%012d", i); std::string name(name_char); input_nodes.push_back(name); } // Make the encoding nodes and add them to the input for (int i = 0; i < encoding_size; ++i) { char name_char[512]; sprintf(name_char, "Encoding_%012d-Sampler", i); std::string name(name_char); input_nodes.push_back(name); } // Make the output nodes std::vector<std::string> output_nodes; for (int i = 0; i < input_size; ++i) { char name_char[512]; sprintf(name_char, "Output_%012d", i); std::string name(name_char); output_nodes.push_back(name); } // Make the mu nodes std::vector<std::string> encoding_nodes_mu; for (int i = 0; i < encoding_size; ++i) { char name_char[512]; sprintf(name_char, "Mu_%012d", i); std::string name(name_char); encoding_nodes_mu.push_back(name); } // Make the encoding nodes std::vector<std::string> encoding_nodes_logvar; for (int i = 0; i < encoding_size; ++i) { char name_char[512]; sprintf(name_char, "LogVar_%012d", i); std::string name(name_char); encoding_nodes_logvar.push_back(name); } // define the model trainers and resources for the trainers std::vector<ModelInterpreterDefaultDevice<float>> model_interpreters; for (size_t i = 0; i < n_threads; ++i) { ModelResources model_resources = { ModelDevice(3, 1) }; ModelInterpreterDefaultDevice<float> model_interpreter(model_resources); model_interpreters.push_back(model_interpreter); } ModelTrainerExt<float> model_trainer; //model_trainer.setBatchSize(1); // evaluation only model_trainer.setBatchSize(128); model_trainer.setNEpochsTraining(200001); model_trainer.setNEpochsValidation(25); model_trainer.setNEpochsEvaluation(100); model_trainer.setMemorySize(1); model_trainer.setVerbosityLevel(1); model_trainer.setLogging(true, true, false); model_trainer.setFindCycles(false); model_trainer.setFastInterpreter(true); model_trainer.setLossFunctions({ //std::make_shared<MSELossOp<float>>(MSELossOp<float>(1e-6, 1.0 / float(input_size))), std::make_shared<BCEWithLogitsLossOp<float>>(BCEWithLogitsLossOp<float>(1e-6, 1.0 / float(input_size))), //std::make_shared<BCELossOp<float>>(BCELossOp<float>(1e-6, 1.0 / float(input_size))), std::make_shared<KLDivergenceMuLossOp<float>>(KLDivergenceMuLossOp<float>(1e-6, 0.0, 0.0)), //1.0 / float(encoding_size) std::make_shared<KLDivergenceLogVarLossOp<float>>(KLDivergenceLogVarLossOp<float>(1e-6, 0.0, 0.0)) }); model_trainer.setLossFunctionGrads({ //std::make_shared<MSELossGradOp<float>>(MSELossGradOp<float>(1e-6, 1.0 / float(input_size))), std::make_shared<BCEWithLogitsLossGradOp<float>>(BCEWithLogitsLossGradOp<float>(1e-6, 1.0 / float(input_size))), //std::make_shared<BCELossGradOp<float>>(BCELossGradOp<float>(1e-6, 1.0 / float(input_size))), std::make_shared<KLDivergenceMuLossGradOp<float>>(KLDivergenceMuLossGradOp<float>(1e-6, 0.0, 0.0)), std::make_shared<KLDivergenceLogVarLossGradOp<float>>(KLDivergenceLogVarLossGradOp<float>(1e-6, 0.0, 0.0)) }); model_trainer.setLossOutputNodes({ output_nodes, encoding_nodes_mu, encoding_nodes_logvar }); model_trainer.setMetricFunctions({ std::make_shared<MAEOp<float>>(MAEOp<float>()) }); model_trainer.setMetricOutputNodes({ output_nodes }); model_trainer.setMetricNames({ "MAE" }); // define the model replicator ModelReplicatorExt<float> model_replicator; // define the initial population Model<float> model; if (make_model) { std::cout << "Making the model..." << std::endl; ModelTrainerExt<float>().makeVAEFullyConn(model, input_size, encoding_size, 512, 256, 0, true, true, false, true); //ModelTrainerExt<float>().makeVAECovNet(model, input_size, encoding_size, 32, 1, 0, 2, 1, 0, 128, 128, 7, 1, false, true); } else { // read in the trained model std::cout << "Reading in the model..." << std::endl; const std::string model_filename = data_dir + "VAE_model.binary"; const std::string interpreter_filename = data_dir + "VAE_interpreter.binary"; ModelFile<float> model_file; model_file.loadModelBinary(model_filename, model); model.setId(1); model.setName("VAE1"); ModelInterpreterFileDefaultDevice<float> model_interpreter_file; model_interpreter_file.loadModelInterpreterBinary(interpreter_filename, model_interpreters[0]); // FIX ME! //std::cout << "Modifying the learning rate..." << std::endl; //for (auto& weight_map : model.weights_) { // if (weight_map.second->getSolverOp()->getName() == "AdamOp") { // weight_map.second->getSolverOpShared()->setLearningRate(5e-6); // } //} } //std::vector<Model<float>> population = { model }; if (train_model) { // Train the model std::pair<std::vector<float>, std::vector<float>> model_errors = model_trainer.trainModel(model, data_simulator, input_nodes, model_logger, model_interpreters.front()); //// Evolve the population //std::vector<std::vector<std::tuple<int, std::string, float>>> models_validation_errors_per_generation = population_trainer.evolveModels( // population, model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, population_logger, input_nodes); //PopulationTrainerFile<float> population_trainer_file; //population_trainer_file.storeModels(population, "MNIST"); //population_trainer_file.storeModelValidations("MNISTErrors.csv", models_validation_errors_per_generation); } else { //// Evaluate the population //population_trainer.evaluateModels( // population, model_trainer, model_interpreters, model_replicator, data_simulator, model_logger, input_nodes); } } /// cmd: MNIST_VAE_example C:/Users/dmccloskey/Documents/GitHub/mnist/ true true int main(int argc, char** argv) { // Parse the user commands std::string data_dir = "C:/Users/dmccloskey/Documents/GitHub/mnist/"; //std::string data_dir = "/home/user/data/"; //std::string data_dir = "C:/Users/domccl/GitHub/mnist/"; bool make_model = true, train_model = true; if (argc >= 2) { data_dir = argv[1]; } if (argc >= 3) { make_model = (argv[2] == std::string("true")) ? true : false; } if (argc >= 4) { train_model = (argv[3] == std::string("true")) ? true : false; } // run the application main_MNIST(data_dir, make_model, train_model); return 0; }<file_sep>/**TODO: Add copyright*/ #ifndef EVONET_MODELINTERPRETERFILEDEFAULTDEVICE_H #define EVONET_MODELINTERPRETERFILEDEFAULTDEVICE_H // .h #include <EvoNet/ml/ModelInterpreterDefaultDevice.h> #include <EvoNet/io/ModelInterpreterFile.h> namespace EvoNet { /** @brief ModelInterpreterFileDefaultDevice */ template<typename TensorT> class ModelInterpreterFileDefaultDevice : public ModelInterpreterFile<TensorT, ModelInterpreterDefaultDevice<TensorT>> { public: ModelInterpreterFileDefaultDevice() = default; ///< Default constructor ~ModelInterpreterFileDefaultDevice() = default; ///< Default destructor private: friend class cereal::access; template<class Archive> void serialize(Archive& archive) { archive(cereal::base_class<ModelInterpreterFile<TensorT, ModelInterpreterDefaultDevice<TensorT>>>(this)); } }; } #endif //EVONET_MODELINTERPRETERFILEDEFAULTDEVICE_H<file_sep> #------------------------------------------------------------------------------ # This cmake file enables generation of gcov files. # add compiler flag # -> requires clang > 3.1 or gcc > 4.8 (will be checked in module) if (MSVC) message(FATAL_ERROR "Coverage tests with gcov/lcov can only be enabled for GCC or Clang in a UNIX environment") else() # add Coverage also for compiler add_definitions( -fprofile-arcs -ftest-coverage ) # add Coverage also for linker set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-arcs -ftest-coverage") set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-arcs -ftest-coverage") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-arcs -ftest-coverage") include(cmake/modules/CodeCoverage.cmake) # in addition to system paths, also ignore includes from the prefix path (e.g. contrib libs) setup_target_for_coverage(EvoNet_coverage coverage ${CMAKE_PREFIX_PATH}) message("Coverage tests enabled. You can run 'make EvoNet_coverage'") endif()
862ac7671e6a19116ab4290f01c885e10ebbb8c2
[ "Python", "CMake", "C++", "reStructuredText" ]
205
C++
dmccloskey/EvoNet
47a19a804b65daef712418b4e278704b340d20b9
08b680fac059c34ce069bfd9ea42e8c193922281
refs/heads/master
<repo_name>jerme404/blockexploder<file_sep>/src/store/modules/Explorer/getters.js import Config from '@/config'; import Utils from '@/utils'; import moment from 'moment'; const getters = { supply: (state) => { let supply = { total: Config.coinSupplyTotal, circulating: 0, emissionPercent: 0, reward: 0 }; if (!state.networkInfo) { return supply; } if (state.recentBlocks && state.recentBlocks.length > 0) { let reward = state.recentBlocks[state.recentBlocks.length - 1].reward; supply.reward = Utils.decimalUnits(reward); } if (state.generatedCoins) { supply.circulating = Utils.decimalUnits(state.generatedCoins); supply.emissionPercent = (state.generatedCoins / Config.coinSupplyTotal) * 100; } return supply; }, netStats: (state) => { if (!state.networkInfo) { return {}; } let netStats = { txCount: state.networkInfo.tx_count }; if (state.networkInfo.difficulty) { netStats.difficulty = Utils.displayUnits(state.networkInfo.difficulty, 2); netStats.hashrate = Utils.hashrate(state.networkInfo.difficulty); } return netStats; }, averageSolveTime: (state) => { if (!state.recentBlocks || state.recentBlocks.length === 0) { return; } let blocks = state.recentBlocks.slice(), blockTimes = []; let lastTimeStamp = blocks.pop().timestamp; while (blocks.length > 0) { let timestamp = blocks.pop().timestamp; blockTimes.push(lastTimeStamp - timestamp); lastTimeStamp = timestamp; } return blockTimes.reduce((sum, time) => sum + time, 0) / (blockTimes.length - 1); }, recentBlockList: (state) => { if (!state.recentBlocks) { return []; } let recentBlocks = [...state.recentBlocks].map((block) => { let timeStamp = moment.unix(block.timestamp); return Object.assign({ blockSize: `${Utils.blockSize(block.block_size)}B`, timeAgo: timeStamp.fromNow(), timeStamp: timeStamp.format('L LT'), }, block); }); recentBlocks.sort((a, b) => b.height - a.height); return recentBlocks; }, txPoolList: (state) => { if (!state.txPool) { return []; } let txPool = [...state.txPool].map((tx) => { return Object.assign({ txFee: Utils.decimalUnits(tx.fee).toFixed(Config.coinUnitPlaces), timeStamp: moment.unix(tx.receive_time).format('L LT'), }, tx); }); txPool.sort((a, b) => b.receive_time - a.receive_time); return txPool; }, blockChart: (state) => { if (!state.recentBlocks || state.recentBlocks.length === 0) { return; } let chartData = state.recentBlocks.reduce((series, block) => { // Hashrates let hashrate = block.difficulty / Config.blockTarget; series.hashrate.data.push(hashrate); // Solve time let solveTime = series.lastBlockTime ? block.timestamp - series.lastBlockTime : 60; series.blockTime.data.push(solveTime); series.blocks.push(block.height); series.lastBlockTime = block.timestamp; return series; }, { hashrate: { name: 'Hashrate', data: []}, blockTime: { name: 'BlockTime', data: [], type: 'column' }, blocks: [] }); let hashMin = Math.min(...chartData.hashrate.data), hashMax = Math.max(...chartData.hashrate.data); let solveMin = Math.min(...chartData.blockTime.data), solveMax = Math.max(...chartData.blockTime.data); return { yAxis: [ { seriesName: 'Hashrate', decimalsInFloat: 0, min: hashMin - (hashMin/20), max: hashMax + (hashMax/50), labels: { show: false } }, { seriesName: 'BlockTime', decimalsInFloat: 0, min: 0, max: solveMax + (solveMax/2), opposite: true, labels: { show: false } }, ], xAxis: { categories: chartData.blocks }, series: [ chartData.hashrate, chartData.blockTime ] }; } }; export default getters; <file_sep>/src/utils/index.js export { default } from './utils'; <file_sep>/src/views/Explorer/index.js export { default } from './Explorer.vue'; <file_sep>/src/views/Api/index.js export { default } from './Api.vue'; <file_sep>/src/config/config.js export default { coinName: 'NERVA', coinSymbol: 'XNV', coinUnitPlaces: 12, coinUriPrefix: 'nerva:', coinSupplyTotal: 18446744073709551615, addressPrefix: 0x3800, integratedAddressPrefix: 0x7081, subAddressPrefix: 0x1080, blockTarget: 60, updateInterval: 15000, // Auto refresh interval in milliseconds explorerApi: 'https://us-central1-nerva-248022.cloudfunctions.net/nervaApi', websiteUrl: 'https://getnerva.org', footerAppTitle: 'NERVA Block Explorer', copyright: '2019 NERVA', footerLinks: [ { name: 'BitBucket', icon: 'fab fa-bitbucket', url: 'https://bitbucket.org/nerva-project' }, { name: 'Discord', icon: 'fab fa-discord', url: 'https://discord.gg/jsdbEns/' }, { name: 'Reddit', icon: 'fab fa-reddit-alien', url: 'https://www.reddit.com/r/Nerva/' }, { name: 'Twitter', icon: 'fab fa-twitter', url: 'http://twitter.com/NervaCurrency' }, { name: 'Telegram', icon: 'fab fa-telegram-plane', url: 'https://t.me/NervaXNV' } ] }; <file_sep>/src/theme.js export default { // Standard vuetify theme overrides. primary: { base: '#2C3033', darken1: '#222429', darken2: '#202528', lighten1: '#34383b', lighten2: '#53585B' }, secondary: { base: '#D4D7D9', darken1: '#AFB1B3', darken2: '#707273', lighten1: '#EDF0F2', lighten2: '#FAFDFF' }, accent: { base: '#7D83FF', darken1: '#646AE6', darken2: '#4A50CC', lighten1: '#979DFF', lighten2: '#B0B6FF' }, error: { base: '#F2555A', darken1: '#BF4347', darken2: '#A63A3E', lighten1: '#FF595F' }, info: '#35A7FF', success: { base: '#13F2B3', darken1: '#11D9A0', darken2: '#0EB384', lighten1: '#14FFBD' }, warning: '#FFC107' }; <file_sep>/README.md # blockexploder ![explorer screenshot](docs/img/exploder-screenshot.png) ## Project setup #### Install vue-cli dev tools ```bash npm install vue-cli -g ``` #### Clone the project and install dependencies ```bash git clone https://github.com/jerme404/blockexploder cd blockexploder npm install ``` #### Compile with hot-reloading for development ```bash npm run dev ``` #### Compile and minify for production ```bash npm run build ``` #### Build with URL path Set `VUE_PUBLIC_PATH=/<path>/` in `./env.withpath` ```bash npm run build-with-path ``` #### Hosting Some specific configuration may be needed depending on your web server. For nginx ```nginx location / { try_files $uri $uri/ /index.html; } ``` See the [vue-router history mode docs](https://router.vuejs.org/guide/essentials/history-mode.html#example-server-configurations). <file_sep>/src/store/index.js import Vue from 'vue'; import Vuex from 'vuex'; import Explorer from './modules/Explorer'; Vue.use(Vuex); export default new Vuex.Store({ modules: { explorer: Explorer }, }); <file_sep>/docs/forking-guide.md # Forking blockexploder ### Styling Put your logo in `/src/assets`. I might only be using `logo-color.png`. Replace `/public/favicon.ico` with your own favicon. Set your theme colors in `/src/theme.js`. ### Coin Config Update `/src/config/config.js`. ### Daemon API I'm pointing at a Google cloud function, which just adds CORS and forwards requests to the Nerva PHP API. All of the response objects are normal Monero-style `json_rpc` format. <file_sep>/src/views/Tools/index.js export { default } from './Tools.vue'; <file_sep>/src/views/Detail/components/BlockDisplay/index.js export { default } from './BlockDisplay.vue'; <file_sep>/src/router/router.js import Vue from 'vue'; import Router from 'vue-router'; Vue.use(Router); export default new Router({ mode: 'history', base: process.env.BASE_URL, scrollBehavior (to, from, savedPosition) { return { x: 0, y: 0 }; }, routes: [ { path: '/', name: 'explorer', component: () => import('@/views/Explorer') }, { path: '/detail/:param?', name: 'detail', component: () => import('@/views/Detail') }, { path: '/tools', name: 'tools', component: () => import('@/views/Tools') }, { path: '/api', name: 'api', component: () => import('@/views/Api') }, { path: '*', component: () => import('@/views/NotFound') } ] }); <file_sep>/src/store/modules/Explorer/state.js import ExplorerService from './services/Explorer'; const state = { updateTimer: undefined, updateLoading: false, explorerService: undefined, networkInfo: {}, txPool: [], generatedCoins: 0, recentBlocks: [], }; state.explorerService = new ExplorerService(); export default state;
d7353ee88e7e4a7148c177b0d8f1c7e4ea440f23
[ "JavaScript", "Markdown" ]
13
JavaScript
jerme404/blockexploder
e7ef232ae35ea1b8091a4b92f20ae8887771c746
689026d24877f93a8aaa94ab4696cf02fcb2d07c
refs/heads/master
<repo_name>tonyvazgar/IASerching<file_sep>/Result.java import java.util.Vector; public class Result { private boolean found; private Vector<Node> plan; public Result(){ found = false; plan = null; } public void setFound(boolean hasBeenFound){ found = hasBeenFound; } public void setPlan(Vector<Node> aPlan){ plan = aPlan; } public Vector<Node> getPlan(){ return plan; } } <file_sep>/Situation.java public class Situation { int matrix [][]; public Situation(){ matrix = new int[3][3]; } public void setMatrix(int [][] aMatrix){ matrix = aMatrix; } public boolean equals(Object anObject){ int i, j; boolean areEqual; Situation anotherState = (Situation) anObject; areEqual = true; i = 0; while (i < 3) //Renglones { j = 0; while (j < 3) //Columnas { if(matrix[i][j] != anotherState.matrix[i][j]) { areEqual = false; } j = j + 1; } i = i + 1; } return areEqual; } @Override public String toString() { String string; string = "\n"; string = string + matrix[0][0] + " "; string = string + matrix[0][1] + " "; string = string + matrix[0][2] + "\n"; string = string + matrix[1][0] + " "; string = string + matrix[1][1] + " "; string = string + matrix[1][2] + "\n"; string = string + matrix[2][0] + " "; string = string + matrix[2][1] + " "; string = string + matrix[2][2] + "\n" + "\n"; return string; } public int heuristicFunction() { int value = manhattanDistance(); return value; } private int manhattanDistance() { int manhattanDistance; int i,j; int token; int targetX; int targetY; int differenceX; int differenceY; manhattanDistance = 0; i = 0; while (i < 3){ j = 0; while (j < 3){ token = matrix[i][j]; if(token != 0){ //We dont compute distance for element 0 targetX = (token - 1) / 3; //expected x-coordinate (row) targetY = (token - 1) % 3; //expected y-coordinate (col) differenceX = i - targetX; //x-distance to expected coordinate differenceY = j - targetY; //y-distance to expected coordinate manhattanDistance = manhattanDistance + Math.abs(differenceX) + Math.abs(differenceY); } j = j + 1; } i = i + 1; } return manhattanDistance; } } <file_sep>/Search.java import java.util.Vector; public abstract class Search { public static Node foundNode; public static boolean found; public static Result result; public static Vector <Situation> memory; public static boolean nodeIsFinalState(Node aNode, Node goal){ //Prueba para ver si es el estado final o no boolean isFinalState = false; if(aNode.getState().equals(goal.getState())) { isFinalState = true; } return isFinalState; } } <file_sep>/BestFirst.java import java.util.Vector; public abstract class BestFirst extends Search { public static Result search(Situation initialState, Situation finalState){ Vector<Node> route; Node root, goal; root = new Node(); root.setState(initialState); goal = new Node(); goal.setState(finalState); memory = new Vector<Situation>(); found = false; result = new Result(); if(root != null){ if(!nodeIsFinalState(root,goal)){ result = bestFirst(root, goal); }else{ result.setFound(true); route = new Vector<Node>(); route.add(root); result.setPlan(route); found = true; } }//End if(root != null) System.out.println("MOVES: " + (result.getPlan().size()-1)); System.out.println("EXPANTIONS: " + Node.numberOfExpantions); return result; } private static Result bestFirst(Node node, Node goal) { Node nextChild; int i; Vector<Node> route; if(!found){ node.expand(); memory.add(node.getState()); node.sortChildren(); if(node.getChildren() != null){ i = 0; while ((i < node.getChildren().size()) && !found){ nextChild = node.getChildren().get(i); if(nodeIsFinalState(nextChild, goal)){ found = true; foundNode = nextChild; }else if(!memory.contains(nextChild.getState())){ result = bestFirst(nextChild, goal); } i = i + 1; }//End while }//End if(node.getChildren() != null) }//end if !found if(found){ result.setFound(true); route = foundNode.getRoute(); result.setPlan(route); }else{ result.setFound(false); result.setPlan(null); } return result; }//End bestFirst }
3c55a04ff948ae332f8d8a0af1752edb67c7d35f
[ "Java" ]
4
Java
tonyvazgar/IASerching
e2d85135df82657e600710808851532c30e88037
3b4f860d3bfc11ad792fe1e1bc82b2a79345a20e
refs/heads/master
<file_sep>import heapq # Represents a Huffman tree for use in encoding/decoding strings. # A sample usage is as follows: # # h = HuffmanTree([('A', 2), ('B', 7), ('C', 1)]) # assert(h.encode('ABC') == '01100') # assert(h.decode(h.encode('ABC')) == 'ABC') class HuffmanTree: # Helper object for building the Huffman tree. # You may modify this constructor but the grading script rlies on the left, right, and symbol fields. class TreeNode: def __init__ (self): self.left = None self.right = None self.symbol = None self.min_element = None # The `symbol_list` argument should be a list of tuples `(symbol, weight)`, # where `symbol` is a symbol that can be encoded, and `weight` is the # the unnormalized probabilitiy of that symbol appearing. def __init__(self, symbol_list): assert(len(symbol_list) >= 2) # YOUR CODE HERE self.root = self.build_tree(symbol_list) # (place TreeNode object here) def build_tree(self, symbol_list): symbol_hpq = self._make_heap(symbol_list) # keep looping until 1 node left while len(symbol_hpq) > 1: first_tuple, second_tuple = self._get_next_two_nodes(symbol_hpq) new_node = self.TreeNode() # create new node # isolate nodes from tuple left_node = first_tuple[1] right_node = second_tuple[1] # set left and right for new node new_node.left = left_node new_node.right = right_node new_node.min_element = left_node.min_element # set min element to the left's new_weight = first_tuple[0] + second_tuple[0] # combine the weight new_tuple = (new_weight, new_node) # create new tuple heapq.heappush(symbol_hpq, new_tuple) # push new tuple back into heap # return the last node return symbol_hpq[0][1] def _get_next_two_nodes(self, heap1): ''' This function uses a second heap to retrieve the next 2 min elements from a heapq ''' first_tuple = heapq.heappop(heap1) second_tuple = heapq.heappop(heap1) first_weight = first_tuple[0] second_weight = second_tuple[0] # if there are still items in hpq, check to see if they're the same weight as second tuple # of if first and second tuples have same weights, enter if if len(heap1) > 0 and (heap1[0][0] == second_weight or first_weight == second_weight): # store in a second heap, where are the weights are the same # and you sort by the min element this time heap2 = [] heapq.heapify(heap2) # grab just the node first_node = first_tuple[1] second_node = second_tuple[1] # create new tuple with min element and tree node first_heap2_tuple = (first_node.min_element, first_weight, first_node) second_heap2_tuple = (second_node.min_element, second_weight, second_node) # push the new tuple heapq.heappush(heap2, first_heap2_tuple) heapq.heappush(heap2, second_heap2_tuple) # loop through first heap and grab all the same weighted elements while len(heap1) > 0 and heap1[0][0] == second_weight: # this peeks at the first elem weight heap1_tuple = heapq.heappop(heap1) # pop from heap1 node_weight = heap1_tuple[0] # get node weight node = heap1_tuple[1] # get node heap2_tuple = (node.min_element, node_weight, node) # create heap2 tuple heapq.heappush(heap2, heap2_tuple) # push to tuple to heap2 # now grab the min 2 elements from heap2 first_from_heap2 = heapq.heappop(heap2) second_from_heap2 = heapq.heappop(heap2) # assign the tuples for the return values first_tuple = (first_from_heap2[1], first_from_heap2[2]) second_tuple = (second_from_heap2[1], second_from_heap2[2]) # put the remaining back into the first heap hpq # making sure to use the original weight as first elem in tuple while len(heap2) > 0: remaining_tuple = heapq.heappop(heap2) remaining_node_weight = remaining_tuple[1] remaining_node = remaining_tuple[2] insert_tuple = (remaining_node_weight, remaining_node) heapq.heappush(heap1, insert_tuple) # print(heap2) # return the tuples (weight, node) return (first_tuple, second_tuple) def _make_heap(self, symbol_list): li = [] heapq.heapify(li) # run through symbol list, create tree nodes, and add tuples to a heap for elem in symbol_list: # convert each symbol pair to a treenode new_node = self.TreeNode() new_node.symbol = elem[0] # set to char new_node.min_element = elem[0] # set to char temp = (elem[1],new_node) # create a temp tuple with the weight and treenode heapq.heappush(li, temp) # heap push the tuple return li # Encodes a string of characters into a string of bits using the # symbol/weight list provided. def encode(self, s): assert(s is not None) encoded_str = '' for char in s: char_code = '' encoded_char = self._encode_helper(self.root, char, char_code) encoded_str += encoded_char return encoded_str def _encode_helper(self, root, char, char_code): # if a leaf node, it will have a letter if root.symbol is not None: # if it's the right letter, return the char_code if root.symbol == char: return char_code # else it's not the letter, so return empty str else: return '' # if not a leaf node, recursively call left = self._encode_helper(root.left, char, char_code + '0') right = self._encode_helper(root.right, char, char_code + '1') return left + right # Decodes a string of bits into a string of characters using the # symbol/weight list provided. def decode(self,encoded_msg): assert(encoded_msg is not None) # YOUR CODE HERE msg_so_far = '' decoded_msg = self._decode_helper(self.root, encoded_msg, msg_so_far) return decoded_msg def _decode_helper(self, root, encoded_msg, msg_so_far): # if a leaf node if root.symbol is not None: msg_so_far += root.symbol root = self.root # not a leaf node # check if still have chars in encoded_msg # if encoded msg is not empty if len(encoded_msg) > 0: # get the next char next_char = encoded_msg[0] # if next_char is 0, go left if next_char == '0': msg_so_far += self._decode_helper(root.left, encoded_msg[1:], msg_so_far) elif next_char == '1': msg_so_far += self._decode_helper(root.right, encoded_msg[1:], msg_so_far) return msg_so_far # testing h = HuffmanTree([('A', 1), ('B', 1), ('C', 1)]) encoded = h.encode('ABC') decoded = h.decode('10110') print encoded print decoded <file_sep># Implementation of the Boyer-Moore majority vote algorithm. # # Given a list of elements `l`, one would use this class in the following way # to get the element that appears the majority of the time in the list: # class BoyerMooreMajority: def __init__(self): self.guess = None self.counter = 0 # Registers another element to be considered by the algorithm. This will # influence the majority element guess returned by `get_majority`. def add_next_element(self, element): assert(element is not None) # if counter is 0, set the guess to the element if self.counter == 0: self.guess = element # if counter not 0, there's a current guess, update its count # based on what the element is if element == self.guess: self.counter += 1 # increase if matched else: # decreased if not matched self.counter -= 1 # Gives the best guess of which of the elements seen so far make up the # majority of the elements in set of elements. If a majority element exists, # this algorithm will report it correctly. Otherwise, there is no guarantee # about the output. def get_majority(self): return self.guess # return the current guess # l = [2, 2, 2, 4, 5, 5, 2, 2, 3] # b = BoyerMooreMajority() # for elem in l: # b.add_next_element(elem) # print b.get_majority()<file_sep>import heapq a = [(1, 'A', 4), (1, 'A', 3), (1, 'A', 2)] li = [] heapq.heapify(li) for elem in a: heapq.heappush(li, elem) while len(li) > 0: print heapq.heappop(li)<file_sep>import heapq # Represents a Huffman tree for use in encoding/decoding strings. # A sample usage is as follows: # # h = HuffmanTree([('A', 2), ('B', 7), ('C', 1)]) # assert(h.encode('ABC') == '01100') # assert(h.decode(h.encode('ABC')) == 'ABC') def BinaryTreeToString(root): if root.symbol is not None: return root.symbol else: return "(%s%s)"%(BinaryTreeToString(root.left), BinaryTreeToString(root.right)) class HuffmanTree: # Helper object for building the Huffman tree. # You may modify this constructor but the grading script rlies on the left, right, and symbol fields. class TreeNode: def __init__ (self): self.left = None self.right = None self.symbol = None self.min_element = None # The `symbol_list` argument should be a list of tuples `(symbol, weight)`, # where `symbol` is a symbol that can be encoded, and `weight` is the # the unnormalized probabilitiy of that symbol appearing. def __init__(self, symbol_list): assert(len(symbol_list) >= 2) # YOUR CODE HERE self.root = self.build_tree(symbol_list) # (place TreeNode object here) def build_tree(self, symbol_list): ''' This function builds a huffman tree by first creating a min heap from the symbol list, popping 2 elements from the heap at a time, combines those into one node and reinserts the new node into the heap. It repeats this process until there is one remaining node, which is the root of the tree. ''' symbol_hpq = self._make_heap(symbol_list) # create a heap of the symbol list # keep looping until 1 node left while len(symbol_hpq) > 1: # grab the next two items from the heap, sorts by weight first first_tuple, second_tuple = self._get_next_two_nodes(symbol_hpq) new_node = self.TreeNode() # create new node # isolate nodes from tuple left_node = first_tuple[2] right_node = second_tuple[2] # set left and right for new node new_node.left = left_node new_node.right = right_node # check to see which has min element (since heap prioritizes first by the weight, # so could have instance where min element is different) if left_node.min_element < right_node.min_element: new_node.min_element = left_node.min_element else: new_node.min_element = right_node.min_element new_weight = first_tuple[0] + second_tuple[0] # combine the weight new_tuple = (new_weight, new_node.min_element, new_node) # create new tuple heapq.heappush(symbol_hpq, new_tuple) # push new tuple back into heap # return the last node return symbol_hpq[0][2] # it's the third element in the tuple def _get_next_two_nodes(self, heap1): ''' This function uses a second heap to retrieve the next 2 min elements from a heapq ''' first_tuple = heapq.heappop(heap1) second_tuple = heapq.heappop(heap1) return (first_tuple, second_tuple) def _make_heap(self, symbol_list): ''' This function creates a min heap from the symbol list. ''' li = [] # start with a list heapq.heapify(li) # heapify it # run through symbol list, create tree nodes, and add tuples to a heap for elem in symbol_list: # convert each symbol pair to a treenode weight = elem[1] # grab the weight new_node = self.TreeNode() # create a new treenode new_node.symbol = elem[0] # set the symbol to the first element, the letter new_node.min_element = elem[0] # set the min element initially to be the letter as well temp = (weight, new_node.min_element, new_node) # create a temp tuple with the weight and treenode heapq.heappush(li, temp) # heap push the tuple return li # return the li list (heapified) # Encodes a string of characters into a string of bits using the # symbol/weight list provided. def encode(self, s): ''' Takes a string s and encodes it into 0s and 1s based on traversing the huffman tree. ''' assert(s is not None) # if s is '', return same if len(s) == 0: return '' encoded_str = '' # the return ans # for every char in s for char in s: char_code = '' # use the encode helper function encoded_char = self._encode_helper(self.root, char, char_code) encoded_str += encoded_char # append to the running string return encoded_str def _encode_helper(self, root, char, char_code): ''' A helper function that recursively traverses the huffman tree until it reaches a leaf node, returning the symbol value ''' # if a leaf node, it will have a letter if root.symbol is not None: # if it's the right letter, return the char_code if root.symbol == char: return char_code # else it's not the letter, so return empty str else: return '' # if not a leaf node, recursively call, going both direction # will be empty is it doesn't find the leaf node matching the symbol left = self._encode_helper(root.left, char, char_code + '0') right = self._encode_helper(root.right, char, char_code + '1') return left + right # return both, since one will be empty # Decodes a string of bits into a string of characters using the # symbol/weight list provided. def decode(self,encoded_msg): ''' This function decodes a 0s and 1s string into letters, using a helper function to loop through the string, finding one letter at a time. ''' assert(encoded_msg is not None) # if string len empty, return '' if len(encoded_msg) == 0: return '' msg_so_far = '' # the starting message # call the helper function decoded_msg = self._decode_helper(self.root, encoded_msg, msg_so_far) return decoded_msg def _decode_helper(self, root, encoded_msg, msg_so_far): ''' This helper function loops through the encoded message and traverses the tree based on a 0 or 1, going left or right, respectively. When it reaches a leaf node, it appends a letter, and starts the current root node back to the top of the tree. ''' # loop through encoded 0s and 1s while len(encoded_msg) > 0: # if leaf node if root.symbol is not None: msg_so_far += root.symbol # append to msg root = self.root # restart root from the top continue next_char = encoded_msg[0] # peek at the next char # if 0 or 1, go left or right if next_char == '0': root = root.left elif next_char == '1': root = root.right # after, remove the first letter in encoded message encoded_msg = encoded_msg[1:] # update the encoded msg # need to do one more check after while loop if root.symbol is not None: msg_so_far += root.symbol # append to msg # have leftover str so cant decode else: # the way to tell if the encoded message can't be decoded is # when the last letter traverses the tree and lands on a # non leaf node, in which case, return None return None # return the decoded message return msg_so_far
06ffc733364817a003fbc06317711d2285efd51f
[ "Python" ]
4
Python
exnx/huffman-encoding
2980a03371ab986e656dd4885c859fdd2e4d6d67
23e3b76a5b41ed9bcaea0fb93bc05a4c5c36bb2b
refs/heads/master
<file_sep>#!/usr/bin/env python # coding=utf8 # Filename: monitor_idc.py # Last modified: 2013-04-23 16:54 # Author: itnihao # Mail: <EMAIL> # Description: import urllib, urllib2,sys,re monitor_item = sys.argv[1] idc = sys.argv[2] url = "http://www.iqm.cn/index.php/Member/RTTask/getmonitorInfoByAjax" page_url = "http://www.iqm.cn/index.php/Member/RTTask" web_monitor = "http://testidc.orshsoft.com" data = "monitorip="+idc + "&url="+web_monitor+"&host=0.0.0.0&bandwidth=512&task_type=get" data = data.encode("utf8") def web_site_status(): request = urllib2.Request(url,data) opener = urllib2.build_opener(urllib2.HTTPCookieProcessor()) response = opener.open(request) the_page = response.read() values = eval(the_page) #for v in values: # for i in v: # print i, "-------------",v[i] for v in values: code=v["time"] pat = re.compile(r'HTTP.+?OK') code_status= re.findall(pat, code) if monitor_item == "status": print str(code_status)[13:16] else: print v[monitor_item] def idc_site(): s=urllib2.urlopen(page_url).read() pat=re.compile(r'type="checkbox" value=".+?[0-9]+" id=') urls= re.findall(pat, s) for i in urls: i=i.replace('type="checkbox" value="', '') idc= i.replace('" id=', '') print idc #idc_site() web_site_status() <file_sep>#!/usr/bin/env python # coding=utf8 # Last modified: 2013-04-12 14:47 # Author: itnihao # Mail: <EMAIL> import os import json r=open('WEB.txt','r').read().split() devices = [] for devpath in r: device = os.path.basename(devpath) devices += [{'{#SITENAME}':device}] print json.dumps({'data':devices},sort_keys=True,indent=7,separators=(',',':')) <file_sep>#!/bin/bash # function:monitor tcp connect status from zabbix # License: GPL # mail:<EMAIL> # version:1.0 date:2013-01-16 source /etc/bashrc >/dev/null 2>&1 source /etc/profile >/dev/null 2>&1 [ -f /etc/zabbix/scripts/WEB.txt ] [ "$?" != 0 ] && touch /etc/zabbix/scripts/website/website.txt #/usr/bin/curl -o /dev/null -s -w %{http_code} http://$1/ WEB_SITE_discovery () { WEB_SITE=($(cat /etc/zabbix/scripts/website/website.txt|grep -v "^#")) printf '{\n' printf '\t"data":[\n' for((i=0;i<${#WEB_SITE[@]};++i)) { num=$(echo $((${#WEB_SITE[@]}-1))) if [ "$i" != ${num} ]; then printf "\t\t{ \n" printf "\t\t\t\"{#SITENAME}\":\"${WEB_SITE[$i]}\"},\n" else printf "\t\t{ \n" printf "\t\t\t\"{#SITENAME}\":\"${WEB_SITE[$num]}\"}]}\n" fi } } web_site_code () { for i in 1 2 3 do code=$(/usr/bin/curl -o /dev/null --connect-timeout 0.5 -s -w %{http_code} -A "Zabbix Web Monitor" $1) if [ ${code} != '000' ];then break fi done if [ ${code} == '000' ];then echo 1 else echo ${code} fi } case "$1" in web_site_discovery) WEB_SITE_discovery ;; web_site_code) web_site_code $2 ;; *) echo "Usage:$0 {web_site_discovery|web_site_code [URL]}" ;; esac <file_sep>#!/usr/bin/env python # coding=utf8 # Last modified: 2013-04-12 14:47 # Author: itnihao # Mail: <EMAIL> import os import json r=open('/proc/mounts','r').read().split("\n") devices = [] for i in r: if len(i) > 0: d=i.split() print d result=d[1].find('chroot') print d[1] if result != 0: devices += [{"{#FSNAME}":d[1],"{#FSTYPE}":d[2]}] #for devpath in r: # device = os.path.basename(devpath) # devices += [{'{#SITENAME}':device}] #print json.dumps({'data':devices},sort_keys=True,indent=7,separators=(',',':')) #{"{#FSNAME}":"\/var\/named\/chroot\/etc\/named.root.key","{#FSTYPE}":"ext4"} <file_sep>``` # sh partitiontables.sh Ready to partition tables. Ready to update permissions of Zabbix user to create routines Enter root DB user: root Enter root password: <PASSWORD> Do you want to backup the database (recommended) (Y/n): y Enter output file, press return for default of /tmp/zabbix.sql Mysqldump succeeded!, proceeding with upgrade... Ready to proceed: Starting yearly partioning at: 2014 and ending at: 2014 With 90 days of daily history Ready to proceed (Y/n): y Altering table: history Altering table: history_log Altering table: history_str Altering table: history_text Altering table: history_uint Altering table: trends Altering table: trends_uint Creating monthly partitions for table: trends Creating monthly partitions for table: trends_uint Creating daily partitions for table: history Creating daily partitions for table: history_log Creating daily partitions for table: history_str Creating daily partitions for table: history_text Creating daily partitions for table: history_uint Ready to apply script to database, this may take a while.(Y/n): y Altering tables history history_log history_str history_text history_uint trends trends_uint trends trends_uint history history_log history_str history_text history_uint Installing procedures Do you want to update the /etc/zabbix/zabbix_server.conf to disable housekeeping (Y/n): n Do you want to update the crontab (Y/n): y The crontab entry can be either in /etc/cron.daily, or added to the crontab for root Do you want to add this to the /etc/cron.daily directory (Y/n): y Enter email of who should get the daily housekeeping reports: ``` <file_sep>#!/bin/bash for group in Zabbix-Server do for items in "Memory usage" "CPU utilization" "Network TCP Connect status" "CPU load" do k=$(echo ${items}|sed "s/ /_/g") python screen_creator.py -c config --add-all-group ${group} "${items}" --hsize=2 --vsize=11 --width=500 --height=100 "${group}_${k}" done done <file_sep>#!/bin/sh ################################################################# # Zabbix trapper monitor ##### # Version: 0.1 - 20011/08/09 # Author: <NAME> < <EMAIL>> ##### # Version: 0.2 - 20012/05/04 # Author: <NAME> < <EMAIL> > ##### # Changelog: # - v0.1: replace (ugly) vfs.fs.size & custom.vfs.dev.* metrics # from customs checks to zabbix trapper implementation # - v0.2: add inode support # change ItemName to custom.ItemName # change /dev/sd* devise support to /dev/xvd* (ubuntu LTS 12.04) ################################################################# # vfs.fs.size metrics df -Pk | awk ' /^\/dev\// { print "- custom.vfs.fs.size[" $6 ",total] " $2 print "- custom.vfs.fs.size[" $6 ",used] " $3 print "- custom.vfs.fs.size[" $6 ",free] " $4 print "- custom.vfs.fs.size[" $6 ",pfree] " 100 - $5 print "- custom.vfs.fs.size[" $6 ",pused] " 0 + $5 }' # vfs.fs.inode metrics df -Pki | awk ' /^\/dev\// { print "- custom.vfs.fs.inode[" $6 ",total] " $2 print "- custom.vfs.fs.inode[" $6 ",used] " $3 print "- custom.vfs.fs.inode[" $6 ",free] " $4 print "- custom.vfs.fs.inode[" $6 ",pfree] " 100 - $5 print "- custom.vfs.fs.inode[" $6 ",pused] " 0 + $5 }' # custom.vfs.dev. grep xvd /proc/diskstats | awk '{ if ( $3=="xvda1" ) disk = "xvda" else disk = $3 print "- custom.vfs.dev.io.active["disk"] "$12 print "- custom.vfs.dev.io.ms["disk"] "$13 print "- custom.vfs.dev.read.ms["disk"] "$7 print "- custom.vfs.dev.read.ops["disk"] "$4 print "- custom.vfs.dev.read.sectors["disk"] "$6 print "- custom.vfs.dev.write.ms["disk"] "$11 print "- custom.vfs.dev.write.ops["disk"] "$8 print "- custom.vfs.dev.write.sectors["disk"] "$10 }' <file_sep> 概述 == screen_create是一个批量创建screen的工具。 功能: == 支持分组创建screen 支持单个主机创建所有screen 用法 == 可以用如下脚本创建一个分组的screen,包括 CPU TCP LOAD指标的screen,如需添加更多图,可以自行添加即可 ``` #!/bin/bash for group in Zabbix-Server do for items in "Memory usage" "CPU utilization" "Network TCP Connect status" "CPU load" do k=$(echo ${items}|sed "s/ /_/g") python screen_creator.py -c config --add-all-group ${group} "${items}" --hsize=2 --vsize=11 --width=500 --height=100 "${group}_${k}" done done ``` <file_sep>``` shell#rpm -ivh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm ``` ##install salt-master ``` shell#yum install salt-master ``` ##install salt-minion ``` shell#yum install salt-minion ``` #configuration salt-minion ``` shell#vim /etc/salt/minion master: salt-master.itnihao.com #master IP or DNS id: zabbix-agent.itnihao.com #minion ID ``` #salt-key ``` shell#salt-key -a zabbix-agent.itnihao.com ``` #top.sls ``` shell#mkdir /srv/salt/ ``` #highstate ``` shell#salt '*' state.highstate ``` <file_sep>#!/bin/bash # Script to fetch haproxy statuses for tribily monitoring systems # Author: <EMAIL> # License: GPLv2 # Set Variables IPADDR="www.itnihao.com" PORT="80" DATE=`date +%Y%m%d` #http://www.itnihao.com/backend/haproxy-stats;csv cd /tmp/ wget --http-user=itnihao --http-password=<PASSWORD>ao $IPADDR:$PORT/backend/haproxy-stats\;csv -O haproxy_stats_$DATE.main.csv -o /dev/null FILE="/tmp/haproxy_stats_$DATE.main.csv" # Write the functions # Status of Servers function fend_status { grep "$1," $FILE | grep FRONTEND | cut -f18 -d, } function bend_status { grep "$1," $FILE | grep BACKEND | cut -f18 -d, } # Queue Informations function bend_qcur { grep "$1," $FILE | grep BACKEND | cut -f3 -d, } function bend_qmax { grep "$1," $FILE | grep BACKEND | cut -f4 -d, } # Session Informations function fend_scur { grep "$1," $FILE | grep FRONTEND | cut -f5 -d, } function fend_smax { grep "$1," $FILE | grep FRONTEND | cut -f6 -d, } function bend_scur { grep "$1," $FILE | grep BACKEND | cut -f5 -d, } function bend_smax { grep "$1," $FILE | grep BACKEND | cut -f6 -d, } function fend_stot { grep "$1," $FILE | grep FRONTEND | cut -f8 -d, } function bend_stot { grep "$1," $FILE | grep BACKEND | cut -f8 -d, } # Traffic Information function fend_bytes_in { grep "$1," $FILE | grep FRONTEND | cut -f9 -d, } function fend_bytes_out { grep "$1," $FILE | grep FRONTEND | cut -f10 -d, } function bend_bytes_in { grep "$1," $FILE | grep BACKEND | cut -f9 -d, } function bend_bytes_out { grep "$1," $FILE | grep BACKEND | cut -f10 -d, } # Error Information function fend_err_req { grep "$1," $FILE | grep FRONTEND | cut -f13 -d, } function bend_err_conn { grep "$1," $FILE | grep BACKEND | cut -f14 -d, } function bend_err_resp { grep "$1," $FILE | grep BACKEND | cut -f15 -d, } # Warning Information function bend_warn_retr { grep "$1," $FILE | grep BACKEND | cut -f16 -d, } function bend_warn_redis { grep "$1," $FILE | grep BACKEND | cut -f17 -d, } # Downtime Information function bend_down_cur { STATUS=$(grep "$1," $FILE | grep BACKEND | cut -f18 -d,) if [ "$STATUS" == "DOWN" ] then grep "$1," $FILE | grep BACKEND | cut -f24 -d, else echo "0" fi } function bend_down_tot { grep "$1," $FILE | grep BACKEND | cut -f25 -d, } # Uptime Information function bend_uptime_cur { STATUS=`grep "$1," $FILE | grep BACKEND | cut -f18 -d,` if [ "$STATUS" == "UP" ] then grep "$1," $FILE | grep BACKEND | cut -f24 -d, else echo "0" fi } # Version Information # Donot edit this section ever function tribily_ver { VERSION="1.0" echo ${VERSION} } # Run the requested function case "$1" in fend_status) fend_status $2 ;; bend_status) bend_status $2 ;; bend_qcur) bend_qcur $2 ;; bend_qmax) bend_qmax $2 ;; fend_scur) fend_scur $2 ;; fend_smax) fend_smax $2 ;; bend_scur) bend_scur $2 ;; bend_smax) bend_smax $2 ;; fend_stot) fend_stot $2 ;; bend_stot) bend_stot $2 ;; fend_bytes_in) fend_bytes_in $2 ;; fend_bytes_out) fend_bytes_out $2 ;; bend_bytes_in) bend_bytes_in $2 ;; bend_bytes_out) bend_bytes_out $2 ;; fend_err_req) fend_err_req $2 ;; bend_err_conn) bend_err_conn $2 ;; bend_err_resp) bend_err_resp $2 ;; bend_warn_retr) bend_warn_retr $2 ;; bend_warn_redis) bend_warn_redis $2 ;; bend_down_cur) bend_down_cur $2 ;; bend_down_tot) bend_down_tot $2 ;; bend_uptime_cur) bend_uptime_cur $2 ;; *) echo "Useage $0 {fend_status|bend_status|bend_qcur|bend_qmax|fend_scur|fend_smax|bend_scur|bend_smax|fend_stot|bend_stot|fend_bytes_in|fend_bytes_out|bend_bytes_in|bend_bytes_out|fend_err_req|bend_err_conn|bend_err_resp|bend_warn_retr|bend_warn_redis|bend_down_cur|bend_down_tot|bend_uptime_cur} " ;; esac # Clean up #/bin/rm $FILE <file_sep>#解决的问题 Zabbix对文件系统的监控,是通过LLD来自动添加实现的,在通过Zabbix监控bind服务器的文件系统时,当bind启用了chroot,会把chroot的目录给监控上,但这个目录是虚拟的,不具备参考意义,所以需要重新定义LLD的规则(把原先的文件系统LLD规则停用) #用法 ##1.脚本的存放位置 ``` shell# tree /etc/zabbix/ /etc/zabbix/ ├── scripts │   ├── \ │   └── fileSystemDiscoveryNotBind.py #权限775 ├── zabbix_agentd.conf └── zabbix_agentd.d ├── fileSystemDiscoveryNotBind.conf └── userparameter_mysql.conf ``` 如图所示 ![图1](img/000-2.png) ##2.测试 shell# zabbix_get -s 127.0.0.1 -k bind.vfs.fs.discovery ``` { "data":[ { "{#FSNAME}":"/", "{#FSTYPE}":"rootfs" }, { "{#FSNAME}":"/proc", "{#FSTYPE}":"proc" }, { "{#FSNAME}":"/sys", "{#FSTYPE}":"sysfs" }, { "{#FSNAME}":"/dev", "{#FSTYPE}":"devtmpfs" }, { "{#FSNAME}":"/dev/pts", "{#FSTYPE}":"devpts" }, { "{#FSNAME}":"/dev/shm", "{#FSTYPE}":"tmpfs" }, { "{#FSNAME}":"/", "{#FSTYPE}":"ext4" }, { "{#FSNAME}":"/proc/bus/usb", "{#FSTYPE}":"usbfs" }, { "{#FSNAME}":"/boot", "{#FSTYPE}":"ext4" }, { "{#FSNAME}":"/data", "{#FSTYPE}":"ext4" }, { "{#FSNAME}":"/proc/sys/fs/binfmt_misc", "{#FSTYPE}":"binfmt_misc" } ] } ``` ##3.导入模板 Configration-Templates-Import-选择zbx_export_templates_not_bind_dir_LLD.xml ##创建Discovery rule (模板里面已经存在) ![图1](img/000-0.png) ##创建Item prototypes (模板里面已经存在) ![图1](img/000-1.png) ### ##4.将模板Template OS Linux not bind dir链接到bind所在的主机 注意:需要将Template OS Linux去掉(Unlink and clear) #现象以及原因的深度剖析 ### 现在,我们有个需求,就是要去掉对bind-chroot目录的监控,即以下的目录 ``` /var/named/chroot/etc/named /var/named/chroot/var/named /var/named/chroot/etc/named.conf /var/named/chroot/etc/named.rfc1912.zones /var/named/chroot/etc/rndc.key /var/named/chroot/usr/lib64/bind /var/named/chroot/etc/named.iscdlv.key /var/named/chroot/etc/named.root.key ``` ### 在Zabbix的Web界面,可以看到LLD添加的bind-chroot分区监控 ![图1](img/001.png) ![图2](img/002.png) ![图3](img/003.png) ###从代码分析 shell#vim zabbix-2.2.4/src/libs/zbxsysinfo/linux/diskspace.c ``` int VFS_FS_DISCOVERY(AGENT_REQUEST *request, AGENT_RESULT *result) { int ret = SYSINFO_RET_FAIL; char line[MAX_STRING_LEN], *p, *mpoint, *mtype; FILE *f; struct zbx_json j; zbx_json_init(&j, ZBX_JSON_STAT_BUF_LEN); zbx_json_addarray(&j, ZBX_PROTO_TAG_DATA); if (NULL != (f = fopen("/proc/mounts", "r"))) { while (NULL != fgets(line, sizeof(line), f)) { if (NULL == (p = strchr(line, ' '))) continue; mpoint = ++p; if (NULL == (p = strchr(mpoint, ' '))) continue; *p = '\0'; mtype = ++p; if (NULL == (p = strchr(mtype, ' '))) continue; *p = '\0'; zbx_json_addobject(&j, NULL); zbx_json_addstring(&j, "{#FSNAME}", mpoint, ZBX_JSON_TYPE_STRING); zbx_json_addstring(&j, "{#FSTYPE}", mtype, ZBX_JSON_TYPE_STRING); zbx_json_close(&j); } zbx_fclose(f); ret = SYSINFO_RET_OK; } ``` ![图4](img/004.png) ![图5](img/005.png) <file_sep>1.安装oracle的客户端软件包 rpm -ivh oracle-instantclient11.2-basic-11.2.0.3.0-1.x86_64.rpm rpm -ivh oracle-instantclient11.2-devel-11.2.0.3.0-1.x86_64.rpm rpm -ivh oracle-instantclient11.2-sqlplus-11.2.0.3.0-1.x86_64.rpm rpm -ivh cx_Oracle-5.1.2-11g-py26-1.x86_64.rpm rpm -ivh python-argparse-1.2.1-2.el6.noarch.rpm 2.库文件路径 #vim /etc/ld.so.conf.d/oracle.conf #添加如下内容 /usr/lib/oracle/11.2/client64/lib #ldconfig -v 3.脚本配置文件 #cp pyora.py /etc/zabbix/scripts/pyora.py #cp py_oracle.conf /etc/zabbix/zabbix_agentd.conf.d/oracle.conf 4.web导入xml文件ORACLE_zbx_templates.xml host里面设置宏变量 Macro Value {$ADDRESS} 192.168.153.153 {$ARCHIVE} VGDATA {$DATABASE} clouddb {$PASSWORD} <PASSWORD> {$USERNAME} testuser 出自http://bicofino.io/blog/2013/12/09/monitoring-oracle-with-zabbix/ <file_sep>#Percona-Server数据库的二进制安装方法 #下载文件 ``` #wget http://www.percona.com/redir/downloads/Percona-Server-5.6/LATEST/binary/linux/x86_64/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64.tar.gz #tar xf Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64.tar.gz #mv Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64 /usr/local/ #cd /usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/ #cp support-files/mysql.server /etc/init.d/mysqld ``` #建立用户 ``` #groupadd -g 27 mysql #useradd -g 27 -s /sbin/nologin mysql ``` #改变权限 ``` #chown -R mysql.mysql /usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/ ``` #配置环境变量 ``` #vim ~/.bash_profile PATH=PATH:HOME/bin:/usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/bin ``` 提示:如果路径不为/usr/local,则需要修改启动脚本/etc/init.d/mysqld #启动percona-server服务 注意不能存在文件/etc/my.cnf,否则,由于my.cnf里的不正确配置而导致mysql不能正常启动,因为mysqld脚本里面默认路径会去找/etc/my.cnf这个文件。 ``` #mysqld --verbose --help|grep my.cnf my.cnf将会存在于以下路径,依次为优先级匹配。 /etc/my.cnf /etc/mysql/my.cnf /usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/etc/my.cnf ~/.my.cnf ``` 但在测试的时候,发现并未读取 ``` /usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/etc/my.cnf ``` #配置my.cnf文件 ``` # cat /usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/etc/my.cnf [mysqld] datadir=/usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/data socket=/usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/var/run/mysql.sock user=mysql # Disabling symbolic-links is recommended to prevent assorted security risks symbolic-links=0 character-set-server=utf8 innodb_file_per_table=1 [mysqld_safe] log-error=/usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/var/log/mysqld.log pid-file=/usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/var/run/mysqld/mysqld.pid ``` 以上my.cnf为简单的参数配置,后期还需要对此进行调整 ``` #mkdir -p /usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/var/run #mkdir -p /usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/var/log #mkdir -p /usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/etc ``` #初始化mysql ``` #mkdir /opt/bak #mv /etc/my.cnf /opt/bak #./scripts/mysql_install_db \ --user=mysql \ --basedir=/usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/ \ --datadir=/usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/data/ #./bin/mysqld_safe & #改变权限 ``` #chown -R mysql.mysql /usr/local/Percona-Server-5.6.15-rel63.0-519-static-openssl-1.0.1e.Linux.x86_64/ ``` #chkconfig mysqld on #/etc/init.d/mysqld start ``` <file_sep>#!/bin/bash # function:monitor nginx from zabbix # License: GPL # mail:<EMAIL> # version 1.0 date:2012-12-09 # version 1.0 date:2013-01-15 #nginx.confÅäÖÃÈçÏ ###################################################################### # server { # listen 127.0.0.1:80; # server_name 127.0.0.1; # location /nginxstatus { # stub_status on; # access_log off; # allow 127.0.0.1; # allow 192.168.11.0/24; # deny all; # } # location ~ ^/(phpfpmstatus)$ { # include fastcgi_params; # fastcgi_pass unix:/tmp/fpm.sock; # fastcgi_param SCRIPT_FILENAME $fastcgi_script_name; # } # } ###################################################################### #HOST=$(ifconfig eth0 | sed -n '/inet /{s/.*addr://;s/ .*//;p}') #PORT="80" source /etc/bashrc >/dev/null 2>&1 source /etc/profile >/dev/null 2>&1 nginx_conf_path=/etc/nginx/nginx.conf nginx_site_dicovery() { NGX_WEB_SITE=($(awk '/server_name / {print $2}' ${nginx_conf_path}|sed "s/;//")) [ ${#NGX_WEB_SITE[@]} == 0 ] && echo "" && exit 1 printf '{\n' printf '\t"data":[\n' for((i=0;i<${#NGX_WEB_SITE[@]};++i)) { num=$(echo $((${#NGX_WEB_SITE[@]}-1))) if [ "$i" != ${num} ]; then printf "\t\t{ \n" printf "\t\t\t\"{#NGX_SITE_NAME}\":\"${NGX_WEB_SITE[$i]}\"},\n" else printf "\t\t{ \n" printf "\t\t\t\"{#NGX_SITE_NAME}\":\"${NGX_WEB_SITE[$num]}\"}]}\n" fi } } # Functions to return nginx stats function active { curl "http://$1/nginxstatus" | grep 'Active' | awk '{print $3}' } function reading { curl "http://$1/nginxstatus" | grep 'Reading' | awk '{print $2}' } function writing { curl "http://$1/nginxstatus" | grep 'Writing' | awk '{print $4}' } function waiting { curl "http://$1/nginxstatus" | grep 'Waiting' | awk '{print $6}' } function accepts { curl "http://$1/nginxstatus" | awk NR==3 | awk '{print $1}' } function handled { curl "http://$1/nginxstatus" | awk NR==3 | awk '{print $2}' } function requests { curl "http://$1/nginxstatus" | awk NR==3 | awk '{print $3}' } case "$1" in nginx_site_dicovery) nginx_site_dicovery ;; active) active $2 ;; reading) reading $2 ;; writing) writing $2 ;; waiting) waiting $2 ;; accepts) accepts $2 ;; handled) handled $2 ;; requests) requests $2 ;; *) echo "Usage: $0 {nginx_site_dicovery}" echo "Usage: $0 {active [host]|reading [host]|writing [host]|waiting [host]|accepts [host]|handled [host]|requests [host]}" esac <file_sep>#!/bin/bash # function:monitor redisstatus from zabbix # License: GPL # mail:<EMAIL> # version:1.0 date:2013-02-04 #chmod 4755 $(which netstat) redis_discovery () { port=($(netstat -nlput|awk -F":" '/redis/ {print $0}'|awk -F: '{print $2}'|awk '{print $1}'|grep -v "^$")) [ "${port[0]}" == "" ] && exit printf '{\n' printf '\t"data":[\n' for((i=0;i<${#port[@]};++i)) { num=$(echo $((${#port[@]}-1))) if [ "$i" != ${num} ]; then printf "\t\t{ \n" printf "\t\t\t\"{#REDISPORT}\":\"${port[$i]}\"},\n" else printf "\t\t{ \n" printf "\t\t\t\"{#REDISPORT}\":\"${port[$num]}\"}]}\n" fi } } case "$1" in redis_discovery) redis_discovery ;; *) echo "Usage: $0 {redis_discovery}" ;; esac <file_sep>##《Zabbix企业级分布式监控系统》 本项目是《Zabbix企业级分布式监控系统》一书的附件部分(不仅限于附件,还有扩充内容),该书可在各大网站买到。 <table> <tr> <td>网站名称</td> <td>链接</td> </tr> <tr> <td>亚马逊</td> <td>http://www.amazon.cn/3/dp/B00MN6QEYK</td> </tr> <tr> <td>china-pub</td> <td>http://product.china-pub.com/4275086</td> </tr> <tr> <td>京东</td> <td>http://t.cn/RPY5JtR</td> </tr> <tr> <td>当当</td> <td>http://t.cn/RPHauKF</td> </tr> </tr> <tr> <td>官方收录地址</td> <td>http://www.zabbix.com/documentation.php</td> </tr> </table> === QQ交流群 ``` 本书的读者QQ群【Zabbix群加群验证信息:Zabbix监控】 : Zabbix企业级分布式监控 2群 189342378(500人可加) Zabbix企业级分布式监控 1群 271659981(1000人已满) ``` ![图1](static/img/zabbix-QQ-group-1.jpeg) === 勘误列表 ``` https://github.com/itnihao/zabbix-book/blob/master/error-fix/README.md ``` === 该书的目录如下: 第一部分Zabbix基础 = 第一章,监控系统简介 ==================================== ``` 1.1为何需要监控系统 1.2监控系统的实现 1.3监控系统的开源软件现状 1.3.1 MRTG 1.3.2 Cacti 1.3.3 SmokePing 1.3.4 Graphite 1.3.5 Nagios 1.3.6 Zenoss Core 1.3.7 Ganglia 1.3.8 OpenTSDB 1.3.9 Zabbix 1.4监控系统的原理探究 ``` 第二章,Zabbix简介 == ``` 2.1 Zabbix的客户 2.2 使用Zabbix的准备 2.3 Zabbix为何物 2.4 选择Zabbix的理由 2.5 Zabbix的架构 2.6 zabbix运行流程 2.7 Zabbix功能特性 ``` 第三章,安装部署 == ``` 3.1 安装环境概述 3.1.1 硬件条件 3.1.2 软件条件 3.1.3 部署环境的考虑 3.2 zabbix_server服务端的安装 3.2.1 安装 Zabbix-Server 3.2.2 安装 MySQL 数据库服务 3.2.3 配置 zabbix_server.conf 3.2.4 防火墙、Selinux 和权限的设置 3.2.5 配置 Web 界面 3.2.6 故障处理 .... 3.3 zabbix-agent客户端的安装 3.3.1 安装 Zabbix-Agent 3.3.2 防火墙的设置 3.3.3 配置 zabbix_agentd.conf 3.4 snmp监控方式的安装配置 3.5 Windows上安装zabbix-agent 3.6 其他平台的安装 3.7 zabbix_get的使用 3.8 zabbix术语(命令)相关 3.9 Zabbix-server对数据的存储 3.9.1 Zabbix 对数据存储 3.9.2 MySQL 表分区实例 3.10 Zabbix init脚本解释 3.11安全和高可用 3.12zabbix数据库的备份 ``` 第四章,快速配置使用 == ``` 4.1配置流程 4.2主机组的添加 4.3模板的添加 4.4添加主机 4.5 Graphs的配置 4.6 screen的配置 4.7 Slide shows的配置 4.8 zatree的使用 4.9 map的配置 4.10 WEB监控 4.10.1 Web 监控的原理 4.10.2 Web 监控指标 4.10.3 Zabbix 中 Web 监控的配置 4.10.4 认证的支持 4.10.5 触发器的设置 4.10.6 排错 4.11 IT服务 4.12报表 4.13资产管理 ``` 第五章,深入配置使用 == ``` 5.1 Items的添加 5.1.1 Items 的含义 5.1.2 如何添加 Items 5.2 Items key的添加 5.3 ITEMS的类型 5.3.1 Zabbix-Agent 5.3.2 Simple check 5.3.3 日志监控方式 5.3.4 监控项计算(Calculated) 5.3.5 聚合检测(Aggregate) 5.3.6 内部检测(Internal) 5.3.7 SSH、Telnet 和扩展检测 . 5.4宏的配置 5.5维护时间 5.6事件确认 5.7数据的导入导出配置 ``` 第六章 ,告警的配置 == ``` 6.1 告警的概述 6.2Trigger的配置 6.2.1 Trigger 的状态 6.2.2 Trigger 的配置步骤 6.2.3 Trigger 告警依赖 6.2.4 Trigger 正则中的单位 6.2.5 Trigger 表达式举例 6.2.6 Trigger 函数 6.3添加 Actions 6.3.1 Actions 概述 6.3.2 Actions 的配置 6.3.3 Conditions 的配置 6.3.4 Operations 的功能 6.3.5 告警消息发送的配置 6.3.6 执行远程命令的配置 6.4邮件告警配置实例 6.4.1 创建 Media 6.4.2 创建用户 6.4.3 创建 Actions 6.4.2创建用户 6.5自定义脚本告警 6.6邮件告警脚本的配置实例 6.7告警升级的机制 6.8告警配置故障排查 ``` 第二部分,zabbix中级部分 = 第七章,监控方式剖析 == ``` 7.1 Zabbix支持的监控方式 7.2 Zabbix监控方式的逻辑 7.3 agent监控方式 7.4 Trapper监控方式 7.4.1 Trapper 的配置步骤 7.4.2 Trapper 的配置示例 7.4.3 使用 zabbix-sender 发送数据 7.5 SNMP监控方式 7.5.1 SNMP 概述 7.5.2 SNMP 协议的运行 7.5.3 SNMP 协议原理 7.5.4 MIB 简 7.5.5 SNMP 的相关术语 7.5.6 配置 Zabbix 以 SNMP 方式监控 7.6 IPMI监控方式 7.7 JMX监控方式 7.7.1 JMX 在 Zabbix 中的运行流程 7.7.2 配置 JMX 监控的步骤 7.7.3 安装 Zabbix-Java-Gateway 7.7.4 配置 Zabbix-Java-Gateway 7.7.5 监控 Java 应用程序 7.7.6 自定义 JMX 的 Key 7.7.7 监控 Tomcat 7.7.8 Weblogic 的监控 7.8命令执行 ``` 第八章,分布式监控 == ``` 8.1代理架构 8.2节点架构 8.3主动模式和被动模式 8.3.1 被动模式 8.3.2 主动模式 ``` 第九章,Zabbix与自动化运维 == ``` 9.1监控自动化 9.2网络发现 9.3主动方式的自动注册 9.3.1 功能概述 9.3.2 主动方式自动注册的配置 9.3.3 使用 Host metadata 9.3.4 关于自动注册的注意事项 9.4 low level discovery 9.4.1 现实案例需求 9.4.2 Zabbix 客户端配置 9.4.3 Low level discovery 自动发现脚本编写 9.4.4 自定义 Key 配置文件 9.4.5 Web 页面添加 Low level discovery . 9.5 Zabbix在自动化运维工具的使用 ``` 第十章,使用的经验技巧 == ``` 10.1如何有效的设置监控告警 10.2监控项的使用技巧 10.3触发器的使用技巧 10.4触发器配置 10.5谷歌浏览器告警插件 10.6数据图断图的原因 ``` 第十一章,监控案例 == ``` 11.1监控tcp连接数 11.2监控nginx 11.3监控php-fpm 11.4监控mysql 11.4.1 用自带的模板监控 MySQL 11.4.2 用 Percona Monitoring Plugins 监控 MySQL . 11.5监控tomcat,weblogic 11.6监控dell服务器 11.7监控Cisco路由器 11.8监控VMware 11.9 hadoop监控 11.10 更多监控 ``` 第三部分Zabbix高级部分 = 第十二章,性能优化 == ``` 12.1Zabbix的性能优化的概述 12.2Zabbix的性能优化的依据 12.3配置文件的参数优化 12.4 Zabbix的架构优化 12.5 Zabbix的items中工作模式以及Trigger的优化 12.6 Zabbix的数据库优化 12.7其他方面的 ``` 第十三章,Zabbix API的使用 == ``` 13.1 Zabbix API简介 13.2什么是json-rpc 12.3Zabbix API的使用流程 13.3.1 使用 API 的基本步骤 13.3.2 如何使用官方文档获取帮助 13.3.3 用 CURL 模拟 API 的使用 13.3.4 HTTP 头部 Content-Type 设置 13.3.5 关于用户认证 13.3.6 获取主机信息(用 Python 写的示例) 13.3.7 添加 Host 13.3.8 删除 Host 13.4第三方zabbixAPI模块 ``` 第十四章,使用Zabbix协议 == ``` 14.1 Zabbix协议概述 14.2 Zabbix sender协议 14.2.1 Sender 数据发送 14.2.2 Server 对数据响应的处理 14.2.3 Zabbix-Sender 的实例 14.2 Zabbix get协议 14.2 Zabbix agent协议 ``` 第十五章,定制Zabbix安装包 == ``` 15.1为什么需要定制安装包 15.2如何定制安装包 ``` 第十六章,大型分布式监控案例 == ``` 16.1监控系统构建的概述 16.2监控环境架构图 16.3架构实现的过程 16.3.1 硬件和软件需求 16.3.2 Zabbix DB 的安装 16.3.3 安装 Zabbix-Server 16.3.4 安装 Zabbix-GUI 16.3.5 安装 Zabbix-Proxy 16.3.6 配置 Zabbix-Agent 16.4业务相关的配置 16.4.1 用户的配置 16.4.2 业务组的配置 16.4.3 监控模板的定制 16.4.4 自动发现的配置 16.5其他需求 ``` 第四部分 附录 = 第十七章,源码安装及相关配置 == ``` 17.1安装zabbix-server 17.2 Zabbix-agent的安装 17.3 关于zabbix的升级 ``` === 开源文档 ``` 《Zabbix使用手册V2.0》下载地址http://pan.baidu.com/s/1qWDHXkK 提取密码为<PASSWORD> ``` <file_sep>#!/bin/bash # function:monitor tcp connect status from zabbix # License: GPL # mail:<EMAIL> # version:1.0 date:2013-01-17 source /etc/bashrc >/dev/null 2>&1 source /etc/profile >/dev/null 2>&1 IPADDR="www.itnihao.com" PORT="80" DATE=`date +%Y%m%d` cd /tmp/ wget --http-user=itnihao --http-password=<PASSWORD> $IPADDR:$PORT/backend/haproxy-stats\;csv -O haproxy_stats_$DATE.discovery.csv -o /dev/null FILE="/tmp/haproxy_stats_$DATE.discovery.csv" haproxy_front_discovery () { FRONTEND=($(awk -F"," '/FRONTEND/ {print $1}' ${FILE})) printf '{\n' printf '\t"data":[\n' for((i=0;i<${#FRONTEND[@]};++i)) { num=$(echo $((${#FRONTEND[@]}-1))) if [ "$i" != ${num} ]; then printf "\t\t{ \n" printf "\t\t\t\"{#FRONTEND}\":\"${FRONTEND[$i]}\"},\n" else printf "\t\t{ \n" printf "\t\t\t\"{#FRONTEND}\":\"${FRONTEND[$num]}\"}]}\n" fi } } haproxy_backend_discovery () { BACKEND=($(awk -F"," '/BACKEND/ {print $1}' ${FILE})) printf '{\n' printf '\t"data":[\n' for((i=0;i<${#BACKEND[@]};++i)) { num=$(echo $((${#BACKEND[@]}-1))) if [ "$i" != ${num} ]; then printf "\t\t{ \n" printf "\t\t\t\"{#BACKEND}\":\"${BACKEND[$i]}\"},\n" else printf "\t\t{ \n" printf "\t\t\t\"{#BACKEND}\":\"${BACKEND[$num]}\"}]}\n" fi } } haproxy_backendhost_discovery () { BACKENDHOST=($(awk -F"," '($2 !~ /BACKEND|FRONTEND/ && $1 !~ /#/) {print $2}' ${FILE}|sort|uniq)) printf '{\n' printf '\t"data":[\n' for((i=0;i<${#BACKENDHOST[@]};++i)) { num=$(echo $((${#BACKENDHOST[@]}-1))) if [ "$i" != ${num} ]; then printf "\t\t{ \n" printf "\t\t\t\"{#BACKENDHOST}\":\"${BACKENDHOST[$i]}\"},\n" else printf "\t\t{ \n" printf "\t\t\t\"{#BACKENDHOST}\":\"${BACKENDHOST[$num]}\"}]}\n" fi } } case "$1" in haproxy_front_discovery) haproxy_front_discovery ;; haproxy_backend_discovery) haproxy_backend_discovery ;; haproxy_backendhost_discovery) haproxy_backendhost_discovery ;; *) echo "Usage:$0 {haproxy_front_discovery|haproxy_backend_discovery|haproxy_backendhost_discovery}" ;; esac <file_sep>=============== 1.本RPM包,只支持centos6.X RHEL6.X,如果其他版本系统,需自行rpmbuild. php安装之前需卸载系统自带的php版本 卸载命令为:rpm -qa|grep php|xargs rpm -e rpm -ivh \ libicu-4.2.1-9.1.el6_2.x86_64.rpm \ libmcrypt-2.5.8-9.el6.x86_64.rpm \ libjpeg-turbo-1.2.1-1.el6.x86_64.rpm \ libtidy-0.99.0-19.20070615.1.el6.x86_64.rpm \ php-5.4.25-1.el6.x86_64.rpm 默认已开启php-fpm服务(service php-fpm start) 默认让php-fpm用sock方式,sock文件路径为/var/run/php/php-fpm.sock 以下软件为依赖包,在RHEL系统中需要额外下载 libicu-4.2.1-9.1.el6_2.x86_64.rpm libmcrypt-2.5.8-9.el6.x86_64.rpm libjpeg-turbo-1.2.1-1.el6.x86_64.rpm libtidy-0.99.0-19.20070615.1.el6.x86_64.rpm <file_sep>#scripts and conf ``` /etc/zabbix/scripts/web_site_code_status /etc/zabbix/zabbix_agentd.conf.d/web_site_code.conf mkdir /etc/zabbix/scripts/website/ touch /etc/zabbix/scripts/website/website.txt echo "www.baidu.com" >> /etc/zabbix/scripts/website/website.txt ``` #template import-web-monitor.xml <file_sep>#!/bin/bash # Script to fetch haproxy statuses for tribily monitoring systems # Author: <EMAIL> # License: GPLv2 # Set Variables IPADDR="www.itnihao.com" PORT="80" DATE=`date +%Y%m%d` #http://www.itnihao.com/backend/haproxy-stats;csv cd /tmp/ wget --http-user=itnihao --http-password=<PASSWORD> $IPADDR:$PORT/backend/haproxy-stats\;csv -O haproxy_stats_$DATE.host.csv -o /dev/null FILE="/tmp/haproxy_stats_$DATE.host.csv" # Write the functions # Status of Servers function host_status { grep "$1," $FILE | cut -f18 -d,|sort|uniq } # Queue Informations function host_qcur { grep "$1," $FILE | cut -f3 -d,|awk 'BEGIN{total=0}{total=total+$1}END{print total }' } function host_qmax { grep "$1," $FILE | cut -f4 -d,|awk 'BEGIN{total=0}{total=total+$1}END{print total }' } # Session Informations function host_scur { grep "$1," $FILE | cut -f5 -d,|awk 'BEGIN{total=0}{total=total+$1}END{print total }' } function host_smax { grep "$1," $FILE | cut -f6 -d,|awk 'BEGIN{total=0}{total=total+$1}END{print total }' } function host_stot { grep "$1," $FILE | cut -f8 -d,|awk 'BEGIN{total=0}{total=total+$1}END{print total }' } # Traffic Information function host_bytes_in { grep "$1," $FILE | cut -f9 -d,|awk 'BEGIN{total=0}{total=total+$1}END{print total }' } function host_bytes_out { grep "$1," $FILE | cut -f10 -d,|awk 'BEGIN{total=0}{total=total+$1}END{print total }' } # Error Information function host_err_conn { grep "$1," $FILE | cut -f14 -d,|awk 'BEGIN{total=0}{total=total+$1}END{print total }' } function host_err_resp { grep "$1," $FILE | cut -f15 -d,|awk 'BEGIN{total=0}{total=total+$1}END{print total }' } # Warning Information function host_warn_retr { grep "$1," $FILE | cut -f16 -d,|awk 'BEGIN{total=0}{total=total+$1}END{print total }' } function host_warn_redis { grep "$1," $FILE | cut -f17 -d,|awk 'BEGIN{total=0}{total=total+$1}END{print total }' } # Downtime Information function host_down_cur { STATUS=$(grep "$1," $FILE | cut -f18 -d,|sort|uniq) if [ "$STATUS" == "DOWN" ] then grep "$1," $FILE | cut -f24 -d,|sort|uniq else echo "0" fi } function host_down_tot { grep "$1," $FILE | cut -f25 -d,|awk 'BEGIN{total=0}{total=total+$1}END{print total }' } # Uptime Information function host_uptime_cur { STATUS=`grep "$1," $FILE | cut -f18 -d,`|sort|uniq if [ "$STATUS" == "UP" ] then grep "$1," $FILE | cut -f24 -d,|awk 'BEGIN{total=0}{total=total+$1}END{print total }' else echo "0" fi } # Version Information # Donot edit this section ever function tribily_ver { VERSION="1.0" echo ${VERSION} } # Run the requested function case "$1" in host_status) host_status $2 ;; host_qcur) host_qcur $2 ;; host_qmax) host_qmax $2 ;; host_scur) host_scur $2 ;; host_smax) host_smax $2 ;; host_stot) host_stot $2 ;; host_bytes_in) host_bytes_in $2 ;; host_bytes_out) host_bytes_out $2 ;; host_err_conn) host_err_conn $2 ;; host_err_resp) host_err_resp $2 ;; host_warn_retr) host_warn_retr $2 ;; host_warn_redis) host_warn_redis $2 ;; host_down_cur) host_down_cur $2 ;; host_down_tot) host_down_tot $2 ;; host_uptime_cur) host_uptime_cur $2 ;; *) echo "Useage $0 {host_status|host_qcur|host_qmax|host_scur|host_smax|host_stot|host_bytes_in|host_bytes_out|host_err_conn|host_err_resp|host_warn_retr|host_warn_redis|host_down_cur|host_down_tot|host_uptime_cur} " ;; esac # Clean up #/bin/rm $FILE <file_sep>##《Zabbix企业级分布式监控系统》勘误列表 前言 陈益超改为陈艺超 P22 362+67+4=433 @一期一会 P40 /etc/snmp/snmpd.conf P47 trends_unit(存储非符号的整数)更正为trends_uint表。 P56 将在(16.3)节中 P62 图web app少了下划线 P72 只有部分触发器(函数) P102 表 5-1 Name 选项,**$1、$2…$9** 指的是 Item Key 的第 **1、2…9** 个参数,原文说是「Item 名称」的参数。 P103 表 5-1 Units 选项,与时间相关的单位 **unixtime**、**uptime**、**s** 全部是小写字母。首字母大写导致显示时不能正常转换。 P124 开始部分应该概述功能,第二版增加 P143 '15'(天)代表'86400s'(秒)更正为'1d'(天)代表'86400s'(秒)。@jun-东莞-运维 P153 表6-5 Recovery message写了两遍 @Miku酱 P161 表6-7的Description应该为Name @cexpert P202 缺少proxy的数据同步参数,第二版增加 P203 创建的数据库名称应该为 ``` shell# service mysqld start shell# mysql -uroot -p mysql> use zabbix_proxy; mysql> create database zabbix_proxy character set utf8; mysql> grant all privileges on zabbix_proxy.* to zabbix@localhost identified by 'zabbix'; mysql> flush privileges; ``` @唐文军 P255 nginx监控脚本已经调整,见github-book 11章 fix P256 黑体字是一行的 P265 使用的MySQL脚本需要改变,即261页的MySQL脚本需要改变,见github-book 11章 fix <file_sep>#!/bin/bash # function:monitor lvs connect status from zabbix # License: GPL # mail:<EMAIL> # version:1.0 date:2013-01-22 ActiveConn () { awk '/->/ {sum+=$5} END { print sum-1}' /proc/net/ip_vs } InActConn () { awk '/->/ {sum+=$6} END { print sum-1}' /proc/net/ip_vs } HostNum () { LVSLOG=$(mktemp /var/log/zabbix/lvs.XXXXXX) for name in $(awk '/->/ {print $2}' /proc/net/ip_vs|sort|uniq) do echo $name | awk '{for(i=1;i<=NF;i++) $i=strtonum("0x"$i);print}' FIELDWIDTHS="2 2 2 2" OFS="." >>${LVSLOG} done sort ${LVSLOG}|uniq|wc -l rm ${LVSLOG} } VipPortNum () { awk -F" |:" '/TCP/ {print $4}' /proc/net/ip_vs|sort|uniq|wc -l } UsedHostNum () { awk '/->/ && ($2 ~ "[0-9]") && ($5 !~ 0) {print $0}' /proc/net/ip_vs|wc -l } case "$1" in ActiveConn) ActiveConn ;; InActConn) InActConn ;; HostNum) HostNum ;; VipPortNum) VipPortNum ;; UsedHostNum) UsedHostNum ;; *) echo "Usage $0 {ActiveConn|InActConn|host_num|vip_port_num}" esac #awk '/->/ ($2 != RemoteAddress) {sum+=$4} END { print "Sum = ",sum}' /proc/net/ip_vs #echo '172.16.31.10' | awk -F'.' '{ for(i=1;i<=NF;i++){printf("%X", $i);if(i!=NF){printf("")}} }' #echo "6540B2C6" | awk '{for(i=1;i<=NF;i++) $i=strtonum("0x"$i);print}' FIELDWIDTHS="2 2 2 2" OFS="." <file_sep>#!/usr/bin/python import socket import optparse import sys import struct class ZBXDProtocol(): MAX_KEY_LENGTH = 65536 HEADER = b'ZBXD\1' HEADER_LENGTH = 5 EXPECTED_LENGTH_SIZE = 8 RESPONSE_FORMAT = "<5sq{data_length}s" def receive_value(self, client): """ Receives key and returns it Expects to receive header followed by the length of the key followed by the key. """ received = client.recv(self.HEADER_LENGTH) if received == self.HEADER: expected_length = struct.unpack( 'q', client.recv(self.EXPECTED_LENGTH_SIZE) )[0] key = client.recv(expected_length) else: if '\n' in received: key = received else: key = received + client.recv(self.MAX_KEY_LENGTH) return key.decode('utf-8') def send_value(self, client, value): """ Formats value according to protocol and sends it to client """ message = self._calculate_message(value) client.sendall(message) def _calculate_message(self, value): formatted_value = self._format(value) data_length = len(formatted_value) response = struct.pack( self.RESPONSE_FORMAT.format(data_length=data_length), self.HEADER, data_length, formatted_value ) return response def _format(self, value): if isinstance(value, float): formatted_value = '{0:.4f}'.format(value) else: formatted_value = str(value) return formatted_value option_parser = optparse.OptionParser() option_parser.add_option('-s', '--host', default='127.0.0.1', help='host name or IP address of a host') option_parser.add_option('-p', '--port', type=int, default=10052, help='port number of agent running on the host') option_parser.add_option('-t', '--timeout', type=float, default=1.0, help='socket timeout') options, arguments = option_parser.parse_args() if len(arguments) == 0: print("You must provide key") sys.exit() key = arguments[0] client_socket = None try: client_socket = socket.create_connection((options.host, options.port), options.timeout) protocol = ZBXDProtocol() protocol.send_value(client_socket, key) print(protocol.receive_value(client_socket)) except Exception as e: print("Unable to receive data from agent: {0}".format(e)) finally: if client_socket is not None: client_socket.close() <file_sep>#!/usr/bin/env python # coding=utf8 # Last modified: 2014-08-02 14:11 # Author: itnihao # Mail: <EMAIL> import json devices_arry = [] f=open('/proc/mounts','r') mounts_arry=f.read().split("\n") for line in mounts_arry: if len(line) > 0: fs_arry=line.split() result=fs_arry[1].find('chroot') if result == -1: devices_arry += [{"{#FSNAME}":fs_arry[1],"{#FSTYPE}":fs_arry[2]}] print json.dumps({'data':devices_arry},sort_keys=True,indent=7,separators=(',',':')) <file_sep>#Ubuntu下搭建Zabbix #一、操作系统的安装 安装Ubuntu,过程略 #二、DEB源的配置 参考文档 ``` https://www.zabbix.com/documentation/2.2/manual/installation/install_from_packages ``` 各版本的配置信息如下 ``` Zabbix 2.2 for Debian 6: # wget http://repo.zabbix.com/zabbix/2.2/debian/pool/main/z/zabbix-release/zabbix-release_2.2-1+squeeze_all.deb # dpkg -i zabbix-release_2.2-1+squeeze_all.deb # apt-get update Zabbix 2.2 for Debian 7: # wget http://repo.zabbix.com/zabbix/2.2/debian/pool/main/z/zabbix-release/zabbix-release_2.2-1+wheezy_all.deb # dpkg -i zabbix-release_2.2-1+wheezy_all.deb # apt-get update Zabbix 2.2 for Ubuntu 12.04 LTS: # wget http://repo.zabbix.com/zabbix/2.2/ubuntu/pool/main/z/zabbix-release/zabbix-release_2.2-1+precise_all.deb # dpkg -i zabbix-release_2.2-1+precise_all.deb # apt-get update Zabbix 2.2 for Ubuntu 14.04 LTS: # wget http://repo.zabbix.com/zabbix/2.2/ubuntu/pool/main/z/zabbix-release/zabbix-release_2.2-1+trusty_all.deb # dpkg -i zabbix-release_2.2-1+trusty_all.deb # apt-get update #cat /etc/apt/sources.list.d/zabbix.list #源里面的记录如下 deb http://repo.zabbix.com/zabbix/2.2/ubuntu trusty main deb-src http://repo.zabbix.com/zabbix/2.2/ubuntu trusty main ``` 安装Zabbix-Server 3.1、安装Zabbix-Server ``` # sudo apt-get install zabbix-server-mysql php5-mysql zabbix-frontend-php ``` 3.2、配置zabbix_server.conf ``` # vi /etc/zabbix/zabbix_server.conf DBHost=localhost DBName=zabbix DBUser=zabbix DBPassword=<PASSWORD> ``` 3.3、设置开机启动项 ``` #vim /etc/default/zabbix-server START=yes ``` 3.4、启动zabbix-server服务 ``` #sudo service zabbix-server start ``` 3.5、启动MySQL服务 ``` #sudo service mysql start ``` 3.6、创建Zabbix数据库 ``` # mysql -uroot mysql> create database zabbix character set utf8 collate utf8_bin; mysql> grant all privileges on zabbix.* to zabbix@localhost identified by 'zabbix'; mysql> flush privileges; ``` 3.7、导入Zabbix数据库 ``` #cd /usr/share/zabbix-server-mysql/ #sudo gunzip *.gz #mysql -u zabbix -p zabbix < schema.sql #mysql -u zabbix -p zabbix < images.sql #mysql -u zabbix -p zabbix < data.sql #sudo cp /usr/share/doc/zabbix-frontend-php/examples/zabbix.conf.php.example /etc/zabbix/zabbix.conf.php #sudo cp /usr/share/doc/zabbix-frontend-php/examples/apache.conf /etc/apache2/sites-enabled/apache.conf # cat /etc/apache2/sites-enabled/apache.conf # Define /zabbix alias, this is the default <IfModule mod_alias.c> php_value max_execution_time 300 php_value memory_limit 128M php_value post_max_size 16M php_value upload_max_filesize 2M php_value max_input_time 300 php_value date.timezone Europe/Riga Alias /zabbix /usr/share/zabbix </IfModule> ``` 启动apache ``` #sudo service apache2 restart ``` 3.8、配置Zabbix Web 配置Web的过程略,需要注意事项。 由于apache以www-data用户启动,而配置文件需要写道/etc/zabbix目录,所以需要对其授权。 ``` #chown www-data.www.data /etc/zabbix -R ``` 在web页面安装完成后,可以将其权限改为Zabbix用户所有。 ``` #chown zabbix.zabbix /etc/zabbix -R ``` 配置完成后, ``` #cat /etc/zabbix/zabbix.conf.php <?php // Zabbix GUI configuration file global $DB; $DB['TYPE'] = 'MYSQL'; $DB['SERVER'] = 'localhost'; $DB['PORT'] = '3306'; $DB['DATABASE'] = 'zabbix'; $DB['USER'] = 'zabbix'; $DB['PASSWORD'] = '<PASSWORD>'; // SCHEMA is relevant only for IBM_DB2 database $DB['SCHEMA'] = ''; $ZBX_SERVER = 'localhost'; $ZBX_SERVER_PORT = '10051'; $ZBX_SERVER_NAME = 'zabbix-web-ui'; $IMAGE_FORMAT_DEFAULT = IMAGE_FORMAT_PNG; ?> ``` 安装Zabbix-Agent ``` #sudo apt-get update #sudo apt-get install zabbix-agent #sudo vim /etc/zabbix/zabbix_agentd.conf Server=10.10.10.10 Hostname=Web-DB-001 #sudo service zabbix-agent restart ``` 登录Zabbix Web 访问http://10.10.10.10/zabbix Username = Admin Password = <PASSWORD> <file_sep>#scripts and conf ``` #/etc/zabbix/scripts/check_harddisk.sh #/etc/zabbix/zabbix_agentd.conf.d/userparameter_harddisk.conf chown zabbix.zabbix /etc/zabbix/scripts/check_harddisk.sh chmod 755 /etc/zabbix/scripts/check_harddisk.sh ``` #template import-inux_disk_io_template.xml <file_sep>关于Redis的监控,请参考 http://redis.readthedocs.org/en/latest/server/info.html ###INFO [section] 以一种易于解释(parse)且易于阅读的格式,返回关于 Redis 服务器的各种信息和统计数值。 通过给定可选的参数 section ,可以让命令只返回某一部分的信息: ``` server : 一般 Redis 服务器信息,包含以下域: redis_version : Redis 服务器版本 redis_git_sha1 : Git SHA1 redis_git_dirty : Git dirty flag os : Redis 服务器的宿主操作系统 arch_bits : 架构(32 或 64 位) multiplexing_api : Redis 所使用的事件处理机制 gcc_version : 编译 Redis 时所使用的 GCC 版本 process_id : 服务器进程的 PID run_id : Redis 服务器的随机标识符(用于 Sentinel 和集群) tcp_port : TCP/IP 监听端口 uptime_in_seconds : 自 Redis 服务器启动以来,经过的秒数 uptime_in_days : 自 Redis 服务器启动以来,经过的天数 lru_clock : 以分钟为单位进行自增的时钟,用于 LRU 管理 clients : 已连接客户端信息,包含以下域: connected_clients : 已连接客户端的数量(不包括通过从属服务器连接的客户端) client_longest_output_list : 当前连接的客户端当中,最长的输出列表 client_longest_input_buf : 当前连接的客户端当中,最大输入缓存 blocked_clients : 正在等待阻塞命令(BLPOP、BRPOP、BRPOPLPUSH)的客户端的数量 memory : 内存信息,包含以下域: used_memory : 由 Redis 分配器分配的内存总量,以字节(byte)为单位 used_memory_human : 以人类可读的格式返回 Redis 分配的内存总量 used_memory_rss : 从操作系统的角度,返回 Redis 已分配的内存总量(俗称常驻集大小)。这个值和 top 、 ps 等命令的输出一致。 used_memory_peak : Redis 的内存消耗峰值(以字节为单位) used_memory_peak_human : 以人类可读的格式返回 Redis 的内存消耗峰值 used_memory_lua : Lua 引擎所使用的内存大小(以字节为单位) mem_fragmentation_ratio : used_memory_rss 和 used_memory 之间的比率 mem_allocator : 在编译时指定的, Redis 所使用的内存分配器。可以是 libc 、 jemalloc 或者 tcmalloc 。 在理想情况下, used_memory_rss 的值应该只比 used_memory 稍微高一点儿。 当 rss > used ,且两者的值相差较大时,表示存在(内部或外部的)内存碎片。 内存碎片的比率可以通过 mem_fragmentation_ratio 的值看出。 当 used > rss 时,表示 Redis 的部分内存被操作系统换出到交换空间了,在这种情况下,操作可能会产生明显的延迟。 Because Redis does not have control over how its allocations are mapped to memory pages, high used_memory_rss is often the result of a spike in memory usage. 当 Redis 释放内存时,分配器可能会,也可能不会,将内存返还给操作系统。 如果 Redis 释放了内存,却没有将内存返还给操作系统,那么 used_memory 的值可能和操作系统显示的 Redis 内存占用并不一致。 查看 used_memory_peak 的值可以验证这种情况是否发生。 persistence : RDB 和 AOF 的相关信息 stats : 一般统计信息 replication : 主/从复制信息 cpu : CPU 计算量统计信息 commandstats : Redis 命令统计信息 cluster : Redis 集群信息 keyspace : 数据库相关的统计信息 除上面给出的这些值以外,参数还可以是下面这两个: all : 返回所有信息 default : 返回默认选择的信息 当不带参数直接调用 INFO 命令时,使用 default 作为默认参数。 不同版本的 Redis 可能对返回的一些域进行了增加或删减。 因此,一个健壮的客户端程序在对 INFO 命令的输出进行分析时,应该能够跳过不认识的域,并且妥善地处理丢失不见的域。 可用版本: >= 1.0.0 时间复杂度: O(1) 返回值: ``` 具体请参见下面的测试代码。 ``` redis-cli -p 6383 127.0.0.1:6383> info # Server redis_version:2.8.8 redis_git_sha1:00000000 redis_git_dirty:0 redis_build_id:bafd09db0ea64ad0 redis_mode:standalone os:Linux 3.11.0-24-generic x86_64 arch_bits:64 multiplexing_api:epoll gcc_version:4.6.3 process_id:13724 run_id:05f48727373c94eae75b2634eb6cd99ff04c5592 tcp_port:6383 uptime_in_seconds:1329486 uptime_in_days:15 hz:10 lru_clock:2881420 config_file:/etc/redis/6383.conf # Clients connected_clients:267 client_longest_output_list:0 client_biggest_input_buf:0 blocked_clients:0 # Memory used_memory:2597003592 used_memory_human:2.42G used_memory_rss:3247697920 used_memory_peak:5300846120 used_memory_peak_human:4.94G used_memory_lua:29696 mem_fragmentation_ratio:1.25 mem_allocator:jemalloc-3.2.0 # Persistence loading:0 rdb_changes_since_last_save:1408782845 rdb_bgsave_in_progress:0 rdb_last_save_time:1410866014 rdb_last_bgsave_status:ok rdb_last_bgsave_time_sec:33 rdb_current_bgsave_time_sec:-1 aof_enabled:0 aof_rewrite_in_progress:0 aof_rewrite_scheduled:0 aof_last_rewrite_time_sec:-1 aof_current_rewrite_time_sec:-1 aof_last_bgrewrite_status:ok aof_last_write_status:ok # Stats total_connections_received:13209166 total_commands_processed:10099828580 instantaneous_ops_per_sec:2837 rejected_connections:36242 sync_full:0 sync_partial_ok:0 sync_partial_err:0 expired_keys:16591179 evicted_keys:0 keyspace_hits:2077408344 keyspace_misses:2433838286 pubsub_channels:0 pubsub_patterns:0 latest_fork_usec:97455 # Replication role:master connected_slaves:0 master_repl_offset:0 repl_backlog_active:0 repl_backlog_size:1048576 repl_backlog_first_byte_offset:0 repl_backlog_histlen:0 # CPU used_cpu_sys:88227.02 used_cpu_user:74898.65 used_cpu_sys_children:6.00 used_cpu_user_children:47.59 # Keyspace db0:keys=594128,expires=578561,avg_ttl=74150277 db5:keys=755192,expires=144,avg_ttl=63212667 db12:keys=6,expires=0,avg_ttl=0 ``` <file_sep>#!/bin/bash #author: itnihao #mail: <EMAIL> #http://wwww.itnihao.com #https://github.com/itnihao/zabbix-book/blob/master/03-chapter/Zabbix_MySQLdump_per_table.sh source /etc/bashrc source /etc/profile MySQL_USER=zabbix MySQL_PASSWORD=<PASSWORD> MySQL_HOST=localhost MySQL_PORT=3306 MySQL_DUMP_PATH=/mysql_backup MySQL_DATABASE_NAME=zabbix DATE=$(date '+%Y-%m-%d') [ -d ${MySQL_DUMP_PATH} ] || mkdir ${MySQL_DUMP_PATH} cd ${MySQL_DUMP_PATH} [ -d logs ] || mkdir logs [ -d ${DATE} ] || mkdir ${DATE} cd ${DATE} TABLE_NAME_ALL=$(mysql -u${MySQL_USER} -p${MySQL_PASSWORD} -P${MySQL_PORT} -h${MySQL_HOST} ${MySQL_DATABASE_NAME} -e "show tables"|egrep -v "(Tables_in_zabbix|history*|trends*|acknowledges|alerts|auditlog|events|service_alarms)") for TABLE_NAME in ${TABLE_NAME_ALL} do mysqldump -u${MySQL_USER} -p${MySQL_PASSWORD} -P${MySQL_PORT} -h${MySQL_HOST} ${MySQL_DATABASE_NAME} ${TABLE_NAME} >${TABLE_NAME}.sql sleep 1 done [ "$?" == 0 ] && echo "${DATE}: Backup zabbix succeed" >> ${MySQL_DUMP_PATH}/logs/ZabbixMysqlDump.log [ "$?" != 0 ] && echo "${DATE}: Backup zabbix not succeed" >> ${MySQL_DUMP_PATH}/logs/ZabbixMysqlDump.log cd ${MySQL_DUMP_PATH}/ rm -rf $(date +%Y-%m-%d --date='5 days ago') exit 0 <file_sep>#!/bin/bash # function:monitor tcp connect status from zabbix # License: GPL # mail:<EMAIL> # version:1.0 date:2012-12-09 # version:1.1 date:2013-01-15 source /etc/bashrc >/dev/null 2>&1 source /etc/profile >/dev/null 2>&1 LOG_FILE=/var/log/zabbix/tcp_connect.log netstat -n|awk '/^tcp/ {++S[$NF]} END {for (a in S) print a,S[a]}'>${LOG_FILE} Permission=$(ls -l ${LOG_FILE} |awk '{print $3}') [ "$Permission" != zabbix ] && chown zabbix.zabbix /var/log/zabbix/* #Functions to return tcp connect status established () { VALUE=$(awk '/ESTABLISHED/ {print $2}' ${LOG_FILE}) [ "${VALUE}" != "" ] && echo ${VALUE}|| echo 0 } listen () { VALUE=$(awk '/LISTEN/ {print $2}' ${LOG_FILE}) [ "${VALUE}" != "" ] && echo ${VALUE}|| echo 0 } timewait () { VALUE=$(awk '/TIME_WAIT/ {print $2}' ${LOG_FILE}) [ "${VALUE}" != "" ] && echo ${VALUE}|| echo 0 } timeclose () { VALUE=$(awk '/TIME_CLOSE/ {print $2}' ${LOG_FILE}) [ "${VALUE}" != "" ] && echo ${VALUE}|| echo 0 } finwait1 () { VALUE=$(awk '/FIN_WAIT1/ {print $2}' ${LOG_FILE}) [ "${VALUE}" != "" ] && echo ${VALUE}|| echo 0 } finwait2 () { VALUE=$(awk '/FIN_WAIT2/ {print $2}' ${LOG_FILE}) [ "${VALUE}" != "" ] && echo ${VALUE}|| echo 0 } synsent () { VALUE=$(awk '/SYN_SENT/ {print $2}' ${LOG_FILE}) [ "${VALUE}" != "" ] && echo ${VALUE}|| echo 0 } synrecv () { VALUE=$(awk '/SYN_RECV/ {print $2}' ${LOG_FILE}) [ "${VALUE}" != "" ] && echo ${VALUE}|| echo 0 } closewait () { VALUE=$(awk '/CLOSE_WAIT/ {print $2}' ${LOG_FILE}) [ "${VALUE}" != "" ] && echo ${VALUE}|| echo 0 } # Run the requested function case "$1" in established) established ;; listen) listen ;; timewait) timewait ;; timeclose) timeclose ;; finwait1) finwait1 ;; finwait2) finwait2 ;; synsent) synsent ;; synrecv) synrecv ;; closewait) closewait ;; *) echo "Usage: $0 { established|listen|timewait|timeclose|finwait1|finwait2|synsent|synrecv|closewait}" ;; esac <file_sep> <html xmlns:v="urn:schemas-microsoft-com:vml" xmlns:o="urn:schemas-microsoft-com:office:office" xmlns:w="urn:schemas-microsoft-com:office:word" xmlns:dt="uuid:C2F41010-65B3-11d1-A29F-00AA00C14882" xmlns:m="http://schemas.microsoft.com/office/2004/12/omml" <head> <meta name=标题 content=Key> <meta name=关键词 content=""> <meta http-equiv=Content-Type content="text/html; charset=utf-8"> <meta name=ProgId content=Word.Document> <meta name=Generator content="Microsoft Word 14"> <meta name=Originator content="Microsoft Word 14"> <link rel=File-List href="agent-Key.files/filelist.xml"> <title>Key</title> </head> <body bgcolor=white lang=ZH-CN style='tab-interval:21.0pt;text-justify-trim: punctuation'> <div class=WordSection1 style='layout-grid:15.6pt'> <table class=MsoNormalTable border=0 cellspacing=0 cellpadding=0 style='margin-left:2.25pt;border-collapse:collapse;mso-table-layout-alt:fixed; mso-padding-alt:0cm 0cm 0cm 0cm'> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; mso-border-alt:solid #8CACBB .75pt;background:#0212C9;mso-shading:windowtext; mso-pattern:solid #C0CCDD;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Key<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>▲<o:p></o:p></span></p> </td> <td width=118 colspan=30 style='width:118.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#0212C9;mso-shading:windowtext; mso-pattern:solid #C0CCDD;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>描述</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=14 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#0212C9;mso-shading:windowtext; mso-pattern:solid #C0CCDD;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>返回值</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=46 colspan=9 style='width:45.7pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#0212C9;mso-shading:windowtext; mso-pattern:solid #C0CCDD;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>参数</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=159 colspan=8 style='width:159.15pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#0212C9;mso-shading:windowtext; mso-pattern:solid #C0CCDD;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>注释</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>agent.hostname<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=118 colspan=30 style='width:118.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>返回</span><span lang=EN-US style='font-family:Arial'>agent</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>的</span><span lang=EN-US style='font-family:Arial'>hostname<o:p></o:p></span></p> </td> <td width=53 colspan=11 style='width:53.15pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>字符串值</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=20 colspan=6 style='width:19.8pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>-<o:p></o:p></span></p> </td> <td width=198 colspan=14 style='width:198.0pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>返回</span><span lang=EN-US style='font-family:Arial'>agentd.conf</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>里面配置的</span><span lang=EN-US style='font-family: Arial'>hostname</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>值</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>agent.ping<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=84 colspan=24 style='width:83.5pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>检测</span><span lang=EN-US style='font-family:Arial'>agent</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>的可用性</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=108 colspan=23 style='width:107.7pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>返回值为</span><span lang=EN-US style='font-family:Arial'>1</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>可用</span><span lang=EN-US style='font-family:Arial'>, </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>没有值则</span><span lang=EN-US style='font-family: Arial'>agent</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>不可用</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=17 colspan=3 style='width:16.8pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>-<o:p></o:p></span></p> </td> <td width=181 colspan=11 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>触发器设置规则的时候用</span><span lang=EN-US style='font-family:Arial'>nodata() </span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>函数检测</span><span lang=EN-US style='font-family:Arial'>agent</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>是否可用</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>agent.version<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=88 colspan=25 style='width:87.95pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Agent</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>版本</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=65 colspan=12 style='width:65.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>字符串</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=55 colspan=13 style='width:54.8pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>-<o:p></o:p></span></p> </td> <td width=181 colspan=11 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例如返回</span><span lang=EN-US style='font-family:Arial'>: 2.2.0<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>kernel.maxfiles<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=88 colspan=25 style='width:87.95pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>OS</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>支持的最大文件打开文件数(文件描述符数量)</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=65 colspan=12 style='width:65.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>文件描述符数量,整数值</span><span style='font-family:Arial'> <span lang=EN-US><o:p></o:p></span></span></p> </td> <td width=55 colspan=13 style='width:54.8pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=181 colspan=11 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>用命令可以查看</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> <p class=MsoNormal><b><span lang=EN-US style='font-family:Arial'>cat<span style="mso-spacerun:yes"> </span>/proc/sys/fs/file-max<o:p></o:p></span></b></p> <p class=MsoNormal><b><span lang=EN-US style='font-family:Arial'>sysctl fs.file-max</span></b><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>kernel.maxproc<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=88 colspan=25 style='width:87.95pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>OS</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>支持的最大线程数量</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=65 colspan=12 style='width:65.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>整数</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=55 colspan=13 style='width:54.8pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=181 colspan=11 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>用命令可以查看</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> <p class=MsoNormal><b><span lang=EN-US style='font-family:Arial'>cat /proc/sys/kernel/pid_max<o:p></o:p></span></b></p> <p class=MsoNormal><b><span lang=EN-US style='font-family:Arial'>sysctl kernel.pid_max</span></b><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>log[file,&lt;regexp&gt;,&lt;encoding&gt;,&lt;maxlines&gt;,&lt;mode&gt;,&lt;output&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=28 style='width:27.65pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=42 colspan=11 style='width:42.0pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>监控日志文件</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=16 colspan=3 style='width:15.6pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>日志类型</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=221 colspan=45 style='width:220.95pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><b><span lang=EN-US style='font-family:Arial'>file</span></b><span lang=EN-US style='font-family:Arial'> <span style="mso-spacerun:yes"> </span><span style="mso-spacerun:yes"> </span><span style="mso-spacerun:yes"> </span></span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>完整的路径和日志名称</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> <p class=MsoNormal><b><span lang=EN-US style='font-family:Arial'>regexp</span></b><span lang=EN-US style='font-family:Arial'> - <span style="mso-spacerun:yes"> </span></span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>正则表达式</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> <p class=MsoNormal><b><span lang=EN-US style='font-family:Arial'>encoding </span></b><span lang=EN-US style='font-family:Arial'>- </span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>文件的编码格式</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> <p class=MsoNormal><b><span lang=EN-US style='font-family:Arial'>maxlines </span></b><span lang=EN-US style='font-family:Arial'>- </span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>每秒向</span><span lang=EN-US style='font-family:Arial'>server</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>或者</span><span lang=EN-US style='font-family: Arial'>proxy</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>发送的数据大小,是由</span><span lang=EN-US style='font-family:Arial'><a href="https://www.zabbix.com/documentation/2.2/manual/appendix/config/zabbix_agentd"><span style='color:windowtext;text-decoration:none;text-underline:none'>zabbix_agentd.conf</span></a></span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>中的参数</span><span lang=EN-US style='font-family: Arial'> 'MaxLinesPerSecond' </span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>决定的</span><span lang=EN-US style='font-family:Arial'><br> <b>mode</b> -</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>可选参数</span><span lang=EN-US style='font-family:Arial'>:<br> <span style="mso-spacerun:yes"> </span>all (</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family: Arial'>), skip (</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>跳过过期处理的数据</span><span lang=EN-US style='font-family:Arial'>).<br> <b>mode</b></span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>选项是</span><span lang=EN-US style='font-family:Arial'>2.0</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>以上版本支持</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> <p class=MsoNormal><b><span lang=EN-US style='font-family:Arial'>output</span></b><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>一个可选的输出格式模板。在</span><span lang=EN-US style='font-family:Arial'>\0</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>转义序列被替换匹配的文本,而</span><span lang=EN-US style='font-family:Arial'>\ n - </span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>转义序列被替换为第</span><span lang=EN-US style='font-family:Arial'>N</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>个匹配的组(或者</span><span lang=EN-US style='font-family: Arial'>N</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>超过捕获组数空字符串)。如果</span><span lang=EN-US style='font-family:Arial'>&lt;OUTPUT&gt;</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>为空</span><span lang=EN-US style='font-family: Arial'> - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>返回一个包含匹配文本的整行。</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> <p class=MsoNormal><b><span lang=EN-US style='font-family:Arial'>output</span></b><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>是</span><span lang=EN-US style='font-family:Arial'>2.2</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>版本以后支持的</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=113 colspan=3 style='width:113.45pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>必须配置为主动模式,如果没有权限,则会显示不支持</span><span lang=EN-US style='font-family:Arial'><br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>举例</span><span lang=EN-US style='font-family: Arial'>: <br> log[/var/log/syslog]<br> log[/var/log/syslog,error]<br> log[/home/zabbix/logs/logfile,,,100] <o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>logrt[file_pattern,&lt;regexp&gt;,&lt;encoding&gt;,&lt;maxlines&gt;,&lt;mode&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=39 colspan=10 style='width:39.2pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>监控经过日志轮询处理过的日志</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=16 colspan=3 style='width:15.6pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>日志类型</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=185 colspan=41 style='width:184.65pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>file_pattern - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>文件的绝对路径或者是文件的正则匹配路径</span><span style='font-family: Arial'> <span lang=EN-US><br> regexp - </span></span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>文件内容匹配的正则表达式</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> <p class=MsoNormal align=left style='text-align:left'><span lang=EN-US style='font-family:Arial'>encoding - </span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>编码类型</span><span lang=EN-US style='font-family:Arial'><br> maxlines - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>每秒向</span><span lang=EN-US style='font-family:Arial'>Zabbix server or proxy</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>发送的日志文件行数</span><span lang=EN-US style='font-family:Arial'>. </span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>这个参数会覆盖</span><span lang=EN-US style='font-family:Arial'><a href="https://www.zabbix.com/documentation/2.0/manual/appendix/config/zabbix_agentd"><span style='color:windowtext;text-decoration:none;text-underline:none'>zabbix_a<o:p></o:p></span></a></span></p> <p class=MsoNormal align=left style='text-align:left'><span lang=EN-US style='font-family:Arial'><a href="https://www.zabbix.com/documentation/2.0/manual/appendix/config/zabbix_agentd"><span style='color:windowtext;text-decoration:none;text-underline:none'>gentd.conf</span></a></span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>中的</span><span lang=EN-US style='font-family: Arial'>MaxLinesPerSecond'</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>参数</span><span lang=EN-US style='font-family:Arial'><br> mode - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>可选的参数值</span><span lang=EN-US style='font-family:Arial'>:<br> all (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family:Arial'>, skip (</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>跳过处理之前发送时间内的数据</span><span lang=EN-US style='font-family:Arial'>).<br> Mode</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>参数需要</span><span lang=EN-US style='font-family:Arial'>2.0</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>才支持</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> <p class=MsoNormal align=left style='text-align:left'><b><span lang=EN-US style='font-family:Arial'>output</span></b><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>是</span><span lang=EN-US style='font-family:Arial'>2.2</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>版本以后支持的</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=150 colspan=7 style='width:149.75pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span lang=EN-US style='font-family:Arial'>Item</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>必须配置为主动方式</span><span lang=EN-US style='font-family:Arial'><br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family: Arial'>: <br> logrt[&quot;/home/zabbix/logs/^logfile[0-9]{1,3}$&quot;,,,100] - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>匹配文件</span><span lang=EN-US style='font-family: Arial'> &quot;logfile1&quot; (</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>不会匹配文件</span><span lang=EN-US style='font-family:Arial'> &quot;.logfile1&quot;)<br> logrt[&quot;/home/user/logfile_.*_[0-9]{1,3}&quot;,&quot;pattern_to_match&quot;,&quot;UTF-8&quot;,100] - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>匹配文件</span><span lang=EN-US style='font-family:Arial'> “logfile_abc_1” or “logfile__001”.<br> <br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>日志轮询基于最新的日志轮询时间或文件</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>net.dns[&lt;ip&gt;,zone,&lt;type&gt;,&lt;timeout&gt;,&lt;count&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=30 colspan=4 style='width:30.2pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> <p class=MsoNormal><span style='mso-bidi-font-size:10.5pt;font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>检测</span><span lang=EN-US style='mso-bidi-font-size:10.5pt;font-family: Arial'>DNS</span><span style='mso-bidi-font-size:10.5pt;font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>服务是否启动</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=67 colspan=23 style='width:66.75pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span lang=EN-US style='font-family:Arial'>0 - DNS</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>故障</span><span lang=EN-US style='font-family:Arial'> (server </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>无响应或者</span><span lang=EN-US style='font-family: Arial'> DNS</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>解析失败</span><span lang=EN-US style='font-family:Arial'>)<br> 1 - DNS</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>正常</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=118 colspan=24 style='width:117.75pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span lang=EN-US style='font-family:Arial'>ip -<span style="mso-spacerun:yes"> </span>DNS server</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>的</span><span lang=EN-US style='font-family:Arial'>ip</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>地址</span><span lang=EN-US style='font-family:Arial'> (</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>为空将会用默认的</span><span lang=EN-US style='font-family: Arial'>DNS, windows</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>上被忽略</span><span lang=EN-US style='font-family:Arial'>)<br> zone -<span style="mso-spacerun:yes"> </span>DNS</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>的</span><span lang=EN-US style='font-family:Arial'>zone</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>测试</span><span lang=EN-US style='font-family: Arial'><br> type - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>记录类型</span><span lang=EN-US style='font-family:Arial'> (</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>默认是</span><span lang=EN-US style='font-family:Arial'> SOA)<br> timeout (Windows</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>上忽略</span><span lang=EN-US style='font-family:Arial'>) - </span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>请求时间</span><span style='font-family:Arial'> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family: Arial'> 1</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>秒</span><span lang=EN-US style='font-family:Arial'>)<br> count (Windows</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>被忽略</span><span lang=EN-US style='font-family:Arial'>) - </span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>请求重试的次数</span><span lang=EN-US style='font-family:Arial'>(</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认是</span><span lang=EN-US style='font-family: Arial'>2)<o:p></o:p></span></p> </td> <td width=175 colspan=10 style='width:174.5pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'>:<br> net.dns[8.8.8.8,zabbix.com,MX,2,1] <br> type </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>的类型值</span><span lang=EN-US style='font-family:Arial'>: <br> ANY, A, NS, CNAME, MB, MG, MR, PTR, MD, MF, MX, SOA, NULL, WKS (except for Windows), HINFO, MINFO, TXT, SRV<br> SRV </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>记录类型</span><span lang=EN-US style='font-family:Arial'> 1.8.6 (Linux) </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>,</span><span lang=EN-US style='font-family:Arial'> 2.0.0 (Windows).</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>以上版本才支持</span><span lang=EN-US style='font-family:Arial'><br> <span style="mso-spacerun:yes"> </span></span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>不支持国际化域名,请使用</span><span lang=EN-US style='font-family:Arial'>IDNA</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>的编码名称代替。</span><span lang=EN-US style='font-family: Arial'><br> Zabbix 2.0</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>以前的版本叫</span><span lang=EN-US style='font-family:Arial'>: net.tcp.dns<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>net.dns.record[&lt;ip&gt;,zone,&lt;type&gt;,&lt;timeout&gt;,&lt;count&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=30 colspan=4 style='width:30.2pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>执行</span><span lang=EN-US style='font-family:Arial'>DNS</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>查询</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=39 colspan=14 style='width:39.0pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>成功时返回一个字符串与信息所需的类型</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=119 colspan=28 style='width:119.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>ip -<span style="mso-spacerun:yes"> </span>DNS server</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>的</span><span lang=EN-US style='font-family:Arial'>ip</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>地址</span><span lang=EN-US style='font-family: Arial'> (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>为空将会用默认的</span><span lang=EN-US style='font-family:Arial'>DNS, windows</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>上被忽略</span><span lang=EN-US style='font-family: Arial'>)<br> zone -<span style="mso-spacerun:yes"> </span>DNS</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>的</span><span lang=EN-US style='font-family:Arial'>zone</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>测试</span><span lang=EN-US style='font-family: Arial'><br> type - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>记录类型</span><span lang=EN-US style='font-family:Arial'> (</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>默认是</span><span lang=EN-US style='font-family:Arial'> SOA)<br> timeout (Windows</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>上忽略</span><span lang=EN-US style='font-family:Arial'>) - </span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>请求时间</span><span style='font-family:Arial'> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family: Arial'> 1</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>秒</span><span lang=EN-US style='font-family:Arial'>)<br> count (Windows</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>被忽略</span><span lang=EN-US style='font-family:Arial'>) - </span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>请求重试的次数</span><span lang=EN-US style='font-family:Arial'>(</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认是</span><span lang=EN-US style='font-family: Arial'>2)<o:p></o:p></span></p> </td> <td width=201 colspan=15 style='width:200.75pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'>:<br> net.dns[8.8.8.8,zabbix.com,MX,2,1] <br> type </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>的类型值</span><span lang=EN-US style='font-family:Arial'>: <br> ANY, A, NS, CNAME, MB, MG, MR, PTR, MD, MF, MX, SOA, NULL, WKS (except for Windows), HINFO, MINFO, TXT, SRV<br> SRV </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>记录类型</span><span lang=EN-US style='font-family:Arial'> 1.8.6 (Linux) </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>,</span><span lang=EN-US style='font-family:Arial'> 2.0.0 (Windows).</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>以上版本才支持</span><span lang=EN-US style='font-family:Arial'><br> <span style="mso-spacerun:yes"> </span></span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>不支持国际化域名,请使用</span><span lang=EN-US style='font-family:Arial'>IDNA</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>的编码名称代替。</span><span lang=EN-US style='font-family: Arial'><br> Zabbix 2.0</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>以前的版本叫</span><span lang=EN-US style='font-family:Arial'>: net.tcp.dns.query<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>net.if.collisions[if]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=202 colspan=48 style='width:201.95pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>超出窗口滑动的冲突数</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=51 colspan=7 style='width:51.4pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>整数</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=86 colspan=4 style='width:85.5pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>if -</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>网卡接口</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=50 colspan=2 style='width:50.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>net.if.discovery<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=103 colspan=28 style='width:102.95pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>列出为网卡,用于</span><span lang=EN-US style='font-family:Arial'> low-level discovery.<o:p></o:p></span></p> </td> <td width=74 colspan=14 style='width:74.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>JSON</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>对象</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=31 colspan=8 style='width:30.8pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=181 colspan=11 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Zabbix agent<span style="mso-spacerun:yes"> </span>2.0</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>以上版本支持</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>net.if.in[if,&lt;mode&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=39 colspan=10 style='width:39.2pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>网卡进流量</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=26 colspan=7 style='width:26.25pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>整数</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=137 colspan=31 style='width:136.5pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>if - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>接口名字</span><span lang=EN-US style='font-family: Arial'><br> mode - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>可选参数:</span><span lang=EN-US style='font-family:Arial'>bytes - </span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>总的字节流量数</span><span lang=EN-US style='font-family:Arial'>(</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family: Arial'>)<br> packets - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>发包的数量</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>errors - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>错误数据包的个数</span><span lang=EN-US style='font-family: Arial'><br> dropped - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>丢包的个数</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=187 colspan=13 style='width:187.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>多个字节的网卡名称在</span><span lang=EN-US style='font-family:Arial'> Windows</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>上</span><span lang=EN-US style='font-family:Arial'>agent</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>需</span><span lang=EN-US style='font-family:Arial'>1.8.6</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>以后版本支持</span><span lang=EN-US style='font-family: Arial'><br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>举例</span><span lang=EN-US style='font-family: Arial'>:<br> net.if.in[eth0,errors]<br> net.if.in[eth0]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>数据存储应该用</span><span lang=EN-US style='font-family:Arial'> (speed per second) .<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>net.if.out[if,&lt;mode&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=36 colspan=9 style='width:36.2pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>网卡出流量</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=29 colspan=8 style='width:29.25pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>整数</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=137 colspan=31 style='width:136.5pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>if - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>接口名字</span><span lang=EN-US style='font-family: Arial'><br> mode - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>可选参数:</span><span lang=EN-US style='font-family:Arial'>bytes - </span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>总的字节流量数</span><span lang=EN-US style='font-family:Arial'>(</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family: Arial'>)<br> packets - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>发包的数量</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>errors - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>错误数据包的个数</span><span lang=EN-US style='font-family: Arial'><br> dropped - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>丢包的个数</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=187 colspan=13 style='width:187.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>多个字节的网卡名称在</span><span lang=EN-US style='font-family:Arial'> Windows</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>上</span><span lang=EN-US style='font-family:Arial'>agent</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>需</span><span lang=EN-US style='font-family:Arial'>1.8.6</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>以后版本支持</span><span lang=EN-US style='font-family: Arial'><br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>举例</span><span lang=EN-US style='font-family: Arial'>:<br> net.if.out[eth0,errors]<br> net.if.out[eth0]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>数据存储应该用</span><span lang=EN-US style='font-family:Arial'> (speed per second) .<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>net.if.total[if,&lt;mode&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=36 colspan=9 style='width:36.2pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>网卡进出总流量</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=29 colspan=8 style='width:29.25pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>整数</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=143 colspan=33 style='width:142.55pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>if - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>接口名字</span><span lang=EN-US style='font-family: Arial'><br> mode - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>可选参数:</span><span lang=EN-US style='font-family:Arial'>bytes - </span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>总的字节流量数</span><span lang=EN-US style='font-family:Arial'>(</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family: Arial'>)<br> packets - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>发包的数量</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>errors - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>错误数据包的个数</span><span lang=EN-US style='font-family: Arial'><br> dropped - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>丢包的个数</span><span lang=EN-US style='font-family:Arial'><br style='mso-special-character:line-break'> <![if !supportLineBreakNewLine]><br style='mso-special-character:line-break'> <![endif]><o:p></o:p></span></p> </td> <td width=181 colspan=11 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'>:<br> net.if.total[eth0,errors]<br> net.if.total[eth0]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>数据存储应该用</span><span lang=EN-US style='font-family:Arial'> (speed per second) .<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>注意</span><span lang=EN-US style='font-family: Arial'>dropped packets</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>仅工作在支持</span><span lang=EN-US style='font-family:Arial'> net.if.in</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>和</span><span lang=EN-US style='font-family:Arial'>net.if.out</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>能支持</span><span lang=EN-US style='font-family: Arial'>dropped packets</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>的系统平台</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>net.tcp.listen[port]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=65 colspan=16 style='width:64.7pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>检测</span><span lang=EN-US style='font-family:Arial'>tcp</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>端口是否被监听</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=64 colspan=17 style='width:63.75pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>0 -</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>没有监听</span><span lang=EN-US style='font-family: Arial'><br> 1 - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>监听中</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=74 colspan=16 style='width:74.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>port - TCP </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>端口(</span><span lang=EN-US style='font-family: Arial'>1-65535</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>)</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=187 colspan=12 style='width:186.5pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'>:<br> net.tcp.listen[80]<br> Linux</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>上</span><span lang=EN-US style='font-family:Arial'>Zabbix agent 1.8.4</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>以上版本支持</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>net.tcp.port[&lt;ip&gt;,port]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 colspan=11 style='width:45.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>检测</span><span lang=EN-US style='font-family:Arial'>tcp</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>端口是否可连接</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=18 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>0 - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>不能连接</span><span lang=EN-US style='font-family: Arial'><br> 1 - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>可连接</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=97 colspan=21 style='width:96.55pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>ip - ip</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>地址</span><span lang=EN-US style='font-family: Arial'>(</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>默认是</span><span lang=EN-US style='font-family:Arial'>127.0.0.1)<br> port -</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>端口</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=181 colspan=11 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Example:<br> net.tcp.port[,80]</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>测试</span><span lang=EN-US style='font-family:Arial'>tcp</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>的</span><span lang=EN-US style='font-family:Arial'>80</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>是否可用连接</span><span lang=EN-US style='font-family: Arial'><br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>老版本的名称</span><span lang=EN-US style='font-family:Arial'>: check_port[*]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>对于</span><span lang=EN-US style='font-family: Arial'>tcp</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>简单的性能测试,用</span><span lang=EN-US style='font-family:Arial'>net.tcp.service.perf[tcp,&lt;ip&gt;,&lt;port&gt;]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial;mso-ansi-language:ZH-CN'>注意</span><span style='font-family:Arial;mso-ansi-language:ZH-CN'>,</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial;mso-ansi-language:ZH-CN'>这些检查可能会导致额外的系统守护进程日志中的消息</span><span lang=EN-US style='font-family:Arial'> (SMTP and SSH </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>的会话将被记录到日志</span><span lang=EN-US style='font-family:Arial'>).<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>net.tcp.service[service,&lt;ip&gt;,&lt;port&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=34 colspan=6 style='width:33.75pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>检测服务是否运行和是否能接受</span><span lang=EN-US style='font-family:Arial'>tcp</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>数据连接</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=43 colspan=15 style='width:42.95pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>0 - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>服务故障</span><span lang=EN-US style='font-family: Arial'>1 - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>服务运行</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=76 colspan=15 style='width:75.65pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><b><span lang=EN-US style='font-family:Arial'>service</span></b><span lang=EN-US style='font-family:Arial'> </span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>包括</span><span lang=EN-US style='font-family:Arial'>:<br> ssh, ntp, ldap, smtp, ftp, http, pop,nntp, map,<o:p></o:p></span></p> <p class=MsoNormal align=left style='text-align:left'><span lang=EN-US style='font-family:Arial'>tcp,https, telnet<br> <b>ip</b> - ip</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>地址</span><span lang=EN-US style='font-family:Arial'> (</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>默认是</span><span lang=EN-US style='font-family:Arial'>127.0.0.1)<br> <b>port</b> - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>端口</span><span lang=EN-US style='font-family:Arial'> (</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>默认用标准的服务端口</span><span lang=EN-US style='font-family:Arial'>)<o:p></o:p></span></p> </td> <td width=237 colspan=25 style='width:236.85pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>举例</span><span lang=EN-US style='font-family:Arial'>:<br> net.tcp.service[ftp,,45] - </span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>检测</span><span lang=EN-US style='font-family:Arial'>ftp45</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>端口是否提供服务</span><span lang=EN-US style='font-family:Arial'><br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial;mso-ansi-language:ZH-CN'>注意</span><span style='font-family:Arial;mso-ansi-language:ZH-CN'>,</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial;mso-ansi-language:ZH-CN'>这些检查可能会导致额外的系统守护进程日志中的消息</span><span lang=EN-US style='font-family:Arial'> (SMTP and SSH </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>会话将被记录到日志</span><span lang=EN-US style='font-family:Arial'>).<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>检测加密的协议</span><span lang=EN-US style='font-family:Arial'> (</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例如</span><span lang=EN-US style='font-family:Arial'> IMAP </span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>端口</span><span lang=EN-US style='font-family:Arial'> 993 or POP</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>端口</span><span lang=EN-US style='font-family: Arial'> 995) </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>当前不被支持</span><span style='font-family:Arial'> </span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>,替换的检测方式是</span><span lang=EN-US style='font-family:Arial'> net.tcp.port .<br> Windows agent</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>当前不支持</span><span lang=EN-US style='font-family:Arial'>LDAP and HTTPS <o:p></o:p></span></p> <p class=MsoNormal align=left style='text-align:left'><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>注意检测</span><span lang=EN-US style='font-family:Arial'> telnet</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>服务会有一个提示</span><span lang=EN-US style='font-family: Arial'> ('</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>结尾有</span><span lang=EN-US style='font-family:Arial'>’:’).<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>老版本的名称为</span><span lang=EN-US style='font-family:Arial'>: check_service[*]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>注意</span><span lang=EN-US style='font-family: Arial'> 1.8.3</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>版本以前检测</span><span lang=EN-US style='font-family:Arial'>ntp</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>的的</span><span lang=EN-US style='font-family:Arial'>key</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>为</span><span lang=EN-US style='font-family:Arial'> service.ntp<o:p></o:p></span></p> <p class=MsoNormal align=left style='text-align:left'><span lang=EN-US style='font-family:Arial'>https</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>和</span><span lang=EN-US style='font-family:Arial'>telnet</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>服务检测自</span><span lang=EN-US style='font-family:Arial'>2.0</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>版本后才被支持</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>net.tcp.service.perf[service,&lt;ip&gt;,&lt;port&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=25 colspan=2 style='width:24.85pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>检测服务性能</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=52 colspan=19 style='width:51.85pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span lang=EN-US style='font-family:Arial'>0 - </span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>服务故障</span><span lang=EN-US style='font-family:Arial'>;<br> seconds - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>连接服务所需的时间</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=76 colspan=15 style='width:75.65pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>service - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>包括</span><span lang=EN-US style='font-family: Arial'>:<br> ssh, ntp, ldap, smtp, ftp, http, pop,nntp, imap, tcp, https, telnet<br> ip - IP</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>地址</span><span lang=EN-US style='font-family:Arial'> (</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>默认为</span><span lang=EN-US style='font-family:Arial'> 127.0.0.1)<br> port - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>端口</span><span lang=EN-US style='font-family:Arial'> (</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>默认用标准的服务端口</span><span lang=EN-US style='font-family:Arial'>)<o:p></o:p></span></p> </td> <td width=237 colspan=25 style='width:236.85pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>举例</span><span lang=EN-US style='font-family:Arial'>:<br> net.tcp.service.perf[ssh] -</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>检测连接</span><span lang=EN-US style='font-family:Arial'>SSH server</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>并得到响应所花费的时间</span><span lang=EN-US style='font-family:Arial'>.<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>检测加密的协议</span><span lang=EN-US style='font-family:Arial'> (</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例如</span><span lang=EN-US style='font-family:Arial'> IMAP </span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>端口</span><span lang=EN-US style='font-family:Arial'> 993 or POP</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>端口</span><span lang=EN-US style='font-family: Arial'> 995) </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>当前不被支持</span><span style='font-family:Arial'> </span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>,替换的检测方式是</span><span lang=EN-US style='font-family:Arial'> net.tcp.service.perf[tcp,&lt;ip&gt;,&lt;port&gt;] <o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Windows agent</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>当前不支持</span><span lang=EN-US style='font-family: Arial'>LDAP and HTTPS <br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>注意检测</span><span lang=EN-US style='font-family:Arial'>telnet</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>服务会有一个提示</span><span lang=EN-US style='font-family:Arial'> ('</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>结尾有</span><span lang=EN-US style='font-family: Arial'>’:’).<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>老版本的名称为</span><span lang=EN-US style='font-family:Arial'>: check_service_perf[*]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>注意</span><span lang=EN-US style='font-family: Arial'> 1.8.3</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>版本以前检测</span><span lang=EN-US style='font-family:Arial'>ntp</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>的的</span><span lang=EN-US style='font-family:Arial'>key</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>为</span><span lang=EN-US style='font-family:Arial'> service.ntp<br> https</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>和</span><span lang=EN-US style='font-family:Arial'>telnet</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>服务检测自</span><span lang=EN-US style='font-family:Arial'>2.0</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>版本后才被支持</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>net.udp.listen[port]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=56 colspan=14 style='width:56.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>检测</span><span lang=EN-US style='font-family:Arial'>udp</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>端是否可用</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=64 colspan=17 style='width:64.05pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>0 -</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>不可用</span><span lang=EN-US style='font-family: Arial'><br> 1 -</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>正在使用</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=103 colspan=21 style='width:103.0pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>port - udp</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>端口</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=166 colspan=9 style='width:165.9pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'>:<br> net.udp.listen[68]<br> Linux agent</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>版本在</span><span lang=EN-US style='font-family:Arial'>1.8.4</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>被支持</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>proc.mem[&lt;name&gt;,&lt;user&gt;,&lt;mode&gt;,&lt;cmdline&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 colspan=11 style='width:45.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>特定用户使用的进程占用的内存大小</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=18 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>进程所占用的内存大小</span><span style='font-family:Arial'> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>(单位是</span><span lang=EN-US style='font-family: Arial'>bytes).<o:p></o:p></span></p> </td> <td width=97 colspan=21 style='width:96.55pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>name - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>进程名称</span><span lang=EN-US style='font-family: Arial'> (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>默认为</span><span lang=EN-US style='font-family:Arial'>“all processes”)<br> user - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>用户名</span><span lang=EN-US style='font-family:Arial'> (</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>默认</span><span lang=EN-US style='font-family:Arial'>“all users”)<br> mode - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>可选值</span><span lang=EN-US style='font-family:Arial'>:<br> avg, max, min, sum (</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family:Arial'>)<br> cmdline -</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>过滤的命令</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=181 colspan=11 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>例子</span><span lang=EN-US style='font-family:Arial'>:<br> proc.mem[,root] - root</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>用户下所有进程占用的内存大小</span><span lang=EN-US style='font-family:Arial'>proc.mem[zabbix_server,zabbix] -<span style="mso-spacerun:yes"> </span>zabbix_server </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>程序以</span><span lang=EN-US style='font-family: Arial'> zabbix</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>用户运行所占用的内存大小</span><span lang=EN-US style='font-family:Arial'><br> proc.mem[,oracle,max,oracleZABBIX] -oracle</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>用户占用的最大内存,命令中含有</span><span lang=EN-US style='font-family:Arial'> oracleZABBIX<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>proc.num[&lt;name&gt;,&lt;user&gt;,&lt;state&gt;,&lt;cmdline&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 colspan=11 style='width:45.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>运行于某个用户下的进程数量,并且包含进程的状态</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=31 colspan=10 style='width:31.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>进程的数量</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=102 colspan=22 style='width:102.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span lang=EN-US style='font-family:Arial'>name - </span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>进程名称</span><span lang=EN-US style='font-family:Arial'> (</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family: Arial'>“all processes”)<br> user - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>用户名</span><span lang=EN-US style='font-family:Arial'> (</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>默认</span><span lang=EN-US style='font-family:Arial'>default is “all users”)<br> state -</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>可选值</span><span lang=EN-US style='font-family:Arial'>: all(</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>默认</span><span lang=EN-US style='font-family:Arial'>), run, sleep, zomb<br> cmdline - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>命令过滤的条件</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=210 colspan=18 style='width:210.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>举例</span><span lang=EN-US style='font-family:Arial'>:<br> proc.num[,mysql] - mysql</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>用户下所有的进程数量</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>proc.num[apache2,www-data] - apache2</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>进程数量,其用户是</span><span lang=EN-US style='font-family:Arial'> www-data<o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>proc.num[,oracle,sleep,oracleZABBIX] - oracle</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>用户的进程数量,进程状态为</span><span lang=EN-US style='font-family:Arial'>sleep</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>,在进程的命令中包含</span><span lang=EN-US style='font-family:Arial'>oracleZABBIX</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>字符串</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>在</span><span lang=EN-US style='font-family:Arial'>Windows</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>版本中</span><span lang=EN-US style='font-family:Arial'>, </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>仅进程名字和用户参数支持</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>sensor[device,sensor,&lt;mode&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=32 colspan=5 style='width:32.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>读取硬件传感器</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=13 colspan=6 style='width:13.0pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=121 colspan=28 style='width:121.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>device - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>设备名称</span><span lang=EN-US style='font-family: Arial'><span style="mso-spacerun:yes"> </span>(</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>如果</span><span lang=EN-US style='font-family: Arial'> &lt;mode&gt; </span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>参数使用这里可以用正则表达式</span><span lang=EN-US style='font-family:Arial'>)<br> sensor - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>传感器名称</span><span lang=EN-US style='font-family:Arial'> (</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>如果</span><span lang=EN-US style='font-family:Arial'> &lt;mode&gt; </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>参数使用这里可以用正则表达式</span><span lang=EN-US style='font-family:Arial'>)<br> mode - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>可选参数</span><span lang=EN-US style='font-family:Arial'>:<br> avg, max, min </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>如果省略此参数</span><span lang=EN-US style='font-family:Arial'>, device </span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>和</span><span lang=EN-US style='font-family:Arial'> sensor</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>参数必须使用完整的名称</span><span lang=EN-US style='font-family:Arial'>).<o:p></o:p></span></p> </td> <td width=223 colspan=22 style='width:222.5pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>在</span><span lang=EN-US style='font-family:Arial'>Linux 2.4,</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>读取的是</span><span lang=EN-US style='font-family:Arial'> /proc/sys/dev/sensors.<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family: Arial'>:<br> sensor[w83781d-i2c-0-2d,temp1]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>在</span><span lang=EN-US style='font-family: Arial'>Zabbix 1.8.4</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>版本中</span><span lang=EN-US style='font-family:Arial'>, </span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>使用</span><span lang=EN-US style='font-family:Arial'> sensor[temp1] </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>这种格式</span><span lang=EN-US style='font-family: Arial'>.<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>在</span><span lang=EN-US style='font-family: Arial'>OpenBSD, </span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>读取的是</span><span lang=EN-US style='font-family:Arial'> hw.sensors MIB.<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family: Arial'>:<br> sensor[cpu0,temp0] -<span style="mso-spacerun:yes"> </span>CPU0</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>的温度</span><span lang=EN-US style='font-family: Arial'><br> sensor[cpu[0-2]$,temp,avg] - CPU0</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>到</span><span lang=EN-US style='font-family:Arial'>cpu2</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>这</span><span lang=EN-US style='font-family:Arial'>3</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>个</span><span lang=EN-US style='font-family:Arial'>cpu</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>的平均温度</span><span lang=EN-US style='font-family: Arial'><br> <span style="mso-spacerun:yes"> </span>Zabbix 1.8.4</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>以上版本支持</span><span lang=EN-US style='font-family: Arial'>OpenBSD<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.boottime<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=78 colspan=22 style='width:78.05pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>系统启动时间</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=33 colspan=7 style='width:33.4pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>整数</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=55 colspan=10 style='width:55.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=223 colspan=22 style='width:222.5pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>UNIX</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>时间戳</span><span lang=EN-US style='font-family: Arial'> (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>日期时间</span><span lang=EN-US style='font-family:Arial'>).<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.cpu.intr<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=93 colspan=26 style='width:93.05pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Cpu</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>的中断数</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=51 colspan=9 style='width:51.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>整数</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=64 colspan=15 style='width:63.85pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=181 colspan=11 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.cpu.load[&lt;cpu&gt;,&lt;mode&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=34 colspan=7 style='width:34.4pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>CPU</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>负载</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=59 colspan=19 style='width:58.65pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>进程的负载</span><span lang=EN-US style='font-family:Arial'>.<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>数据类型为浮点数</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=166 colspan=30 style='width:166.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>cpu - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>可用的参数</span><span lang=EN-US style='font-family: Arial'>:<br> all (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family:Arial'>), percpu (</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>单颗</span><span lang=EN-US style='font-family: Arial'>cpu</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>的负载</span><span lang=EN-US style='font-family:Arial'>)<br> mode - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>可用的参数</span><span lang=EN-US style='font-family:Arial'>:<br> avg1 (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>一分钟的负载</span><span lang=EN-US style='font-family:Arial'>, </span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>默认</span><span lang=EN-US style='font-family:Arial'>), avg5 (5</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>分组的负载</span><span lang=EN-US style='font-family: Arial'>),avg15 (15</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>分钟的负载</span><span lang=EN-US style='font-family:Arial'>)<o:p></o:p></span></p> </td> <td width=130 colspan=5 style='width:129.8pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'>:<br> system.cpu.load[,avg5]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>老版本的名称</span><span lang=EN-US style='font-family:Arial'>: system.cpu.loadX<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>参数</span><span lang=EN-US style='font-family: Arial'> percpu </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>从</span><span lang=EN-US style='font-family:Arial'>2.0.0</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>以上版本支持</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.cpu.num[&lt;type&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 colspan=11 style='width:45.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>CPU</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>的数量</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=18 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>CPU</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>数量</span><span lang=EN-US style='font-family: Arial'>.<o:p></o:p></span></p> </td> <td width=97 colspan=21 style='width:96.55pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>type - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>可用的参数</span><span lang=EN-US style='font-family: Arial'>:<br> online (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family:Arial'>), max<o:p></o:p></span></p> </td> <td width=181 colspan=11 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'>:<br> system.cpu.num<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.cpu.switches<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=126 colspan=32 style='width:126.45pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>CPU</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>的上下文切换数值</span><span lang=EN-US style='font-family: Arial'>.<o:p></o:p></span></p> </td> <td width=61 colspan=13 style='width:60.7pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>切换数量</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=21 colspan=5 style='width:20.85pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=181 colspan=11 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>老版本名称</span><span lang=EN-US style='font-family:Arial'>: system[switches]<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.cpu.util[&lt;cpu&gt;,&lt;type&gt;,&lt;mode&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 colspan=11 style='width:45.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>CPU(s) </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>利用率</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=52 colspan=16 style='width:51.8pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>处理器的利用率</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=176 colspan=30 style='width:175.9pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span lang=EN-US style='font-family:Arial'>cpu - CPU</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>数量</span><span lang=EN-US style='font-family:Arial'> (</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认为所有</span><span lang=EN-US style='font-family: Arial'>CPU)<br> type - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>可用的参数</span><span lang=EN-US style='font-family:Arial'>:<br> idle, nice, user (</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family:Arial'>), system(Windows</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>版本默认</span><span lang=EN-US style='font-family: Arial'>), iowait,interrupt, softirq, steal<br> mode - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>可选参数</span><span lang=EN-US style='font-family:Arial'>:<br> avg1 (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>一分钟负载</span><span lang=EN-US style='font-family:Arial'>,</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>默认</span><span lang=EN-US style='font-family:Arial'>), avg5 (5</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>分钟的负载</span><span lang=EN-US style='font-family: Arial'>),avg15 (15</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>分钟的负载</span><span lang=EN-US style='font-family:Arial'>)<o:p></o:p></span></p> </td> <td width=116 colspan=4 style='width:116.15pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'>:<br> system.cpu.util[0,user,avg5]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>老版本名称</span><span lang=EN-US style='font-family:Arial'>: <o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.cpu.idleX, system.cpu.niceX, system.cpu.systemX, system.cpu.userX<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.hostname[&lt;type&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=53 colspan=12 style='width:52.85pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>返回</span><span lang=EN-US style='font-family:Arial'>hostname.<o:p></o:p></span></p> </td> <td width=18 colspan=7 style='width:18.4pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>字符串</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=72 colspan=15 style='width:72.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>type (</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>仅用于</span><span lang=EN-US style='font-family: Arial'>Windows, </span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>其他系统被忽略</span><span lang=EN-US style='font-family:Arial'>) <o:p></o:p></span></p> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>可用的参数</span><span lang=EN-US style='font-family:Arial'>:netbios (</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>默认</span><span lang=EN-US style='font-family:Arial'>) </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>或者</span><span lang=EN-US style='font-family: Arial'> host<o:p></o:p></span></p> </td> <td width=246 colspan=27 style='width:245.7pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>在</span><span lang=EN-US style='font-family:Arial'>windows</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>系统上面,获取值用的是</span><span lang=EN-US style='font-family:Arial'> GetComputerName() (fornetbios)</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>这</span><span lang=EN-US style='font-family:Arial'>2</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>个函数,其他系统用的是</span><span lang=EN-US style='font-family:Arial'> “hostname”</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>这个命令。</span><span lang=EN-US style='font-family:Arial'><br> Type</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>参数从</span><span lang=EN-US style='font-family:Arial'> 1.8.6</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>以后版本支持</span><span lang=EN-US style='font-family:Arial'>.<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>返回值举例</span><span lang=EN-US style='font-family:Arial'>:<br> Linux:<br> system.hostname → linux-w7x1<br> system.hostname → www.zabbix.com<br> Windows:<br> system.hostname → WIN-SERV2008-I6<br> system.hostname[host] → Win-Serv2008-I6LonG.<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.hw.chassis[&lt;info&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=14 style='width:13.95pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>平台架构</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=20 colspan=6 style='width:20.45pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>字符串</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=109 colspan=27 style='width:109.1pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>info - full (</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family: Arial'>), model, serial, type </span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>或者</span><span lang=EN-US style='font-family:Arial'> vendor<o:p></o:p></span></p> </td> <td width=246 colspan=27 style='width:245.7pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'>: <o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.hw.chassis[full]<br> Hewlett-Packard HP Pro 3010 Small Form Factor PC CZXXXXXXXX Desktop]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>必须有</span><span lang=EN-US style='font-family:Arial'>root</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>权限,因为这值是从内存中读取的</span><span lang=EN-US style='font-family:Arial'><br> Zabbix agent 2.0</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>以上版本支持</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.hw.cpu[&lt;cpu&gt;,&lt;info&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=29 colspan=3 style='width:28.95pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>显示</span><span lang=EN-US style='font-family:Arial'>CPU</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>信息</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=30 colspan=12 style='width:30.0pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>字符串数值类型</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=109 colspan=25 style='width:109.1pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span lang=EN-US style='font-family:Arial'>cpu - </span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>所有</span><span lang=EN-US style='font-family:Arial'>CPU(</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family: Arial'>)<br> info - full (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family:Arial'>), curfreq, maxfreq, model </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>或</span><span lang=EN-US style='font-family:Arial'> vendor</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>中之一</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=221 colspan=21 style='width:221.15pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'>: system.hw.cpu[0,vendor]<br> AuthenticAMD<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>所有的信息是从</span><span lang=EN-US style='font-family:Arial'>/proc/cpuinfo</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>和</span><span lang=EN-US style='font-family:Arial'> /sys/devices/system/cpu/[cpunum]/cpufreq/cpuinfo_max_freq.</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>收集得到的</span><span lang=EN-US style='font-family: Arial'><br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>如果</span><span lang=EN-US style='font-family: Arial'> CPU</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>数量和</span><span lang=EN-US style='font-family:Arial'>curfreq </span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>或者</span><span lang=EN-US style='font-family:Arial'> maxfreq</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>参数已经设置</span><span lang=EN-US style='font-family: Arial'>,</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>将会返回</span><span lang=EN-US style='font-family:Arial'>CPU</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>的主频</span><span lang=EN-US style='font-family:Arial'> (Hz).<br> system.hw.cpu[0,curfreq]<o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>2394000000<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>从</span><span lang=EN-US style='font-family: Arial'>Zabbix agent 2.0</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>支持</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.hw.devices[&lt;type&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 colspan=11 style='width:45.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>列出</span><span lang=EN-US style='font-family:Arial'>PCI</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>或</span><span lang=EN-US style='font-family:Arial'>USB</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>设备</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=26 colspan=8 style='width:25.9pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>文本</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=97 colspan=21 style='width:96.8pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>type - pci (</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family: Arial'>) </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>或者</span><span lang=EN-US style='font-family:Arial'>usb<o:p></o:p></span></p> </td> <td width=221 colspan=21 style='width:221.15pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.hw.devices[pci]<o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-size:7.5pt;font-family:Arial'>00:15.0 PCI bridge: VMware PCI Express Root Port (rev 01)<o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-size:7.5pt;font-family:Arial'>00:15.1 PCI bridge: VMware PCI Express Root Port (rev 01)</span><span lang=EN-US style='font-family:Arial'><br> system.hw.devices[usb] <o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-size:7.5pt;font-family:Arial'>Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub<o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-size:7.5pt;font-family:Arial'>Bus 002 Device 001: ID 1d6b:0001 Linux Foundation 1.1 root hub</span><span lang=EN-US style='font-family:Arial'><br> 2.0</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>版本以上支持</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.hw.macaddr[&lt;interface&gt;,&lt;format&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 colspan=11 style='width:45.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>列出</span><span lang=EN-US style='font-family:Arial'>MAC</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>地址</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=36 colspan=12 style='width:36.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>字符串</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=87 colspan=17 style='width:86.6pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>interface - all (</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family: Arial'>) or </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>或正则表达式</span><span lang=EN-US style='font-family:Arial'><br> format - full (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family:Arial'>) </span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>或者</span><span lang=EN-US style='font-family:Arial'> short<o:p></o:p></span></p> </td> <td width=221 colspan=21 style='width:221.15pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'>: system.hw.macaddr[&quot;eth0$&quot;,full]<br> [eth0] 00:11:22:33:44:55<br> short</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>列出</span><span lang=EN-US style='font-family:Arial'>mac</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>的格式前面没有网卡接口</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.hw.macaddr[&quot;eth0$&quot;,short]<o:p></o:p></span></p> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>00:11:22:33:44:55<br> 2.0</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>以上版本支持</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.localtime[&lt;type&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=36 colspan=8 style='width:35.8pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>系统时间</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=61 colspan=19 style='width:61.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>整数或者是字符串</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=278 colspan=33 style='width:278.15pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span lang=EN-US style='font-family:Arial'>utc - (default) (00:00:00 UTC, January 1, 1970), </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>精确到秒</span><span lang=EN-US style='font-family: Arial'><br> local - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>时间格式为</span><span lang=EN-US style='font-family:Arial'> 'yyyy-mm-dd,hh:mm:ss.nnn,+hh:mm' </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>,</span><span lang=EN-US style='font-family:Arial'>2.0</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>支持此参数</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=14 style='width:13.9pt;border-top:none;border-left:none;border-bottom: solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt;mso-border-top-alt:solid #8CACBB .75pt; mso-border-left-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.run[command,&lt;mode&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 colspan=11 style='width:45.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>在主机上面运行命令</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=30 colspan=9 style='width:29.95pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>返回结果为字符</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=93 colspan=20 style='width:92.75pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>command - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>执行的命令</span><span lang=EN-US style='font-family: Arial'><br> mode - wait (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>默认,等待支持结果返回</span><span lang=EN-US style='font-family:Arial'>), nowait (</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>不要等待执行结果返回</span><span lang=EN-US style='font-family:Arial'>)<o:p></o:p></span></p> </td> <td width=221 colspan=21 style='width:221.15pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>返回值增加到</span><span lang=EN-US style='font-family:Arial'>512KB(2.0.5</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>版本以下</span><span style='font-family:Arial'> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>版本只支持</span><span lang=EN-US style='font-family: Arial'>64KB), </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>包括后面的空格字符一起</span><span lang=EN-US style='font-family:Arial'>.<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>命令输出必须为字符,否则无法处理</span><span lang=EN-US style='font-family:Arial'>.<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family: Arial'>:<br> system.run[ls -l /] -</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>执行</span><span lang=EN-US style='font-family:Arial'>ls /</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>命令</span><span lang=EN-US style='font-family:Arial'><br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>注意</span><span lang=EN-US style='font-family: Arial'>: </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>要开启此功能需配置</span><span lang=EN-US style='font-family:Arial'>agentd.conf</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>文件,开启</span><span lang=EN-US style='font-family: Arial'> EnableRemoteCommands=1 option.<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.stat[resource,&lt;type&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 colspan=11 style='width:45.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Virtual memory statistics<o:p></o:p></span></p> </td> <td width=66 colspan=18 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Numeric value<o:p></o:p></span></p> </td> <td width=278 colspan=32 style='width:277.75pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>ent - number of processor units this partition is entitled to receive (float)<br> kthr,&lt;type&gt; - information about kernel thread states:<br> r - average number of runnable kernel threads (float)<br> b - average number of kernel threads placed in the Virtual Memory Manager wait queue (float)<br> memory,&lt;type&gt; - information about the usage of virtual and real memory:<br> avm - active virtual pages (integer)<br> fre - size of the free list (integer)<br> page,&lt;type&gt; - information about page faults and paging activity:<br> fi - file page-ins per second (float)<br> fo - file page-outs per second (float)<br> pi - pages paged in from paging space (float)<br> po - pages paged out to paging space (float)<br> fr - pages freed (page replacement) (float)<br> sr - pages scanned by page-replacement algorithm (float)<br> faults,&lt;type&gt; - trap and interrupt rate:<br> in - device interrupts (float)<br> sy - system calls (float)<br> cs - kernel thread context switches (float)<br> cpu,&lt;type&gt; - breakdown of percentage usage of processor time:<br> us - user time (float)<br> sy - system time (float)<br> id - idle time (float)<br> wa - idle time during which the system had outstanding disk/NFS I/O request(s) (float)<br> pc - number of physical processors consumed (float)<br> ec - the percentage of entitled capacity consumed (float)<br> lbusy - indicates the percentage of logical processor(s) utilization that occurred while executing at the user and system level (float)<br> app - indicates the available physical processors in the shared pool (float)<br> disk,&lt;type&gt; - disk statistics:<br> bps - indicates the amount of data transferred (read or written) to the drive in bytes per second (integer)<br> tps - indicates the number of transfers per second that were issued to the physical disk/tape (float)<br> This item is supported starting from version 1.8.1.<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.sw.arch<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 colspan=11 style='width:45.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>返回软件架构</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=18 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>String value<o:p></o:p></span></p> </td> <td width=53 colspan=9 style='width:53.2pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=225 colspan=23 style='width:224.55pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'>: system.sw.arch<br> i686<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>信息从</span><span lang=EN-US style='font-family:Arial'> uname()</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>函数返回</span><span lang=EN-US style='font-family:Arial'>.<br> 2.0</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>以上版本支持</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.sw.os[&lt;info&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 colspan=2 style='width:30.45pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 colspan=11 style='width:45.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>返回OS信息</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=18 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>字符串</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=97 colspan=21 style='width:96.55pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>info - full (</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family: Arial'>), short </span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>或者</span><span lang=EN-US style='font-family:Arial'> name<o:p></o:p></span></p> </td> <td width=181 colspan=11 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Example: system.sw.os[short]<br> Ubuntu 2.6.35-28.50-generic 2.6.35.11<br> <br> Info is acquired from (note that not all files are present in all distributions):<br> [full] - /proc/version<br> [short] - /proc/version_signature<br> [name] - /etc/issue.net<br> <br> 2.0</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>以上版本支持</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=63 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.sw.packages[&lt;package&gt;,&lt;manager&gt;,&lt;format&gt;]<o:p></o:p></span></p> </td> </tr> <![if !supportMisalignedColumns]> <tr height=0> <td width=19 style='border:none'></td> <td width=2 style='border:none'></td> <td width=15 style='border:none'></td> <td width=11 style='border:none'></td> <td width=4 style='border:none'></td> <td width=1 style='border:none'></td> <td width=2 style='border:none'></td> <td width=1 style='border:none'></td> <td width=1 style='border:none'></td> <td width=1 style='border:none'></td> <td width=0 style='border:none'></td> <td width=3 style='border:none'></td> <td width=6 style='border:none'></td> <td width=8 style='border:none'></td> <td width=2 style='border:none'></td> <td width=1 style='border:none'></td> <td width=2 style='border:none'></td> <td width=3 style='border:none'></td> <td width=1 style='border:none'></td> <td width=3 style='border:none'></td> <td width=2 style='border:none'></td> <td width=3 style='border:none'></td> <td width=1 style='border:none'></td> <td width=1 style='border:none'></td> <td width=2 style='border:none'></td> <td width=2 style='border:none'></td> <td width=3 style='border:none'></td> <td width=3 style='border:none'></td> <td width=3 style='border:none'></td> <td width=4 style='border:none'></td> <td width=4 style='border:none'></td> <td width=4 style='border:none'></td> <td width=2 style='border:none'></td> <td width=4 style='border:none'></td> <td width=2 style='border:none'></td> <td width=14 style='border:none'></td> <td width=1 style='border:none'></td> <td width=7 style='border:none'></td> <td width=1 style='border:none'></td> <td width=7 style='border:none'></td> <td width=2 style='border:none'></td> <td width=1 style='border:none'></td> <td width=3 style='border:none'></td> <td width=4 style='border:none'></td> <td width=2 style='border:none'></td> <td width=4 style='border:none'></td> <td width=3 style='border:none'></td> <td width=1 style='border:none'></td> <td width=3 style='border:none'></td> <td width=10 style='border:none'></td> <td width=1 style='border:none'></td> <td width=5 style='border:none'></td> <td width=7 style='border:none'></td> <td width=8 style='border:none'></td> <td width=7 style='border:none'></td> <td width=9 style='border:none'></td> <td width=23 style='border:none'></td> <td width=10 style='border:none'></td> <td width=22 style='border:none'></td> <td width=4 style='border:none'></td> <td width=155 style='border:none'></td> <td width=84 style='border:none'></td> <td width=43 style='border:none'></td> </tr> <![endif]> </table> <table class=MsoNormalTable border=0 cellspacing=0 cellpadding=0 style='margin-left:2.25pt;border-collapse:collapse;mso-table-layout-alt:fixed; mso-padding-alt:0cm 0cm 0cm 0cm'> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;mso-border-alt: solid #8CACBB .75pt;background:#0212C9;mso-shading:windowtext;mso-pattern: solid #C0CCDD;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border:solid #8CACBB 1.0pt;border-left: none;mso-border-left-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>列出已经安装过的软件包</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=33 colspan=3 style='width:33.4pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>文本</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=108 colspan=8 style='width:107.7pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>package - all (</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family: Arial'>) </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>或者正则表达式</span><span lang=EN-US style='font-family:Arial'><br> manager - all (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family:Arial'>) </span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>或者一个软件包</span><span lang=EN-US style='font-family:Arial'>format - full (</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family: Arial'>) </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>或者</span><span lang=EN-US style='font-family:Arial'>short<o:p></o:p></span></p> </td> <td width=203 colspan=8 style='width:202.75pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Example: system.sw.packages[mini,dpkg,short]<br> python-minimal, python2.6-minimal, ubuntu-minimal<br> <br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>列出指定名称所有安装过的软件包</span><span lang=EN-US style='font-family:Arial'> (“all” </span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>列出全部</span><span lang=EN-US style='font-family:Arial'>).<br> <br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>支持包管理选项</span><span lang=EN-US style='font-family:Arial'>:<br> manager (executed command)<br> dpkg (dpkg --get-selections)<br> pkgtool (ls /var/log/packages)<br> rpm (rpm -qa)<br> pacman (pacman -Q)<br> <br> If format is specified as full, packages are grouped by package managers (each manager on a seperate line beginning with it's name in square brackets).<br> If format is specified as short, packages are not grouped and are listed on a single line.<br> <br> Supported since Zabbix agent version 2.0.<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.swap.in[&lt;device&gt;,&lt;type&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Swap</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>使用情况</span><span lang=EN-US style='font-family: Arial'>s.<o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>数值</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=97 colspan=7 style='width:96.55pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>device - device used for swapping (default is all)<br> type - possible values:<br> count (number of swapins), sectors(sectors swapped in), pages (pages swapped in). See <a href="https://www.zabbix.com/documentation/2.0/manual/appendix/items/supported_by_platform"><span style='color:windowtext;text-decoration:none;text-underline:none'>supported by platform</span></a> for details on defaults.<o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Example key:<br> system.swap.in[,pages]<br> <br> The source of this information is:<br> Linux 2.4:<br> /proc/swaps, /proc/partitions, /proc/stat<br> Linux 2.6:<br> /proc/swaps, /proc/diskstats, /proc/vmstat<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.swap.out[&lt;device&gt;,&lt;type&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Swap out (from memory onto device) statistics.<o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Numeric value<o:p></o:p></span></p> </td> <td width=97 colspan=7 style='width:96.55pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>device - device used for swapping (default is all)<br> type - possible values:<br> count (number of swapouts),sectors (sectors swapped out),pages (pages swapped out). See<a href="https://www.zabbix.com/documentation/2.0/manual/appendix/items/supported_by_platform"><span style='color:windowtext;text-decoration:none;text-underline:none'>supported by platform</span></a> for details on defaults.<o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Example key:<br> system.swap.out[,pages]<br> <br> The source of this information is:<br> Linux 2.4:<br> /proc/swaps, /proc/partitions, /proc/stat<br> Linux 2.6:<br> /proc/swaps, /proc/diskstats, /proc/vmstat<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.swap.size[&lt;device&gt;,&lt;type&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Swap </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>空间大小</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Number of bytes or percentage.<o:p></o:p></span></p> </td> <td width=97 colspan=7 style='width:96.55pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>device - device used for swapping (default is all)<br> type - possible values:<br> free (free swap space, default),pfree (free swap space, in percent), pused (used swap space, in percent), total (total swap space), used (used swap space)<o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Example key:<br> system.swap.size[,pfree] - free swap space percentage<br> <br> Old naming: system.swap.free, system.swap.total<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.uname<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>系统版本名称</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=39 colspan=4 style='width:38.85pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>字符串</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=40 colspan=4 style='width:40.2pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=265 colspan=11 style='width:264.8pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例如</span><span lang=EN-US style='font-family:Arial'>:<br> </span><span lang=EN-US style='font-size:7.5pt;font-family:Arial'>Linux www.zabbix.com 2.6.32-358.el6.x86_64 #1 SMP Fri Feb 22 00:31:26 UTC 2013 x86_64</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.uptime<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=90 colspan=6 style='width:89.65pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>系统启动的时长</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=50 colspan=4 style='width:49.75pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>精确到秒</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=69 colspan=5 style='width:68.6pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>配置</span><span lang=EN-US style='font-family:Arial'><a href="https://www.zabbix.com/documentation/2.0/manual/config/items/item%23configuration"><span style='color:windowtext;text-decoration:none;text-underline:none'>item</span><span lang=ZH-CN style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;color:windowtext;text-decoration:none;text-underline:none'>的</span></a></span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>时候</span><span lang=EN-US style='font-family: Arial'>, </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>用</span><span lang=EN-US style='font-family:Arial'> s </span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>或者</span><span lang=EN-US style='font-family:Arial'> uptime </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>为单位</span><span lang=EN-US style='font-family: Arial'>.<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>system.users.num<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=74 colspan=2 style='width:73.95pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>已登录用户数</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=65 colspan=8 style='width:65.45pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>用户的数量</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=69 colspan=5 style='width:68.6pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>用</span><span lang=EN-US style='font-family:Arial'>who</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>命令收集得到(</span><span lang=EN-US style='font-family:Arial'>linux</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>系统)</span><span lang=EN-US style='font-family: Arial'>.<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>vfs.dev.read[&lt;device&gt;,&lt;type&gt;,&lt;mode&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Disk read statistics.<o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Integer if typeis in: sectors,operations,bytes <br> <br> Float if type is in: sps, ops, bps<o:p></o:p></span></p> </td> <td width=97 colspan=7 style='width:96.55pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>device - disk device (default is “all”1) <br> type - possible values:<br> sectors, operations, bytes, sps,ops, bps (must be specified, since defaults differ under various OSes). <br> sps, ops, bps stand for: sectors, operations, bytes per second, respectively <br> mode - possible values:<br> avg1 (one-minute average, default), avg5 (five-minute average), avg15 (15-minute average). <br> Note: The third parameter is supported only if the type is in: sps, ops, bps.<o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Default values of 'type' parameter for different OSes:<br> FreeBSD - bps<br> Linux - sps<br> OpenBSD - operations<br> Solaris - bytes<br> <br> Example key: vfs.dev.read[,operations]<br> Old naming: io[*] <br> <br> Usage of the type parameters ops, bps and sps on supported platforms is limited to 8 devices (7 individual devices and one “all”). Starting with Zabbix 2.0.1 <a href="https://www.zabbix.com/documentation/2.0/manual/introduction/whatsnew201%23daemon_improvements"><span style='color:windowtext;text-decoration:none;text-underline:none'>this limit is increased</span></a> to 1024 (1023 individual devices and one for “all”).<br> <br> Supports LVM since Zabbix 1.8.6.<br> <br> Until Zabbix 1.8.6, only relative device names may be used (for example, sda), since 1.8.6 an optional /dev/ prefix may be used (for example, /dev/sda)<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>vfs.dev.write[&lt;device&gt;,&lt;type&gt;,&lt;mode&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>硬盘写性能检测</span><span lang=EN-US style='font-family:Arial'>.<o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>整数,如果</span><span lang=EN-US style='font-family:Arial'>type</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>参数为</span><span lang=EN-US style='font-family:Arial'>: sectors,operations,bytes<br> <br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>浮点数,如果</span><span lang=EN-US style='font-family:Arial'> type</span><span style='font-family:宋体;mso-ascii-font-family: Arial;mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>参数为</span><span lang=EN-US style='font-family:Arial'>: sps, ops, bps<o:p></o:p></span></p> </td> <td width=97 colspan=7 style='width:96.55pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>device - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>磁盘名称</span><span lang=EN-US style='font-family: Arial'> (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>,默认为</span><span lang=EN-US style='font-family:Arial'> “all”1) <br> type - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>参数</span><span lang=EN-US style='font-family:Arial'> sectors, operations, bytes, sps, ops, bps (must specify exactly which parameter to use, since defaults are different under various OSes). <br> sps, ops, bps means: sectors, operations, bytes per second respectively <br> mode - avg1 (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family:Arial'>),avg5 (5</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>分钟</span><span lang=EN-US style='font-family:Arial'>), avg15.<br> Note: </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>第三个参数仅在选用</span><span lang=EN-US style='font-family:Arial'>type</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>为</span><span lang=EN-US style='font-family:Arial'>: sps, ops, bps</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>才支持</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Default values of 'type' parameter for different OSes:<br> FreeBSD - bps<br> Linux - sps<br> OpenBSD - operations<br> Solaris - bytes<br> <br> Example: vfs.dev.write[,operations] Old naming: io[*] <br> <br> The type parameters ops, bps and sps on supported platforms are limited to 8 devices (7 individual devices and one “all”). Starting with Zabbix 2.0.1 <a href="https://www.zabbix.com/documentation/2.0/manual/introduction/whatsnew201%23daemon_improvements"><span style='color:windowtext;text-decoration:none;text-underline:none'>this limit is increased</span></a> to 1024 (1023 individual devices and one for “all”).<br> <br> Supports LVM since Zabbix 1.8.6.<br> <br> Until Zabbix 1.8.6, only relative device names may be used (for example, sda), since 1.8.6 optional /dev/ prefix may be used (for example, /dev/sda)<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>vfs.file.cksum[file]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>文件校验</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>文件校验,用的是</span><span lang=EN-US style='font-family:Arial'> UNIX cksum.<o:p></o:p></span></p> </td> <td width=83 colspan=5 style='width:82.5pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>file - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>文件的绝对路径</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=195 colspan=7 style='width:195.25pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>返回值</span><span lang=EN-US style='font-family:Arial'>:<br> 1938292000<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family: Arial'>:<br> vfs.file.cksum[/etc/passwd]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>老版本名称</span><span lang=EN-US style='font-family:Arial'>: cksum<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>文件大小的支持取决于是否支持大文件</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>vfs.file.contents[file,&lt;encoding&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>获取文件内存</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=118 colspan=10 style='width:117.95pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Contents of a file or EOF if it is empty or it contains only LF/CR characters.<o:p></o:p></span></p> </td> <td width=45 colspan=4 style='width:44.7pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>file - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>文件的绝对路径</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'>:<br> vfs.file.contents[/etc/passwd]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>文件大小为不超过</span><span lang=EN-US style='font-family:Arial'> 64 Kbytes.<br> 2.0</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>以上版本支持</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>vfs.file.exists[file]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>检测文件是否存在</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>1 - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>文件或链接</span><span lang=EN-US style='font-family: Arial'> (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>软连接或硬链接</span><span lang=EN-US style='font-family:Arial'>) </span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>存在</span><span lang=EN-US style='font-family:Arial'><br> 0 - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>不存在</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=89 colspan=6 style='width:89.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>file - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>文件的绝对路径</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=188 colspan=6 style='width:188.4pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>例子</span><span lang=EN-US style='font-family:Arial'>: vfs.file.exists[/tmp/application.pid]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>返回的值取决于</span><span lang=EN-US style='font-family:Arial'>S_ISREG POSIX .<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>文件大小的支持取决于是否支持大文件</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>vfs.file.md5sum[file]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>文件的</span><span lang=EN-US style='font-family:Arial'>MD5</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>值</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>文件的</span><span lang=EN-US style='font-family:Arial'>MD5 hash</span><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>值</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=97 colspan=7 style='width:96.55pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>file - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>文件的绝对路径</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>返回值</span><span lang=EN-US style='font-family:Arial'>:<br> b5052decb577e0fffd622d6ddc017e82<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family: Arial'>:<br> vfs.file.md5sum[/usr/local/etc/zabbix_agentd.conf]<br> </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family: Arial;mso-bidi-font-family:Arial'>文件大小为</span><span lang=EN-US style='font-family:Arial'>(64 MB) </span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>的限制从</span><span lang=EN-US style='font-family:Arial'>1.8.6</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>以后移除</span><span lang=EN-US style='font-family: Arial'>.<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>vfs.file.regexp[file,regexp,&lt;encoding&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>从文件中查找字符串</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>The whole line from file containing the matched string or EOF if expression not found<o:p></o:p></span></p> </td> <td width=97 colspan=7 style='width:96.55pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>file - full path to file<br> regexp - GNU regular expression<br> encoding - Code Page identifier<o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Only the first matching line is returned.<br> Example: vfs.file.regexp[/etc/passwd,zabbix]<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>vfs.file.regmatch[file,regexp,&lt;encoding&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>从文件中查找字符串</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=61 colspan=6 style='width:60.65pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>0 - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>没有找到</span><span lang=EN-US style='font-family: Arial'><br> 1 - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>找到</span><span lang=EN-US style='font-family:Arial'>d<o:p></o:p></span></p> </td> <td width=127 colspan=10 style='width:126.85pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>file - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>文件的绝对路径</span><span lang=EN-US style='font-family: Arial'><br> regexp - GNU </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>正则表达式</span><span lang=EN-US style='font-family:Arial'><br> encoding - </span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>文件的编码</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=156 colspan=3 style='width:156.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal align=left style='text-align:left'><span style='font-family: 宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>例子</span><span lang=EN-US style='font-family:Arial'>: vfs.file.regmatch[/var/log/app.log,error]<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>vfs.file.size[file]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>文件大小</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>大小的单位是</span><span lang=EN-US style='font-family:Arial'> bytes.<o:p></o:p></span></p> </td> <td width=97 colspan=7 style='width:96.55pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>file -</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>文件的绝对路径</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Zabbix</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>用户必须对文件有读取的权限</span><span lang=EN-US style='font-family:Arial'><br> Example: vfs.file.size[/var/log/syslog]<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>vfs.file.time[file,&lt;mode&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>文件的时间信息</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Unix </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>时间戳</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=97 colspan=7 style='width:96.55pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>file - full path to the file<br> mode - one of modify (default, modification time), access - last access time, change - last change time<o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Example: vfs.file.time[/etc/passwd,modify]<br> <br> The file size limit depends on <a href="https://www.zabbix.com/documentation/2.0/manual/appendix/items/large_file_support"><span style='color:windowtext;text-decoration:none;text-underline:none'>large file support</span></a>.<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>vfs.fs.discovery<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=228 colspan=16 style='width:228.05pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>列出已经挂载的文件用于</span><span lang=EN-US style='font-family:Arial'> low-level discovery.<o:p></o:p></span></p> </td> <td width=57 colspan=2 style='width:56.85pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>JSON</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>对象</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=12 style='width:12.0pt;border-top:none;border-left:none;border-bottom: solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt;mso-border-top-alt:solid #8CACBB .75pt; mso-border-left-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=92 style='width:92.3pt;border-top:none;border-left:none;border-bottom: solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt;mso-border-top-alt:solid #8CACBB .75pt; mso-border-left-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><span style="mso-spacerun:yes"> </span>2.0.</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>以上版本支持</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>vfs.fs.inode[fs,&lt;mode&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Inodes</span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>数量</span><span lang=EN-US style='font-family: Arial'><o:p></o:p></span></p> </td> <td width=29 colspan=2 style='width:29.3pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>数值</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=133 colspan=12 style='width:133.35pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>fs - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>文件系统</span><span lang=EN-US style='font-family: Arial'><br> mode - total (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family:Arial'>), free, used, pfree (free, percentage), pused (used, percentage)<o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>例子</span><span lang=EN-US style='font-family:Arial'>: vfs.fs.inode[/,pfree] Old naming: vfs.fs.inode.free[*], vfs.fs.inode.pfree[*], vfs.fs.inode.total[*]<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>vfs.fs.size[fs,&lt;mode&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>磁盘空间</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Disk space </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>单位</span><span lang=EN-US style='font-family: Arial'>bytes<o:p></o:p></span></p> </td> <td width=97 colspan=7 style='width:96.55pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>fs - </span><span style='font-family:宋体;mso-ascii-font-family:Arial;mso-hansi-font-family:Arial; mso-bidi-font-family:Arial'>文件系统</span><span lang=EN-US style='font-family: Arial'><br> mode - total (</span><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>默认</span><span lang=EN-US style='font-family:Arial'>), free, used, pfree (free, percentage), pused (used, percentage)<o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>In case of a mounted volume, disk space for local file system is returned. Example: vfs.fs.size[/tmp,free] Old naming: vfs.fs.free[*], vfs.fs.total[*], vfs.fs.used[*], vfs.fs.pfree[*], vfs.fs.pused[*]<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>vm.memory.size[&lt;mode&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>内存大小</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Memory size in bytes or in percentage from total<o:p></o:p></span></p> </td> <td width=97 colspan=7 style='width:96.55pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>mode - one of total (default), active, anon, buffers, cached, exec, file, free, inactive, pinned, shared, wired, used, pused, available, pavailable<o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Old naming: vm.memory.buffers, vm.memory.cached, vm.memory.free, vm.memory.shared, vm.memory.total<br> <br> Item vm.memory.size[] accepts three categories of parameters.<br> <br> First category consists of total - total amount of memory.<br> <br> Second category contains platform-specific memory types:active, anon, buffers, cached, exec, file, free, inactive,pinned, shared, wired.<br> <br> Third category are user-level estimates on how much memory is used and available: used, pused, available, pavailable.<br> <br> See a more detailed description of <a href="https://www.zabbix.com/documentation/2.0/manual/appendix/items/vm.memory.size_params"><span style='color:windowtext;text-decoration:none;text-underline:none'>vm.memory.size parameters</span></a>.<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>web.page.get[host,&lt;path&gt;,&lt;port&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>获取</span><span lang=EN-US style='font-family:Arial'>web</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>页面的内容</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>文本</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=97 colspan=7 style='width:96.55pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>host - hostname<br> path - path to HTML document (default is /)<br> port - port number (default is 80)<o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Returns EOF on fail. Example:<br> web.page.get[www.zabbix.com,index.php,80]<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>web.page.perf[host,&lt;path&gt;,&lt;port&gt;]<o:p></o:p></span></p> </td> </tr> <tr> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>获取</span><span lang=EN-US style='font-family:Arial'>web</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>页面打开的响应速度</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Time in seconds<o:p></o:p></span></p> </td> <td width=97 colspan=7 style='width:96.55pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>host - hostname<br> path - path to HTML document (default is /)<br> port - port number (default is 80)<o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Returns 0 on fail. Example:<br> web.page.perf[www.zabbix.com,index.php,80]<o:p></o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#FEFEFE;mso-shading:windowtext;mso-pattern:solid white;padding: 2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> </tr> <tr> <td width=420 colspan=21 style='width:419.65pt;border:solid #8CACBB 1.0pt; border-top:none;mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>web.page.regexp[host,&lt;path&gt;,&lt;port&gt;,&lt;regexp&gt;,&lt;length&gt;]<o:p></o:p></span></p> </td> </tr> <tr style='mso-yfti-lastrow:yes'> <td width=30 style='width:30.45pt;border:solid #8CACBB 1.0pt;border-top:none; mso-border-top-alt:solid #8CACBB .75pt;mso-border-alt:solid #8CACBB .75pt; background:#0212C9;mso-shading:windowtext;mso-pattern:solid #C0CCDD; padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'><o:p>&nbsp;</o:p></span></p> </td> <td width=45 style='width:45.35pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>从</span><span lang=EN-US style='font-family:Arial'>web</span><span style='font-family:宋体; mso-ascii-font-family:Arial;mso-hansi-font-family:Arial;mso-bidi-font-family: Arial'>页面过滤字符串</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=66 colspan=7 style='width:66.1pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span style='font-family:宋体;mso-ascii-font-family:Arial; mso-hansi-font-family:Arial;mso-bidi-font-family:Arial'>匹配字符串</span><span lang=EN-US style='font-family:Arial'><o:p></o:p></span></p> </td> <td width=97 colspan=7 style='width:96.55pt;border-top:none;border-left:none; border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>host - hostname<br> path - path to HTML document (default is /)<br> port - port number (default is 80)<br> regexp - GNU regular expression<br> length - maximum number of characters to return<o:p></o:p></span></p> </td> <td width=181 colspan=5 style='width:181.2pt;border-top:none;border-left: none;border-bottom:solid #8CACBB 1.0pt;border-right:solid #8CACBB 1.0pt; mso-border-top-alt:solid #8CACBB .75pt;mso-border-left-alt:solid #8CACBB .75pt; mso-border-alt:solid #8CACBB .75pt;background:#FEFEFE;mso-shading:windowtext; mso-pattern:solid white;padding:2.25pt 2.25pt 2.25pt 2.25pt'> <p class=MsoNormal><span lang=EN-US style='font-family:Arial'>Returns EOF on fail (no match). Example:<br> web.page.regexp[www.zabbix.com,index.php,80,OK,2]<o:p></o:p></span></p> </td> </tr> <![if !supportMisalignedColumns]> <tr height=0> <td width=30 style='border:none'></td> <td width=47 style='border:none'></td> <td width=51 style='border:none'></td> <td width=1 style='border:none'></td> <td width=7 style='border:none'></td> <td width=10 style='border:none'></td> <td width=10 style='border:none'></td> <td width=27 style='border:none'></td> <td width=10 style='border:none'></td> <td width=13 style='border:none'></td> <td width=15 style='border:none'></td> <td width=24 style='border:none'></td> <td width=23 style='border:none'></td> <td width=8 style='border:none'></td> <td width=7 style='border:none'></td> <td width=7 style='border:none'></td> <td width=31 style='border:none'></td> <td width=7 style='border:none'></td> <td width=74 style='border:none'></td> <td width=19 style='border:none'></td> <td width=132 style='border:none'></td> </tr> <![endif]> </table> <p class=MsoNormal><span lang=EN-US><o:p>&nbsp;</o:p></span></p> </div> </body> </html> <file_sep>omsa安装参考 http://linux.dell.com/repo/hardware/omsa.html <file_sep># assuming python and pip are already installed # installing the instantclient is usually where problems happen # download the following files from oracle # # oracle-instantclient11.2-basic-11.2.0.3.0-1.x86_64.rpm # oracle-instantclient11.2-devel-11.2.0.3.0-1.x86_64.rpm # oracle-instantclient11.2-sqlplus-11.2.0.3.0-1.x86_64.rpm # install the rpms rpm -ivh oracle-instantclient11.2-basic-11.2.0.3.0-1.x86_64.rpm rpm -ivh oracle-instantclient11.2-sqlplus-11.2.0.3.0-1.x86_64.rpm rpm -ivh oracle-instantclient11.2-devel-11.2.0.3.0-1.x86_64.rpm # the sqlplus package isn't specifically needed, but is usually useful for testing and command line sql connections # configure oracle env (modify exact path based on version of rpm you download) vim /etc/profile.d/oracle #!/bin/bash LD_LIBRARY_PATH="/usr/lib/oracle/11.2/client64/lib:${LD_LIBRARY_PATH}" export LD_LIBRARY_PATH TNS_ADMIN="/etc/oracle" export TNS_ADMIN ORACLE_HOME="/usr/lib/oracle/11.2/client64/lib" export ORACLE_HOME # copy/create your tnsnames.ora file touch /etc/oracle/tnsnames.ora # symlink headers to ORACLE_HOME to avoid "cannot locate Oracle include files" error mkdir /usr/lib/oracle/11.2/client64/lib/sdk ln -s /usr/include/oracle/11.2/client64 /usr/lib/oracle/11.2/client64/lib/sdk/include # done. Install cx_Oracle pip install cx_Oracle #https://gist.github.com/jarshwah/3863378 <file_sep>#!/bin/bash # # status1=$(ps aux|grep "/usr/sbin/zabbix_server" | grep -v grep | grep -v bash | wc -l) if [ "${status1}" = "0" ]; then /etc/init.d/zabbix-server start sleep 3 status2=$(ps aux|grep zabbix_server | grep -v grep | grep -v bash |wc -l) if [ "${status2}" = "0" ]; then /etc/init.d/keepalived stop fi fi <file_sep>#!/bin/bash # function:monitor php-fpm status from zabbix # License: GPL # mail:<EMAIL> # date:2013-05-30 source /etc/bashrc >/dev/null 2>&1 source /etc/profile >/dev/null 2>&1 LOG_FILE=/var/log/zabbix/phpfpmstatus.log curl http://127.0.0.1/phpfpmstatus >${LOG_FILE} 2>&1 pool(){ awk '/pool/ {print $NF}' ${LOG_FILE} } process_manager() { awk '/process manager/ {print $NF}' ${LOG_FILE} } start_since(){ awk '/^start since:/ {print $NF}' ${LOG_FILE} } accepted_conn(){ awk '/^accepted conn:/ {print $NF}' ${LOG_FILE} } listen_queue(){ awk '/^listen queue:/ {print $NF}' ${LOG_FILE} } max_listen_queue(){ awk '/^max listen queue:/ {print $NF}' ${LOG_FILE} } listen_queue_len(){ awk '/^listen queue len:/ {print $NF}' ${LOG_FILE} } idle_processes(){ awk '/^idle processes:/ {print $NF}' ${LOG_FILE} } active_processes(){ awk '/^active processes:/ {print $NF}' ${LOG_FILE} } total_processes(){ awk '/^total processes:/ {print $NF}' ${LOG_FILE} } max_active_processes(){ awk '/^max active processes:/ {print $NF}' ${LOG_FILE} } max_children_reached(){ awk '/^max children reached:/ {print $NF}' ${LOG_FILE} } case "$1" in pool) pool ;; process_manager) process_manager ;; start_since) start_since ;; accepted_conn) accepted_conn ;; listen_queue) listen_queue ;; max_listen_queue) max_listen_queue ;; listen_queue_len) listen_queue_len ;; idle_processes) idle_processes ;; active_processes) active_processes ;; total_processes) total_processes ;; max_active_processes) max_active_processes ;; max_children_reached) max_children_reached ;; *) echo "Usage: $0 {pool|process_manager|start_since|accepted_conn|listen_queue|max_listen_queue|listen_queue_len|idle_processes|active_processes|total_processes|max_active_processes|max_children_reached}" esac
21be0d77694fd92955fb94aae17c4875f79513fc
[ "Markdown", "Python", "Text", "Shell" ]
34
Python
9618211/zabbix-book
f47da49d92bfd66d013f30d0417820e136f43245
3e00e05893f8fe59405468521fa90503e1079cbe
refs/heads/main
<file_sep>import React from 'react' import {useDispatch, useSelector} from "react-redux" import {ADD_TO_CART, DECREASE_QUANTITY, REMOVE_FROM_CART} from "../../redux/actions"; const Cart = () => { const dispatch = useDispatch() const cart = useSelector(s => s.cart) return ( <div className="container-fluid py-4"> { cart.length ? <> <table className="table"> <thead> <tr> <th scope="col">№</th> <th scope="col">Название</th> <th scope="col">Цена</th> <th scope="col">Количество</th> <th scope="col">Сумма</th> <th scope="col">Удалить</th> </tr> </thead> <tbody> { cart.map((product, idx) => <tr> <th scope="row">{idx + 1}</th> <td>{product.title}</td> <td>{product.price}$</td> <td> <div className="d-flex align-items-center justify-content-between w-50"> <button className="me-2 btn btn-sm btn-light border" onClick={() => dispatch(DECREASE_QUANTITY(idx))}>- </button> <span>{product.quantity}</span> <button className="ms-2 btn btn-sm btn-light border" onClick={() => dispatch(ADD_TO_CART(product))}>+ </button> </div> </td> <td>{(product.price * product.quantity).toFixed(2)}$</td> <td> <button className="btn btn-sm btn-danger fw-bolder" onClick={() => dispatch(REMOVE_FROM_CART(product.id))}> Удалить </button> </td> </tr> ) } </tbody> </table> <p className="container text-md-end fw-bold"><span className="me-5 pe-4">Итог: { cart.reduce((acc, item) => { return (item.price * item.quantity) + acc }, 0 ).toFixed(2) }$</span></p> </> : <p className="text-center fw-bold mt-3">Корзина пуста, добавьте товар</p> } </div> ); }; export default Cart;<file_sep>import axios from "axios"; export const GET_CATALOG = () => { return (dispatch) => { dispatch({type: "GET_CATALOG_LOADING"}) axios(`https://61530f5bc465200017d1a969.mockapi.io/catalog`) .then(({data}) => { return dispatch({type: "GET_CATALOG", payload: data}) } ) } } export const ADD_TO_CART = (product) => { return {type: "ADD_TO_CART", payload: product} } export const DECREASE_QUANTITY = (idx) => { return {type: "DECREASE_QUANTITY", payload: idx} } export const REMOVE_FROM_CART = (id) => { return {type: "REMOVE_FROM_CART", payload: id} }
9f68cbd9a11b195e533c0c410562b3c07297499e
[ "JavaScript" ]
2
JavaScript
salymbaeva17/e-commerce
403385ceabfe10a2384aacc085b314a6b98dae92
b664b8ceaa16121aab2eb1c8d55d3601aae94c1c
refs/heads/main
<repo_name>anikbhaya/Car-Rental-Website<file_sep>/src/Components/Cars/Cars.js import React from 'react'; import './Cars.css' const Cars = (props) => { const { car } = props const { carMake, carModel, carModelYear, driverName, rentPerDay, image } = car return ( <div className="card card-item shadow-lg p-2"> <img className="" src={image} className="card-img-top img-fluid" alt={carModel} /> <table className="table"> <tbody> <tr> <th colSpan="2"> <h5 className="card-title text-center">Model: {carModel}</h5> </th> </tr> <tr> <th scope="row">Car Make :</th> <td>{carMake}</td> </tr> <tr> <th scope="row">Model Year :</th> <td>{carModelYear}</td> </tr> <tr> <th scope="row">Driver Name :</th> <td>{driverName}</td> </tr> </tbody> </table> <div className="text-center"> <p className="mb-0">Rent Per Day</p> <h2 className="fs-1">${rentPerDay}</h2> </div> <div className="card-body d-flex justify-content-center"> <div> <button onClick={() => props.handleAddtoCart(car)} className="btn btn-warning"><i className="fas fa-cart-plus"></i> add to cart</button> </div> </div> </div> ); }; export default Cars;<file_sep>/src/Components/Header/Header.js import React from 'react'; import './Header.css' const Header = () => { return ( <div id="header" className="container-lg text-center mt-4"> <div> <h6>Find Your Car Rental Today!</h6> <h1 className="fw-bold uppercase my-0">Brands You Trust At Prices You'll Love</h1> <br /> </div> </div> ); }; export default Header;<file_sep>/src/Components/Cartitem/Cartitem.js import React from 'react'; import './Cartitem.css' const Cartitem = (props) => { const {rentPerDay, image, carModel} = props.product return ( <div className="card mb-3"> <div className="row g-0 d-flex align-items-center"> <div className="col-md-4"> <img src={image} className="img-fluid rounded-start" alt={carModel} /> </div> <div className="col-md-8 d-flex justify-content-between align-items.center px-1"> <h6 className="m-0">{carModel}</h6> <p className="m-0">${rentPerDay}/Day</p> <i onClick={() => props.removeFromCart(props.product.carModel)} id="close" className="fas fa-times"></i> </div> </div> </div> ); }; export default Cartitem;<file_sep>/src/Components/Cart/Cart.js import React from 'react'; import Cartitem from '../Cartitem/Cartitem'; import './Cart.css' const Cart = (props) => { const { cart } = props; let totalCost = 0; cart.forEach(product => totalCost += product.rentPerDay) return ( <div id="cart" className="card shadow-lg"> <table className="table"> <tbody> <tr className="p-2"> <th colSpan="2"> <h5 className="card-title text-center"><i className="fas fa-car"></i> Cars Added: {cart.length} </h5> </th> </tr> <tr className="p-2"> <th scope="row">Total Rent Cost :</th> <th>${totalCost}</th> </tr> </tbody> </table> <div className="px-2"> { cart.map(product => <Cartitem key={product.id} product={product} removeFromCart={props.removeFromCart}></Cartitem>) } </div> <div className="card-body d-flex justify-content-center"> <button className="btn btn-warning">Proceed to checkout</button> </div> </div> ); }; export default Cart;<file_sep>/src/Components/Alert/Alert.js import React from 'react'; const Alert = () => { return ( <div> <div id="alert" className="shadow-lg"> <svg xmlns="http://www.w3.org/2000/svg" className="d-none"> <symbol id="exclamation-triangle-fill" fill="currentColor" viewBox="0 0 16 16"> <path d="M8.982 1.566a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566zM8 5c.535 0 .954.462.9.995l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995A.905.905 0 0 1 8 5zm.002 6a1 1 0 1 1 0 2 1 1 0 0 1 0-2z" /> </symbol> </svg> <div className="alert alert-danger d-flex align-items-center" role="alert"> <svg className="bi flex-shrink-0 me-2" width="24" height="24" role="img" aria-label="Warning:"><use xlinkHref="#exclamation-triangle-fill" /></svg> <div> An example warning alert with an icon </div> </div> </div> </div> ); }; export default Alert;<file_sep>/src/Components/Showcase/Showcase.js import React, { useEffect, useState } from 'react'; import { toast, ToastContainer } from 'react-toastify'; import 'react-toastify/dist/ReactToastify.css'; import Alert from '../Alert/Alert'; import Cars from '../Cars/Cars'; import Cart from '../Cart/Cart'; import './Showcase.css' toast.configure() const Showcase = () => { const [cars, setCars] = useState([]) const [cart, setCart] = useState([]) const [search, setSearch] = useState([]) useEffect(() => { fetch('./rentData.json') .then(res => res.json()) .then(data => { setCars(data) setSearch(data) }) }, []) const notify = () => toast.warn("Already Added to Cart", { position: toast.POSITION.TOP_CENTER, autoClose: 2000 }); const handleAddtoCart = (car) => { let newCart = [...cart]; const isExist = newCart.find(item => item.id === car.id) if(isExist === undefined){ newCart.push(car) setCart(newCart) }else{ notify() } } const handleChange = (event) => { const searchText = event.target.value; const result = cars.filter(car => car.carMake.toLowerCase().includes(searchText.toLowerCase()) || car.carModel.toLowerCase().includes(searchText.toLowerCase())) setSearch(result) } const removeFromCart = (productId) => { let newCart = [...cart]; const remaining = newCart.filter(pw => pw.carModel !== productId) setCart(remaining) } return ( <div id="showcase-container" className="container-lg"> <div className="input-group pb-4 w-50 m-auto"> <input onChange={handleChange} type="text" className="form-control" placeholder="Search Here..."/> <button className="btn btn-warning">Search</button> </div> <div className="row px-3"> <div className="col-7 col-md-8 col-lg-9 cars-showcase"> { search.map(car => <Cars key={car.id} car={car} handleAddtoCart={handleAddtoCart} ></Cars>) } </div> <div className="col-5 col-md-4 col-lg-3 cart"> <Cart cart={cart} removeFromCart={removeFromCart}></Cart> </div> </div> </div> ); }; export default Showcase;<file_sep>/README.md # Car Rental Web App. View Live Website Here [Car Rental Website](https://car-rental-anikbhaya.netlify.app/). ### Framework Used * React Js * Bootstrap * Font Awesome ### About Website * Show cars of differnt brand for rent * Can add multiple cars to cart * on cart, it will show added carts details dynamically.
af05705667824d0e6b74155e6c30297fee5723a5
[ "JavaScript", "Markdown" ]
7
JavaScript
anikbhaya/Car-Rental-Website
097017fb21dc359f385d5b95c5f44d4c36b42dfd
c86ef127c77c31f96e0cdcfb295862d235dce7ce
refs/heads/master
<repo_name>cafaray/ftcgedoc<file_sep>/db/ctrldoce.sql -- ----------------------------------------------------- -- -- BASE DE DATOS PARA CONTROL DE DOCUMENTOS ELECTRONICOS -- -- DESARROLLADO POR FARIAS TELECOMUNICACIONES Y COMPUTO -- -- FECHA DE CREACION: 07 mayo 2013 -- -- ARCHIVO SCRIPT PARA LA GENERACI�N DE ENTORNO DE -- -- TRABAJO DE LA BASE DE DATOS. LENGUAJE MYSQL -- -- ----------------------------------------------------- -- DROP DATABASE IF EXISTS ctrldoce; CREATE DATABASE ctrldoce CHARACTER SET = 'latin1'; USE ctrldoce; GRANT SELECT, INSERT, UPDATE, EXECUTE ON ctrldoce.* TO 'sysadmindoce'@'localhost' IDENTIFIED BY 'Sv6lOu/Vs'; GRANT DELETE ON ctrldoce.jdem10t TO 'sysadmindoce'@'localhost' IDENTIFIED BY 'Sv6lOu/Vs'; GRANT DELETE ON ctrldoce.jctm01t TO 'sysadmindoce'@'localhost' IDENTIFIED BY 'Sv6lOu/Vs'; GRANT DELETE ON ctrldoce.jctm02t TO 'sysadmindoce'@'localhost' IDENTIFIED BY 'Sv6lOu/Vs'; GRANT DELETE ON ctrldoce.jctm03t TO 'sysadmindoce'@'localhost' IDENTIFIED BY 'Sv6lOu/Vs'; GRANT DELETE ON ctrldoce.jctm3at TO 'sysadmindoce'@'localhost' IDENTIFIED BY 'Sv6lOu/Vs'; GRANT DELETE ON ctrldoce.jctm09t TO 'sysadmindoce'@'localhost' IDENTIFIED BY 'Sv6lOu/Vs'; GRANT DELETE ON ctrldoce.jctm10t TO 'sysadmindoce'@'localhost' IDENTIFIED BY 'Sv6lOu/Vs'; GRANT SELECT ON ctrldoce.jctc10v TO 'sysadmindoce'@'localhost' IDENTIFIED BY 'Sv6lOu/Vs'; -- GRANT SELECT, INSERT, UPDATE, EXECUTE ON fegedoc.* TO 'sysadmindoce'@'%' IDENTIFIED BY 'Sv6lOu/Vs'; -- GRANT DELETE ON fegedoc.jdem10t TO 'sysadmindoce'@'%' IDENTIFIED BY 'Sv6lOu/Vs'; -- GRANT DELETE ON fegedoc.jctm01t TO 'sysadmindoce'@'%' IDENTIFIED BY 'Sv6lOu/Vs'; -- GRANT DELETE ON fegedoc.jctm09t TO 'sysadmindoce'@'%' IDENTIFIED BY 'Sv6lOu/Vs'; -- GRANT SELECT ON fegedoc.jctc10v TO 'sysadmindoce'@'%' IDENTIFIED BY 'Sv6lOu/Vs'; CREATE FUNCTION getUser() RETURNS VARCHAR(32) DETERMINISTIC RETURN SUBSTRING(user(),1,INSTR(user(),'@')-1); -- Control de secuencia CREATE TABLE kaqcidt ( cdobjeto CHAR(2) NOT NULL, feregistro DATETIME NOT NULL, dsobjeto VARCHAR(70) NOT NULL, idactual CHAR(16) NOT NULL, inactivo CHAR(1) NOT NULL, cdusuari CHAR(32) NOT NULL, programa VARCHAR(45) NOT NULL, tmpstmp DATETIME NOT NULL, PRIMARY KEY(cdobjeto) )ENGINE=INNODB; -- Control de sesion CREATE TABLE jpem90t ( idsesion VARCHAR(32) NOT NULL, cdusulog CHAR(16) NOT NULL, dsipfrom VARCHAR(15) NOT NULL, dsusulog VARCHAR(100) NOT NULL, cdusuari VARCHAR(32) NOT NULL, programa VARCHAR(45) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idsesion) )ENGINE=INNODB; -- Empresa CREATE TABLE jpem00t ( cdperson CHAR(16) NOT NULL, dsrazsoc VARCHAR(120) NOT NULL, dsrfc VARCHAR(13) NOT NULL, dsfolder VARCHAR(16) NOT NULL, dslogo VARCHAR(35) NOT NULL, isowner ENUM('S','N') NOT NULL DEFAULT 'N', intipprs ENUM('-','C','P') NOT NULL DEFAULT '-', cdusuari VARCHAR(16) NOT NULL, programa VARCHAR(45) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(cdperson) )ENGINE=INNODB; -- Contacto CREATE TABLE jpem10t ( cdperson CHAR(16) NOT NULL, cdcontac CHAR(16) NOT NULL, dsfirst VARCHAR(35) NOT NULL, dslast VARCHAR(60) NOT NULL, dsmail VARCHAR(100) NOT NULL, dstelloc VARCHAR(60) NOT NULL DEFAULT '', dstelmov VARCHAR(14) NOT NULL DEFAULT '', cdusuari CHAR(16) NOT NULL, programa VARCHAR(45) NOT NULL, tmstmp DATETIME NOT NULL, dsipfrom VARCHAR(15) NOT NULL, PRIMARY KEY(cdperson,cdcontac), FOREIGN KEY(cdperson) REFERENCES jpem00t(cdperson) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=INNODB; -- Grupo CREATE TABLE jgrm01t ( cdidegrp CHAR(1) NOT NULL, dsidegrp VARCHAR(35) NOT NULL, ingrpmod INT NOT NULL DEFAULT 0, cdusuari VARCHAR(16) NOT NULL, programa VARCHAR(45) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(cdidegrp) )ENGINE=INNODB; -- Usuario CREATE TABLE jusm01t ( cdperson CHAR(16) NOT NULL, cdcontac CHAR(16) NOT NULL, cdidegrp CHAR(1) NOT NULL, cdusulog VARCHAR(100) NOT NULL, dsvalenc VARCHAR(64) NOT NULL, instatus ENUM('A','E','S') NOT NULL DEFAULT 'A', inusumod INT NOT NULL DEFAULT 0, dsipfrom VARCHAR(15) NOT NULL, cdusuari CHAR(16) NOT NULL, programa VARCHAR(45) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(cdperson,cdcontac), FOREIGN KEY(cdperson,cdcontac) REFERENCES jpem10t(cdperson,cdcontac) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=INNODB; -- Documento electr�nico CREATE TABLE jdem10t ( cdperson CHAR(16) NOT NULL, cddocele CHAR(16) NOT NULL, dsfiles VARCHAR(500) NOT NULL COMMENT 'Nombre original de los archivos', dstitle VARCHAR(35) NOT NULL DEFAULT 'Documento', dsobserv VARCHAR(2000) NOT NULL DEFAULT '' COMMENT 'Descripci�n detallada del documento', instatus CHAR(1) NOT NULL, tschgstt DATETIME NULL, cdusuari CHAR(16) NOT NULL, dsipfrom VARCHAR(15) NOT NULL, cdusumod CHAR(16) NULL, programa VARCHAR(45) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(cdperson,cddocele), FOREIGN KEY(cdperson) REFERENCES jpem00t(cdperson) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=INNODB; -- Solicitud registro proveedores CREATE TABLE jsegsot ( cdsolres VARCHAR(64) NOT NULL, dsmail VARCHAR(120) NOT NULL, dsrfc VARCHAR(13) NOT NULL, tsfecsol DATETIME NOT NULL, tsfecdwn DATETIME NOT NULL, instatus CHAR(1) NOT NULL, dsagent VARCHAR(500) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(cdsolres) ); -- START PROCEDURES AND FUNCTIONS DELIMITER // DROP PROCEDURE IF EXISTS ingresaSolicitudReset; CREATE PROCEDURE ingresaSolicitudReset(IN correo VARCHAR(120), IN rfc VARCHAR(13), IN agente VARCHAR(500), OUT referencia VARCHAR(64), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = correo AND cdperson = (SELECT cdperson FROM jpem00t WHERE dsrfc = rfc); IF(existe>0)THEN IF (agente='')THEN SET error = 'Error 50051: No se han especificado todos los valores requeridos.'; ELSE SELECT COUNT(cdsolres) INTO existe FROM jsegsot WHERE dsmail = correo AND DATE(tsfecdwn) <= DATE(CURRENT_TIMESTAMP); IF(existe>0)THEN SET error = 'Error 50054: Hay una solicitud pendiente, no se puede registrar otra.'; ELSE SELECT CURRENT_TIMESTAMP INTO cts; SELECT CONCAT(MD5(CONCAT(cts,'zreset')),MD5(CONCAT(cts,correo))) INTO referencia; INSERT INTO jsegsot (cdsolres,dsmail,dsrfc,tsfecsol,tsfecdwn,instatus,dsagent,tmstmp) VALUES (referencia,correo,rfc,CURRENT_TIMESTAMP,ADDTIME(CURRENT_TIMESTAMP, '24:00:000.0'),'A',agente,cts); SET error = ''; END IF; END IF; ELSE SET error = 'Error 50050: Este usuario no se encuentra registrado.'; END IF; END; // DROP PROCEDURE IF EXISTS aplicaSolicitudReset; CREATE PROCEDURE aplicaSolicitudReset(IN identificador VARCHAR(64), IN valenc VARCHAR(16), IN ipfrom VARCHAR(15), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE contacto CHAR(16) DEFAULT ''; DECLARE rfc VARCHAR(13) DEFAULT ''; DECLARE usuario VARCHAR(100) DEFAULT ''; SELECT COUNT(cdsolres) INTO existe FROM jsegsot WHERE cdsolres = identificador AND DATE(tsfecdwn) <= DATE(CURRENT_TIMESTAMP) ; IF(existe>0)THEN SELECT COUNT(cdsolres) INTO existe FROM jsegsot WHERE cdsolres = identificador AND DATE(tsfecdwn) <= DATE(CURRENT_TIMESTAMP) AND instatus = 'A'; IF(existe>0)THEN SELECT dsmail, dsrfc INTO usuario, rfc FROM jsegsot WHERE cdsolres = identificador; SELECT cdcontac INTO contacto FROM jpem10t WHERE dsmail = usuario; UPDATE jusm01t SET dsvalenc = CONCAT(MD5(CONCAT(contacto,tmstmp)),MD5(valenc)), programa = CONCAT('RSPWD',ipfrom) WHERE cdusulog = usuario AND cdperson = (SELECT cdperson FROM jpem00t WHERE dsrfc = rfc); SET error = ''; ELSE SET error = 'Error 50053: Esta solicitud ya fue aplicada.'; END IF; ELSE SET error = 'Error 50052: Esta solicitud ya no existe.'; END IF; END; // DROP PROCEDURE IF EXISTS listaDocumentos; CREATE PROCEDURE listaDocumentos(IN persona CHAR(16), IN tipo CHAR(1), IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0) THEN IF (persona = '*') THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, cddocele, dsfiles, dstitle,dsobserv, instatus, DATE(A.tmstmp) FROM jdem10t A INNER JOIN jpem00t B ON A.cdperson = B.cdperson WHERE intipprs = tipo ORDER BY empresa, A.tmstmp desc; ELSE SELECT cdperson, nombreEmpresa(cdperson) empresa, cddocele, dsfiles, dstitle,dsobserv, instatus, DATE(tmstmp) FROM jdem10t WHERE cdperson = persona ORDER BY tmstmp desc; END IF; ELSE SELECT 'Sesi�n no valida.','','','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS listaDocumentosFiltro; CREATE PROCEDURE listaDocumentosFiltro(IN persona CHAR(16), IN tipo CHAR(1), IN fechai DATE, IN fechaf DATE, IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0) THEN IF (persona = '*') THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, cddocele, dsfiles, dstitle,dsobserv, instatus, DATE(A.tmstmp) FROM jdem10t A INNER JOIN jpem00t B ON A.cdperson = B.cdperson WHERE intipprs = tipo AND DATE(A.tmstmp) BETWEEN fechai AND fechaf ORDER BY empresa, A.tmstmp desc; ELSE SELECT cdperson, nombreEmpresa(cdperson) empresa, cddocele, dsfiles, dstitle,dsobserv, instatus, DATE(tmstmp) FROM jdem10t WHERE cdperson = persona AND DATE(tmstmp) BETWEEN fechai AND fechaf ORDER BY tmstmp desc; END IF; ELSE SELECT 'Sesi�n no valida.','','','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS registraDocumento; CREATE PROCEDURE registraDocumento(IN persona CHAR(16), IN archivo VARCHAR(500), IN titulo VARCHAR(35), IN observaciones VARCHAR(2000), IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT getCodigoApp('DE') INTO referencia; INSERT INTO jdem10t (cdperson,cddocele,dsfiles,dstitle,dsobserv,instatus,cdusuari,dsipfrom,programa,tmstmp) VALUES (persona,referencia,archivo,titulo,observaciones,'A',obtieneUsuario(sesion),obtieneIp(sesion),'registraDocumento',CURRENT_TIMESTAMP); SET error = ''; ELSE SET error = 'Error 50039: La sesi�n no es correcta. Restablezca la aplicaic�n.'; END IF; END; // DROP PROCEDURE IF EXISTS registraCabeceraFactura; CREATE PROCEDURE registraCabeceraFactura(IN persona CHAR(16),IN documento VARCHAR(35),IN archivo VARCHAR(500),IN tipo VARCHAR(100),IN serie VARCHAR(60),IN folio INT, IN fecha VARCHAR(30), IN formaPago VARCHAR(250), IN subTotal DOUBLE(13,2), IN descuento DOUBLE(13,2), IN tipoCambio DOUBLE(13,2), IN total DOUBLE(16,2), IN moneda VARCHAR(30), IN metodo VARCHAR(250), IN expedicion VARCHAR(250), IN rfcemisor VARCHAR(13), IN emisor VARCHAR(120), IN rfcreceptor VARCHAR(13), IN receptor VARCHAR(120),IN impuesto DOUBLE(13,2), IN uuid VARCHAR(40),IN timbre VARCHAR(21), IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE isdupkey INT DEFAULT 0; BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; DECLARE EXIT HANDLER FOR 1062 SET isdupkey = 1; SELECT COUNT(cdfile) INTO existe FROM jdem20t WHERE cdperson=persona AND cddocele=documento; IF (existe>0)THEN SET error = 'Error 50031: Ya existe un registro asociado al documento. No se puede registrar.'; SET referencia = ''; ELSE SELECT getCodigoApp('FL') INTO referencia; SELECT CURRENT_TIMESTAMP INTO cts; INSERT INTO jdem20t (cdperson,cddocele,cdfile,dsfile,dstipo,dsserie,dsfolio,dsfecha,dsfrmpag,dbsubtot,dbdescto,dbtipcam,dbtotal,dsmoneda,dsmetpag,dslugexp,dsrfcemi,dsnomemi,dsrfcrec,dsnomrec, dbimptra,dsuuid,dsfectim,programa,tmstmp,cdusuari) VALUES (persona,documento,referencia,archivo,tipo,serie,folio,fecha,formaPago,subtotal,descuento,tipocambio,total,moneda,metodo,expedicion,rfcemisor,emisor,rfcreceptor,receptor,impuesto,uuid,timbre,'spInsertaFactura',CURRENT_TIMESTAMP,obtieneUsuario(sesion)); SET error = ''; END IF; END; IF (isdupkey>0)THEN SET referencia = ''; SET error = 'Error 50030: Existe una llave d�plicada en facturas. Notifique a sistemas.'; END IF; END; // DROP PROCEDURE IF EXISTS getCodigo; CREATE PROCEDURE getCodigo(IN var_objeto char(2), IN var_programa char(12), OUT var_codigo char(16)) BEGIN DECLARE lon int; DECLARE inprestado int; SELECT COUNT(inactivo) INTO inprestado FROM kaqcidt WHERE inactivo='n' AND cdobjeto=var_objeto; WHILE (inprestado=0) DO SELECT COUNT(inactivo) INTO inprestado FROM kaqcidt WHERE inactivo='n' AND cdobjeto=var_objeto; END WHILE; IF (inprestado>0) THEN SELECT (CAST(SUBSTRING(idactual,1,16) AS UNSIGNED) + CAST(SUBSTRING(SIN(CAST(SUBSTRING(idactual,1,16) AS UNSIGNED)),4,1) AS UNSIGNED))+3 INTO var_codigo FROM kaqcidt WHERE cdobjeto = var_objeto; SELECT CONCAT(REPEAT('0', 16-LENGTH(var_codigo)),var_codigo) INTO var_codigo; UPDATE kaqcidt SET inactivo = 's' , idactual = var_codigo, cdusuari = getUser(), programa = var_programa WHERE cdobjeto = var_objeto; END IF; END; // DROP PROCEDURE IF EXISTS setCodigoApp; CREATE PROCEDURE setCodigoApp(var_cdobjeto char(2)) BEGIN UPDATE kaqcidt SET inactivo = 'n' WHERE cdobjeto = var_cdobjeto; END // DROP FUNCTION IF EXISTS getCodigoApp; CREATE FUNCTION getCodigoApp(tipo char(2)) RETURNS CHAR(16) DETERMINISTIC BEGIN CALL getCodigo(tipo,'f(getCodigo)', @x); CALL setCodigoApp(tipo); RETURN @x; END // DROP FUNCTION IF EXISTS obtieneUsuario; CREATE FUNCTION obtieneUsuario(sesion VARCHAR(32)) RETURNS CHAR(16) DETERMINISTIC BEGIN RETURN (SELECT cdusulog FROM jpem90t WHERE idsesion = sesion); END // CREATE FUNCTION obtieneIp(sesion CHAR(32)) RETURNS VARCHAR(15) DETERMINISTIC BEGIN RETURN (SELECT dsipfrom FROM jpem90t WHERE idsesion = sesion); END; // DROP PROCEDURE IF EXISTS registraPersona; CREATE PROCEDURE registraPersona(IN razonsocial VARCHAR(120), IN rfc VARCHAR(13), IN tipo ENUM('C','P'), IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE isdupkey INT DEFAULT 0; BEGIN DECLARE existe INT DEFAULT 0; DECLARE EXIT HANDLER FOR 1062 SET isdupkey = 1; SELECT COUNT(dsrfc) INTO existe FROM jpem00t WHERE dsrfc = rfc; IF(existe>0)THEN SET error = 'Error 50031: Este RFC ya se encuentra registrado, no se puede volver a registrar.'; SET referencia = ''; ELSE SELECT getCodigoApp('PR') INTO referencia; INSERT INTO jpem00t (cdperson,dsrazsoc,dsrfc,dsfolder,dslogo,isowner,intipprs,cdusuari,programa,tmstmp) VALUE (referencia,razonsocial,rfc,referencia,'','N',tipo,obtieneUsuario(sesion),'registraPersona',CURRENT_TIMESTAMP); SET error = ''; END IF; END; IF (isdupkey>0)THEN SET referencia = ''; SET error = 'Error 50030: Existe una llave d�plicada en personas. Notifique a sistemas.'; END IF; END; // DROP FUNCTION IF EXISTS obtienePropietario; CREATE FUNCTION obtienePropietario(sesion VARCHAR(32)) RETURNS CHAR(16) DETERMINISTIC BEGIN DECLARE existe INT DEFAULT 0; DECLARE persona CHAR(16) DEFAULT ''; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT cdusulog INTO persona FROM jpem90t WHERE idsesion = sesion; RETURN (SELECT cdperson FROM jpem10t WHERE cdcontac = persona); ELSE RETURN ''; END IF; END; // DROP PROCEDURE IF EXISTS obtienePersonas; CREATE PROCEDURE obtienePersonas(IN tipo CHAR(1), IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN IF (tipo= '-') THEN SELECT COUNT(cdperson) INTO existe FROM jpem00t WHERE isowner = 'S' AND cdperson = obtienePropietario(sesion); IF (existe=1)THEN SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE cdperson = obtienePropietario(sesion) AND isowner = 'S'; ELSE SELECT 'No se permite ','esta ', 'consulta.','-'; END IF; ELSE SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE intipprs = tipo AND isowner = 'N'; END IF; ELSE SELECT 'Sesi�n ','no ', 'valida.',''; END IF; END; // DROP PROCEDURE IF EXISTS localizaPersonasPorRFC; CREATE PROCEDURE localizaPersonasPorRFC(IN tipo CHAR(1), IN rfc VARCHAR(35), IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE condicion VARCHAR(37) DEFAULT ''; DECLARE micodigo CHAR(16) DEFAULT ''; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT cdperson INTO micodigo FROM jpem10t WHERE cdcontac = (SELECT cdusulog FROM jpem90t WHERE idsesion = sesion); SELECT COUNT(isowner) INTO existe FROM jpem00t WHERE cdperson = micodigo AND isowner = 'S'; IF(existe>0)THEN SELECT CONCAT('%',rfc,'%') INTO condicion; IF(tipo='*') THEN SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE isowner = 'N' AND dsrfc LIKE (condicion); ELSE SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE intipprs = tipo AND isowner = 'N' AND dsrfc LIKE (condicion); END IF; ELSE SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE cdperson = micodigo; END IF; ELSE SELECT 'Sesi�n ','no ', 'valida.',''; END IF; END; // DROP PROCEDURE IF EXISTS localizaPersonas; CREATE PROCEDURE localizaPersonas(IN tipo CHAR(1), IN nombre VARCHAR(35), IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE condicion VARCHAR(37) DEFAULT ''; DECLARE micodigo CHAR(16) DEFAULT ''; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT cdperson INTO micodigo FROM jpem10t WHERE cdcontac = (SELECT cdusulog FROM jpem90t WHERE idsesion = sesion); SELECT COUNT(isowner) INTO existe FROM jpem00t WHERE cdperson = micodigo AND isowner = 'S'; IF(existe>0)THEN SELECT CONCAT('%',nombre,'%') INTO condicion; IF(tipo='*') THEN SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE isowner = 'N' AND dsrazsoc LIKE (condicion); ELSE SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE intipprs = tipo AND isowner = 'N' AND dsrazsoc LIKE (condicion); END IF; ELSE SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE cdperson = micodigo; END IF; ELSE SELECT 'Sesi�n ','no ', 'valida.',''; END IF; END; // DROP PROCEDURE IF EXISTS preRegistroContacto; CREATE PROCEDURE preRegistroContacto(IN persona CHAR(16), IN primero VARCHAR(35), IN segundo VARCHAR(60), IN correo VARCHAR(100), in telefono VARCHAR(60), IN movil VARCHAR(14), IN grupo VARCHAR(1), IN valenc VARCHAR(16),IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE referencia VARCHAR(32); DECLARE error VARCHAR(250); CALL registraContacto(persona,primero,segundo,correo,telefono,movil,grupo,valenc,'preregistro',@referencia,@error); IF(referencia<>'')THEN SET referencia = @referencia; UPDATE jpem01t SET instatus = 'P' WHERE cdperson = persona AND cdcontac = @referencia; ELSE SET error = @error; END IF; END; // DROP PROCEDURE IF EXISTS registraContacto; CREATE PROCEDURE registraContacto(IN persona CHAR(16), IN primero VARCHAR(35), IN segundo VARCHAR(60), IN correo VARCHAR(100), in telefono VARCHAR(60), IN movil VARCHAR(14), IN grupo VARCHAR(32), IN valenc VARCHAR(16),IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE isdupkey INT DEFAULT 0; BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; DECLARE EXIT HANDLER FOR 1062 SET isdupkey = 1; SELECT COUNT(dsmail) INTO existe FROM jpem10t WHERE dsmail = correo; IF (existe>0)THEN SET error = 'Error 50031: Este correo electronico ya se encuentra registrado, no se puede volver a registrar.'; SET referencia = ''; ELSE SELECT getCodigoApp('CT') INTO referencia; SELECT CURRENT_TIMESTAMP INTO cts; INSERT INTO jpem10t (cdperson,cdcontac,dsfirst,dslast,dsmail,dstelloc,dstelmov,cdusuari,programa,tmstmp,dsipfrom) VALUES (persona,referencia,primero,segundo,correo,telefono,movil,obtieneUsuario(sesion),'registraContacto',cts,obtieneIp(sesion)); INSERT INTO jusm01t (cdperson,cdcontac,cdidegrp,cdusulog,dsvalenc,instatus,inusumod,dsipfrom,cdusuari,programa,tmstmp) VALUES (persona,referencia,grupo,correo,CONCAT(MD5(CONCAT(referencia,cts)),MD5(valenc)),'A',0,obtieneIp(sesion),obtieneUsuario(sesion),'registraContacto',cts); SET error = ''; END IF; END; IF (isdupkey>0)THEN SET error = "Error 50030: Existe una llave duplicada en personas. Notifique a sistemas."; SET referencia = ""; END IF; END; // DROP PROCEDURE IF EXISTS actualizaContacto; CREATE PROCEDURE actualizaContacto(IN identificador CHAR(16), IN primero VARCHAR(35), IN segundo VARCHAR(60), IN correo VARCHAR(100), IN telefono VARCHAR(60), IN movil VARCHAR(14), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; SELECT COUNT(dsmail) INTO existe FROM jpem10t WHERE dsmail = correo AND cdcontac <> identificador; IF (existe>0)THEN SET error = 'Error 50031: Este correo electr�nico ya se encuentra registrado, no se puede volver a registrar.'; ELSE UPDATE jpem10t SET dsfirst = primero, dslast = segundo, dsmail = correo, dstelloc = telefono, dstelmov = movil, programa = CONCAT('ACT',sesion) WHERE cdcontac = identificador; UPDATE jusm01t SET cdusulog = correo, programa = CONCAT('ACT',sesion) WHERE cdcontac = identificador; SET error = ''; END IF; END; // DROP PROCEDURE IF EXISTS eliminaContacto; CREATE PROCEDURE eliminaContacto(IN identificador CHAR(16), IN correo VARCHAR(100), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; SELECT COUNT(dsmail) INTO existe FROM jpem10t WHERE dsmail = correo AND cdcontac = identificador; IF (existe<=0)THEN SET error = 'Error 50032: El correo electr�nico no se encuentra registrado, no se puede eliminar el registro.'; ELSE DELETE FROM jusm01t WHERE cdusulog = correo AND cdcontac = (SELECT cdcontac FROM jpem10t WHERE dsmail = correo AND cdcontac = identificador); DELETE FROM jpem10t WHERE dsmail = correo AND cdcontac = identificador; SET error = ''; END IF; END; // DROP PROCEDURE IF EXISTS obtieneContactos; CREATE PROCEDURE obtieneContactos(IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE persona CHAR(16) DEFAULT ''; DECLARE tipo CHAR(1) DEFAULT ''; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT A.cdperson, intipprs INTO persona, tipo FROM jpem10t A INNER JOIN jpem00t B ON A.cdperson = B.cdperson WHERE cdcontac = (SELECT cdusulog FROM jpem90t WHERE idsesion = sesion); CALL obtieneContactosPersona(persona, tipo, sesion); ELSE SELECT '','','Sesi�n no valida.','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS obtieneContactosPersona; CREATE PROCEDURE obtieneContactosPersona(IN persona VARCHAR(16), IN tipo CHAR(1), IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN IF (persona = '*') THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, A.cdcontac, dsfirst, dslast, dsmail, dstelloc, dstelmov, cdidegrp FROM jpem10t A INNER JOIN jusm01t B ON A.cdperson = B.cdperson AND A.cdcontac = B.cdcontac INNER JOIN jpem00t C ON A.cdperson = C.cdperson WHERE intipprs = tipo; ELSE SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, A.cdcontac, dsfirst, dslast, dsmail, dstelloc, dstelmov, cdidegrp FROM jpem10t A INNER JOIN jusm01t B ON A.cdperson = B.cdperson AND A.cdcontac = B.cdcontac WHERE A.cdperson = persona AND persona NOT IN (SELECT cdperson FROM jpem00t WHERE isowner = 'S'); END IF; ELSE SELECT '','','Sesi�n no valida.','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS listaContactos; CREATE PROCEDURE listaContactos(IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, A.cdcontac, dsfirst, dslast, dsmail, dstelloc, dstelmov, cdidegrp FROM jpem10t A INNER JOIN jusm01t B ON A.cdperson = B.cdperson AND A.cdcontac = B.cdcontac WHERE A.cdperson NOT IN (SELECT cdperson FROM jpem00t WHERE isowner = 'S') AND instatus = 'A'; ELSE SELECT '','','Sesi�n no valida.','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS listaContactosSuspendidos; CREATE PROCEDURE listaContactosSuspendidos(IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, A.cdcontac, dsfirst, dslast, dsmail, dstelloc, dstelmov, cdidegrp FROM jpem10t A INNER JOIN jusm01t B ON A.cdperson = B.cdperson AND A.cdcontac = B.cdcontac WHERE A.cdperson NOT IN (SELECT cdperson FROM jpem00t WHERE isowner = 'S') AND instatus = 'S'; ELSE SELECT '','','Sesi�n no valida.','','','','',''; END IF; END; // DROP FUNCTION IF EXISTS nombreEmpresa; CREATE FUNCTION nombreEmpresa(empresa CHAR(16)) RETURNS VARCHAR(100) DETERMINISTIC BEGIN RETURN (SELECT dsrazsoc FROM jpem00t WHERE cdperson = empresa); END; // DROP PROCEDURE IF EXISTS actualizaGrupoContacto; CREATE PROCEDURE actualizaGrupoContacto(IN contacto CHAR(16), IN grupo VARCHAR(32), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); IF(existe>0)THEN UPDATE jusm01t SET cdidegrp = grupo, programa = CONCAT('NWGRP',grupo,obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); SET error = ''; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS actualizaAccesoContacto; CREATE PROCEDURE actualizaAccesoContacto(IN usuario VARCHAR(120), IN valenc VARCHAR(16), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE contacto CHAR(16) DEFAULT ''; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = usuario; IF(existe>0)THEN SELECT cdcontac INTO contacto FROM jpem10t WHERE dsmail = usuario; SELECT COUNT(isowner) INTO existe FROM jpem00t WHERE isowner = 'S' AND cdperson = (SELECT cdperson FROM jpem10t WHERE cdcontac = (SELECT cdusulog FROM jpem90t WHERE idsesion = sesion)); IF (existe>0)THEN UPDATE jusm01t SET dsvalenc = CONCAT(MD5(CONCAT(contacto,tmstmp)),MD5(valenc)), programa = CONCAT('NWPWD',obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = usuario; SET error = ''; ELSE SET error = 'Error 50040: Acceso a operaci�n denegada.'; END IF; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS suspendeAccesoContacto; CREATE PROCEDURE suspendeAccesoContacto(IN contacto CHAR(16), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); IF(existe>0)THEN UPDATE jusm01t SET instatus = 'S', dsvalenc = CONCAT(MD5(CONCAT(contacto,CURRENT_TIMESTAMP)),MD5('USUARIO-SUSPENDIDO')), programa = CONCAT('MDSUS',obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); SET error = ''; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS eliminaAccesoContacto; CREATE PROCEDURE eliminaAccesoContacto(IN contacto CHAR(16), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); IF(existe>0)THEN UPDATE jusm01t SET instatus = 'E', dsvalenc = CONCAT(MD5(CONCAT(CURRENT_TIMESTAMP,'0000000000000000')),MD5('USUARIO-ELIMINADO')), programa = CONCAT('MDELM',obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); SET error = ''; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS activaAccesoContacto; CREATE PROCEDURE activaAccesoContacto(IN contacto CHAR(16), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); IF(existe>0)THEN UPDATE jusm01t SET instatus = 'A', programa = CONCAT('MDACT',obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); SET error = ''; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS whois; CREATE PROCEDURE whois(IN usuario VARCHAR(100), IN valor VARCHAR(16), IN rfc VARCHAR(13), IN ipfrom VARCHAR(15), IN sesion VARCHAR(32), OUT seguridad INT, OUT referencia CHAR(16), OUT propietario CHAR(1), OUT persona CHAR(16), OUT error VARCHAR(255)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(dsrfc) INTO existe FROM jpem00t WHERE dsrfc = rfc; IF (existe>0)THEN -- SELECT cdperson INTO persona FROM jpem00t WHERE dsrfc = rfc; SELECT COUNT(cdcontac) INTO existe FROM jusm01t WHERE cdusulog = usuario AND dsvalenc = CONCAT(MD5(CONCAT(cdcontac,tmstmp)),MD5(valor)) AND cdperson = (SELECT cdperson FROM jpem00t WHERE dsrfc = rfc); IF(existe>0)THEN SELECT cdcontac, cdperson INTO referencia, persona FROM jusm01t WHERE cdusulog = usuario AND dsvalenc = CONCAT(MD5(CONCAT(cdcontac,tmstmp)),MD5(valor)) AND cdperson = (SELECT cdperson FROM jpem00t WHERE dsrfc = rfc); -- SELECT COUNT(cdperson) INTO existe FROM jpem10t WHERE cdperson = persona AND cdcontac = referencia; -- IF(existe>0)THEN SELECT ingrpmod INTO seguridad FROM jgrm01t WHERE cdidegrp = (SELECT cdidegrp FROM jusm01t WHERE cdcontac = referencia); SELECT isowner INTO propietario FROM jpem00t WHERE cdperson = persona; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT COUNT(dsipfrom) INTO existe FROM jpem90t WHERE idsesion = sesion AND dsipfrom = ipfrom; IF(existe>0)THEN SET error = ''; ELSE SET error = 'Error 50043: No se puede validar la sesión. Espere unos minutos para volver a intentarlo.'; END IF; ELSE INSERT INTO jpem90t (idsesion,cdusulog,dsusulog,dsipfrom,cdusuari,programa,tmstmp) VALUES (sesion,referencia,usuario,ipfrom,getUser(),'insertaSesion',CURRENT_TIMESTAMP); SET error = ''; END IF; -- ELSE -- SET error = 'Error 50042:Este usuario no esta asociado al cliente.'; -- END IF; ELSE SET error = (SELECT CONCAT('Error 50041:El usuario o la contraseña son incorrectos en: ', rfc)); END IF; ELSE SET error = 'Error 50040:El registro de empresa no existe.'; END IF; END; // -- CALL whois('<EMAIL>','bui113yt','DQI941121ET8','0.0.0.0','12345678901234567890123456789012',@seguridad,@referencia,@propietario,@persona,@error) -- SELECT COUNT(dsrfc) FROM jpem00t WHERE dsrfc = 'DQI941121ET8'; -- SELECT COUNT(cdcontac) FROM jusm01t WHERE cdusulog = '<EMAIL>' AND dsvalenc = CONCAT(MD5(CONCAT(cdcontac,tmstmp)),MD5('cfar52Bio+')); -- CALL whois('<EMAIL>','cfar52Bio+','FAAC750415PZ0','127.0.0.1','b30c1016944d4a629f647da5a75e',@1,@2,@3,@4,@5); DROP PROCEDURE IF EXISTS listaGrupos; CREATE PROCEDURE listaGrupos(IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT cdidegrp, dsidegrp FROM jgrm01t WHERE cdidegrp NOT IN ('*','A') ORDER BY dsidegrp; ELSE SELECT '','La sesion no existe'; END IF; END; // DROP PROCEDURE IF EXISTS consultaValoresSesion; CREATE PROCEDURE consultaValoresSesion(IN sesion VARCHAR(32), OUT identificador CHAR(16), OUT dominio CHAR(16), OUT razonsocial VARCHAR(100), OUT folder CHAR(16), OUT tipoPersona CHAR(1), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT A.cdperson, B.cdcontac, A.dsrazsoc, A.dsfolder, A.intipprs INTO dominio, identificador, razonsocial, folder, tipoPersona FROM jpem00t A INNER JOIN jpem10t B ON A.cdperson = B.cdperson INNER JOIN jpem90t C ON B.cdcontac = C.cdusulog WHERE idsesion = sesion; SET error = ''; ELSE SET error = 'Error 50039: La sesi�n no existe.'; END IF; END; // DELIMITER ; INSERT INTO kaqcidt VALUES ('PR',CURRENT_TIMESTAMP,'Controlador de identificacion para clientes','0000000000000009','n',getUser(),'START-BATCH',CURRENT_TIMESTAMP); INSERT INTO kaqcidt VALUES ('DE',CURRENT_TIMESTAMP,'Controlador de identificacion para documentos','0000000000000007','n',getUser(),'START-BATCH',CURRENT_TIMESTAMP); INSERT INTO kaqcidt VALUES ('CT',CURRENT_TIMESTAMP,'Controlador de identificacion para contactos','0000000000000011','n',getUser(),'START-BATCH',CURRENT_TIMESTAMP); INSERT INTO kaqcidt VALUES ('FL',CURRENT_TIMESTAMP,'Controlador de identificacion para archivos','0000000000000009','n',getUser(),'START-BATCH',CURRENT_TIMESTAMP); DELETE FROM ctrldoce.jgrm01t; -- Manejo de seguridad en grupos: -- Registro Registro Usuarios -- Contacto Contacto Cambiar passwd -- Ver documento Ver documento Suspender/eliminar usuarios -- Subir documento Subir documento Activar usuarios -- Notificacion Notificacion -- Estado Estado -- GRUPOS: [000000.000000.0000] 0 -- P Proveedores: [011100.000000.0000] 28672 -- Gestor proveedores [111111.000000.0000] 64512 -- C Clientes: [000000.001000.0000] 128 -- Gestor clientes: [000000.111111.0000] 1008 -- A Administrador: [000000.000000.1111] 15 -- Gestor no administrador: [111111.111111.0000] 65520 -- * Control total: [111100.111100.1111] 62407, 62415 INSERT INTO ctrldoce.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('C', 'Clientes','128','sysadmindoce','manual',CURRENT_TIMESTAMP); INSERT INTO ctrldoce.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('P', 'Proveedores','28672','sysadmindoce','manual',CURRENT_TIMESTAMP); INSERT INTO ctrldoce.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('A', 'Administrador','7','sysadmindoce','manual',CURRENT_TIMESTAMP); INSERT INTO ctrldoce.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('*', 'Control total','62407','sysadmindoce','manual',CURRENT_TIMESTAMP); INSERT INTO ctrldoce.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('G', 'Gastos','480','sysadmindoce','manual',CURRENT_TIMESTAMP); -- INSERT INTO ctrldoce.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('v', 'Gestor de Proveedores','64512','sysadmindoce','manual',CURRENT_TIMESTAMP); -- INSERT INTO ctrldoce.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('t', 'Gestor de clientes','1008','sysadmindoce','manual',CURRENT_TIMESTAMP); -- INSERT INTO ctrldoce.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('s', 'Gestor de documentos','65520','sysadmindoce','manual',CURRENT_TIMESTAMP); -- UPDATE ctrldoce.jgrm01t SET ingrpmod = 62415 WHERE cdidegrp = '*'; -- Insertar usuarios due�os de la base de datos: INSERT INTO ctrldoce.jpem00t (cdperson,dsrazsoc,dsrfc,dsfolder,dslogo,isowner,intipprs,cdusuari,programa,tmstmp) VALUE('0000000000000005','Biotecsa S de RL de CV','_BIO870307QD0','','','S','-',getUser(),'START-BATCH',CURRENT_TIMESTAMP); INSERT INTO ctrldoce.jpem10t (cdperson,cdcontac,dsfirst,dslast,dsmail,dstelloc,dstelmov,cdusuari,programa,tmstmp,dsipfrom) VALUE('0000000000000005','0000000000000007','Administrador','Sistema','<EMAIL>','','',getUser(),'START-BATCH',CURRENT_TIMESTAMP,'0.0.0.0'); INSERT INTO ctrldoce.jusm01t (cdperson,cdcontac,cdidegrp,cdusulog,dsvalenc,instatus,inusumod,dsipfrom,cdusuari,programa,tmstmp) VALUE('0000000000000005','0000000000000007','A','<EMAIL>',CONCAT(MD5(CONCAT('0000000000000007',CURRENT_TIMESTAMP)),MD5('hJDy63625')),'A','0','0.0.0.0',getUser(),'START-BATCH',CURRENT_TIMESTAMP); GRANT DELETE ON ctrldoce.jdem10t TO 'sysadmindoce'@'localhost'; <file_sep>/src/java/com/ftc/gedoc/bo/impl/NotificacionBOImpl.java package com.ftc.gedoc.bo.impl; import com.ftc.gedoc.bo.NotificacionBO; import com.ftc.gedoc.dao.NotificacionDAO; import com.ftc.gedoc.dao.impl.NotificacionDAOImpl; import com.ftc.gedoc.exceptions.GeDocBOException; import com.ftc.gedoc.exceptions.GeDocDAOException; import com.ftc.gedoc.utiles.Notificacion; import java.util.ArrayList; import java.util.List; public class NotificacionBOImpl implements NotificacionBO{ NotificacionDAO dao = new NotificacionDAOImpl(); public NotificacionBOImpl(){} @Override public Notificacion registrar(Notificacion notificacion) throws GeDocBOException { try{ return dao.registrar(notificacion); }catch(GeDocDAOException e){ throw new GeDocBOException(e.getMessage(), e); } } @Override public Notificacion cancelarNotificacion(Notificacion notificacion) throws GeDocBOException { try{ if(notificacion!=null){ notificacion.setEstatus("C"); return dao.actualizar(notificacion); }else{ throw new GeDocBOException("No se localizo la notificación"); } }catch(GeDocDAOException e){ throw new GeDocBOException(e.getMessage(), e); } } @Override public int cancelarNotificaciones(List<Notificacion> notificaciones) throws GeDocBOException { for(Notificacion notificacion:notificaciones){ cancelarNotificacion(notificacion); } return notificaciones.size(); } @Override public List<Notificacion> notificaciones(String empresa) throws GeDocBOException { try{ List<Notificacion> notificaciones = dao.listar(); List<Notificacion> filtro = new ArrayList<Notificacion>(); for(Notificacion notificacion:notificaciones){ if(empresa.equals(notificacion.getPersona())){ filtro.add(notificacion); } } return filtro; }catch(GeDocDAOException e){ throw new GeDocBOException(e.getMessage(), e); } } @Override public List<Notificacion> notificaciones(String empresa, String estatus) throws GeDocBOException { try{ List<Notificacion> notificaciones = dao.listar(); List<Notificacion> filtro = new ArrayList<Notificacion>(); for(Notificacion notificacion:notificaciones){ if(empresa.equals(notificacion.getPersona()) && estatus.equals(notificacion.getEstatus())){ filtro.add(notificacion); } } return filtro; }catch(GeDocDAOException e){ throw new GeDocBOException(e.getMessage(), e); } } } <file_sep>/src/java/com/ftc/gedoc/utiles/Contacto.java package com.ftc.gedoc.utiles; public class Contacto { private String persona; private String razonSocial; private String identificador; private String nombre; private String apellido; private String correo; private String telefono; private String movil; private String grupo; public Contacto() { this.persona = ""; this.identificador = ""; this.nombre = ""; this.apellido = ""; this.correo = ""; this.telefono = ""; this.movil = ""; this.grupo = ""; } public Contacto(String persona, String nombre, String apellido, String correo, String telefono, String movil, String grupo) { this.persona = persona; this.nombre = nombre; this.apellido = apellido; this.correo = correo; this.telefono = telefono; this.movil = movil; this.grupo = grupo; } public Contacto(String nombre, String apellido, String correo, String telefono, String movil) { this.nombre = nombre; this.apellido = apellido; this.correo = correo; this.telefono = telefono; this.movil = movil; } public String getPersona() { return persona; } public void setPersona(String persona) { this.persona = persona; } public String getRazonSocial() { return razonSocial; } public void setRazonSocial(String razonSocial) { this.razonSocial = razonSocial; } public String getIdentificador() { return identificador; } public void setIdentificador(String identificador) { this.identificador = identificador; } public String getNombre() { return nombre; } public void setNombre(String nombre) { this.nombre = nombre; } public String getApellido() { return apellido; } public void setApellido(String apellido) { this.apellido = apellido; } public String getCorreo() { return correo; } public void setCorreo(String correo) { this.correo = correo; } public String getTelefono() { return telefono; } public void setTelefono(String telefono) { this.telefono = telefono; } public String getMovil() { return movil; } public void setMovil(String movil) { this.movil = movil; } public String getGrupo() { return grupo; } public void setGrupo(String grupo) { this.grupo = grupo; } /* public static List<Contacto> obtieneContactos(Connection conexion, String sesion) throws SQLException { List<Contacto> listado = new LinkedList<Contacto>(); SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, sesion)); ResultSet rst = Conexion.consultaStoreProcedure(conexion, "obtieneContactos", params); while (rst.next()) { Contacto c = new Contacto(); c.setPersona(rst.getString(1)); c.setRazonSocial(rst.getString(2)); c.setIdentificador(rst.getString(3)); c.setNombre(rst.getString(4)); c.setApellido(rst.getString(5)); c.setCorreo(rst.getString(6)); c.setTelefono(rst.getString(7)); c.setMovil(rst.getString(8)); c.setGrupo(rst.getString(9)); listado.add(c); } return listado; } public static List<Contacto> obtieneContactos(String empresa, String tipo, Connection conexion, String sesion) throws SQLException { List<Contacto> listado = new LinkedList<Contacto>(); SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, empresa)); params.add(new SpParam(2, Types.VARCHAR, tipo)); params.add(new SpParam(3, Types.VARCHAR, sesion)); ResultSet rst = Conexion.consultaStoreProcedure(conexion, "obtieneContactosPersona", params); while (rst.next()) { Contacto c = new Contacto(); c.setPersona(rst.getString(1)); c.setRazonSocial(rst.getString(2)); c.setIdentificador(rst.getString(3)); c.setNombre(rst.getString(4)); c.setApellido(rst.getString(5)); c.setCorreo(rst.getString(6)); c.setTelefono(rst.getString(7)); c.setMovil(rst.getString(8)); c.setGrupo(rst.getString(9)); listado.add(c); } return listado; } public static Contacto buscaContactoPorCorreo(String empresa, String correo, Connection conexion, String sesion) throws SQLException{ List<Contacto> contactos = obtieneContactos(empresa, "",conexion, sesion); for(Contacto contacto:contactos){ if(contacto.getCorreo().equals(correo)){ return contacto; } } return null; } public static List<Contacto> listaContactos(Connection conexion, String sesion) throws SQLException { List<Contacto> listado = new LinkedList<Contacto>(); SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, sesion)); ResultSet rst = Conexion.consultaStoreProcedure(conexion, "listaContactos", params); while (rst.next()) { Contacto c = new Contacto(); c.setPersona(rst.getString(1)); c.setRazonSocial(rst.getString(2)); c.setIdentificador(rst.getString(3)); c.setNombre(rst.getString(4)); c.setApellido(rst.getString(5)); c.setCorreo(rst.getString(6)); c.setTelefono(rst.getString(7)); c.setMovil(rst.getString(8)); c.setGrupo(rst.getString(9)); listado.add(c); } return listado; } public static List<Contacto> listaContactosSuspendidos(Connection conexion, String sesion) throws SQLException { List<Contacto> listado = new LinkedList<Contacto>(); SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, sesion)); ResultSet rst = Conexion.consultaStoreProcedure(conexion, "listaContactosSuspendidos", params); while (rst.next()) { Contacto c = new Contacto(); c.setPersona(rst.getString(1)); c.setRazonSocial(rst.getString(2)); c.setIdentificador(rst.getString(3)); c.setNombre(rst.getString(4)); c.setApellido(rst.getString(5)); c.setCorreo(rst.getString(6)); c.setTelefono(rst.getString(7)); c.setMovil(rst.getString(8)); c.setGrupo(rst.getString(9)); listado.add(c); } return listado; } public boolean insertaContacto(String contrasenia, Connection conexion, String sesion) throws SQLException { SpParams params = new SpParams(); //registraContacto(IN{persona,primero,segundo,correo,telefono,movil,grupo,valenc,sesion},OUT{referencia,error}) params.add(new SpParam(1, Types.VARCHAR, persona)); params.add(new SpParam(2, Types.VARCHAR, nombre)); params.add(new SpParam(3, Types.VARCHAR, apellido)); params.add(new SpParam(4, Types.VARCHAR, correo)); params.add(new SpParam(5, Types.VARCHAR, telefono)); params.add(new SpParam(6, Types.VARCHAR, movil)); params.add(new SpParam(7, Types.VARCHAR, grupo)); params.add(new SpParam(8, Types.VARCHAR, contrasenia)); params.add(new SpParam(9, Types.VARCHAR, sesion)); params.add(new SpParam(10, Types.VARCHAR, null, true)); params.add(new SpParam(11, Types.VARCHAR, null, true)); Object[] vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, "registraContacto", params); if (vuelta != null && vuelta.length == 2) { if (String.valueOf(vuelta[1]).length() > 0) { throw new SQLException((String) vuelta[1], "0"); } else { this.identificador = (String) vuelta[0]; return true; } } else { throw new SQLException("No se ha logrado obtener la respuesta del procedimiento.", "0"); } } public boolean actualizarContacto(String identificador, Connection conexion, String sesion) throws SQLException { SpParams params = new SpParams(); //actualizaContacto(IN{identificador,primero,segundo,correo,telefono,movil,sesion}, OUT{error}) params.add(new SpParam(1, Types.VARCHAR, identificador)); params.add(new SpParam(2, Types.VARCHAR, nombre)); params.add(new SpParam(3, Types.VARCHAR, apellido)); params.add(new SpParam(4, Types.VARCHAR, correo)); params.add(new SpParam(5, Types.VARCHAR, telefono)); params.add(new SpParam(6, Types.VARCHAR, movil)); params.add(new SpParam(7, Types.VARCHAR, sesion)); params.add(new SpParam(8, Types.VARCHAR, null, true)); Object[] vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, "actualizaContacto", params); if (vuelta != null && vuelta.length == 1) { if (String.valueOf(vuelta[0]).length() > 0) { throw new SQLException((String) vuelta[0], "0"); } else { this.identificador = identificador; return true; } } else { throw new SQLException("No se ha logrado obtener la respuesta del procedimiento.", "0"); } } public boolean eliminarContacto(String identificador, Connection conexion, String sesion) throws SQLException{ SpParams params = new SpParams(); //actualizaContacto(IN{identificador,primero,segundo,correo,telefono,movil,sesion}, OUT{error}) params.add(new SpParam(1, Types.VARCHAR, identificador)); params.add(new SpParam(2, Types.VARCHAR, correo)); params.add(new SpParam(3, Types.VARCHAR, sesion)); params.add(new SpParam(4, Types.VARCHAR, null, true)); Object[] vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, "eliminaContacto", params); if (vuelta != null && vuelta.length == 1) { if (String.valueOf(vuelta[0]).length() > 0) { throw new SQLException((String) vuelta[0], "0"); } else { return true; } } else { throw new SQLException("No se ha logrado obtener la respuesta del procedimiento.", "0"); } } */ } <file_sep>/db/ctrldoce_cep.sql DROP TABLE IF EXISTS cep_cabecera; DROP TABLE IF EXISTS cep; DROP TABLE IF EXISTS cep_concepto; DROP TABLE IF EXISTS cep_pago; DROP TABLE IF EXISTS cep_documento; CREATE TABLE cep_cabecera ( idceparc VARCHAR(32) NOT NULL, cdperson VARCHAR(32) NOT NULL, dstitulo VARCHAR(120) NOT NULL, dsobserv VARCHAR(250) NOT NULL, dsrefarc VARCHAR(250) NOT NULL, dsstatus CHAR(1) DEFAULT 'P', cdusuari VARCHAR(32) NOT NULL, programa VARCHAR(120) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idceparc) ); CREATE TABLE cep ( uuid VARCHAR(36) NOT NULL, version VARCHAR(12) NOT NULL, serie VARCHAR(12) NOT NULL, folio VARCHAR(12) NOT NULL, fecha DATETIME NOT NULL, subTotal DOUBLE(13,2) DEFAULT 0, moneda VARCHAR(3) DEFAULT 'MXN', total DOUBLE(13,2) DEFAULT 0, tipoDeComprobante VARCHAR(3) NOT NULL, lugarExpedicion VARCHAR(30) NOT NULL, xmlnsPago10 VARCHAR(90) NOT NULL, rfcEmisor VARCHAR(13) NOT NULL, nombreEmisor VARCHAR(120) NOT NULL, regimenFiscalEmisor VARCHAR(30) NOT NULL, rfcReceptor VARCHAR(13) NOT NULL, nombreReceptor VARCHAR(120) NOT NULL, usoCFDIReceptor VARCHAR(30) NOT NULL, rfcProvCertif VARCHAR(13) NOT NULL, versionTibreFiscal VARCHAR(12) NOT NULL, fechaTimbrado DATETIME NOT NULL, noCertificadoSAT VARCHAR(20) NOT NULL, versionPagos VARCHAR(12) NOT NULL, idceparc VARCHAR(32) NOT NULL REFERENCES cep_cabecera(idceparc), PRIMARY KEY(uuid) ); SELECT uuid,version,serie,folio,fecha,subTotal,moneda ,total,tipoDeComprobante,lugarExpedicion,xmlnsPago10,rfcEmisor, nombreEmisor,regimenFiscalEmisor,rfcReceptor,nombreReceptor,usoCFDIReceptor,rfcProvCertif,versionTibreFiscal, fechaTimbrado,noCertificadoSAT,versionPagos FROM cep WHERE uuid = 'uuid'; CREATE TABLE cep_concepto ( uuid VARCHAR(36) NOT NULL, claveProdServ VARCHAR(8) NOT NULL, cantidad int DEFAULT 1, claveUnidad VARCHAR(5) NOT NULL, descripcion VARCHAR(120) NOT NULL, valorUnitario DOUBLE DEFAULT 0, importe DOUBLE DEFAULT 0, PRIMARY KEY(uuid, claveProdServ) ); SELECT uuid,claveProdServ,cantidad,claveUnidad,descripcion,valorUnitario,importe FROM cep_concepto CREATE TABLE cep_pago ( uuid VARCHAR(36) NOT NULL, documentoRelacionado VARCHAR(36) NOT NULL, fechaPago DATETIME NOT NULL, formaDePago VARCHAR(5) NOT NULL, moneda VARCHAR(3) DEFAULT 'MXN' NOT NULL, monto DOUBLE(13,2) DEFAULT 0, rfcEmisorCtaOrd VARCHAR(13) NOT NULL, ctaOrdenante VARCHAR(18) NOT NULL, rfcEmisorCtaBen VARCHAR(13) NOT NULL, ctaBeneficiario VARCHAR(18) NOT NULL, PRIMARY KEY(uuid, documentoRelacionado) ); SELECT uuid, documentoRelacionado, fechaPago, formaDePago, moneda, monto, rfcEmisorCtaOrd, ctaOrdenante, rfcEmisorCtaBen, ctaBeneficiario, B.partida, B.folio, B.serie, B.moneda AS monedaPago, B.metodoDePago, B.numParcialidad, B.saldoAnt, B.pagado, B.saldoInsoluto FROM cep_pago A INNER JOIN cep_documento B ON A.uuid = B.uuid AND A.documentoRelacionado = B.documentoRelacionado CREATE TABLE cep_documento ( uuid VARCHAR(36) NOT NULL, documentoRelacionado VARCHAR(36) NOT NULL, partida int NOT NULL, folio VARCHAR(12) NOT NULL, serie VARCHAR(12) NOT NULL, moneda VARCHAR(3) DEFAULT 'MXN', metodoDePago VARCHAR(5) NOT NULL, numParcialidad INT DEFAULT 1, saldoAnt DOUBLE(13,2) DEFAULT 0, pagado DOUBLE(13,2) DEFAULT 0, saldoInsoluto DOUBLE(13,2) DEFAULT 0, PRIMARY KEY(uuid, documentoRelacionado) );<file_sep>/src/java/com/ftc/gedoc/dao/impl/PeriodoDAOImpl.java package com.ftc.gedoc.dao.impl; import com.ftc.aq.Comunes; import com.ftc.aq.Conexion; import com.ftc.aq.SpParam; import com.ftc.aq.SpParams; import com.ftc.gedoc.dao.PeriodoDAO; import com.ftc.gedoc.exceptions.GeDocDAOException; import com.ftc.gedoc.utiles.Documento; import com.ftc.gedoc.utiles.Periodo; import com.ftc.gedoc.utiles.PeriodoCabecera; import com.ftc.gedoc.utiles.PeriodoCifraControl; import com.ftc.gedoc.utiles.PeriodoRegistro; import com.ftc.gedoc.utiles.TipoComprobante; import com.ftc.gedoc.utiles.CifraControl; import com.ftc.gedoc.utiles.CifraControlAjuste; import java.sql.Connection; import java.util.Date; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Types; import java.util.ArrayList; import java.util.Calendar; import java.util.List; import java.util.Map; import java.util.logging.Level; import java.util.logging.Logger; public class PeriodoDAOImpl implements PeriodoDAO { Connection conexion = null; public PeriodoDAOImpl() { } /* SELECT a.idnumper, inanyper, innumper, dtfecape, dtfeccie, instatus, dscoment, d.monto, incuenta FROM jctm01t a LEFT JOIN jctm02t b ON a.idnumper = b.idnumper LEFT JOIN (SELECT COUNT(A.idreggas) incuenta, idnumper FROM jctm10t A INNER JOIN jctm09t B ON A.idreggas = B.idreggas WHERE B.instatus = 'A' OR B.instatus = 'Q' GROUP BY idnumper ) c ON a.idnumper = c.idnumper LEFT JOIN (SELECT idnumper, ROUND(IFNULL(SUM(dbimpreg),0)) monto FROM jctm10t A INNER JOIN jctm09t B ON A.idreggas = B.idreggas GROUP BY idnumper ) d ON a.idnumper = d.idnumper ORDER BY inanyper DESC, innumper DESC; */ @Override public List<Periodo> listado() throws GeDocDAOException { List<Periodo> periodos = new ArrayList<Periodo>(); try { conexion = Conexion.getConexion(); String sql = "SELECT a.idnumper, inanyper, innumper, dtfecape, dtfeccie, instatus, dscoment, d.monto, incuenta " + "FROM jctm01t a LEFT JOIN jctm02t b ON a.idnumper = b.idnumper " + "LEFT JOIN (" + " SELECT COUNT(A.idreggas) incuenta, idnumper FROM jctm10t A INNER JOIN jctm09t B ON A.idreggas = B.idreggas WHERE B.instatus = 'A' OR B.instatus = 'Q' GROUP BY idnumper " + " ) c ON a.idnumper = c.idnumper " + "LEFT JOIN ( " + " SELECT idnumper, ROUND(IFNULL(SUM(dbimpreg),0)) monto FROM jctm10t A INNER JOIN jctm09t B ON A.idreggas = B.idreggas GROUP BY idnumper " + " ) d ON a.idnumper = d.idnumper " + " ORDER BY inanyper DESC, innumper DESC;"; PreparedStatement stm = conexion.prepareStatement(sql); ResultSet rst = stm.executeQuery(); if (rst != null) { while (rst.next()) { Periodo periodo = new Periodo(); periodo.setAny(rst.getInt(2)); periodo.setApertura(rst.getDate(4)); periodo.setCierre(rst.getDate(5) != null ? rst.getDate(5) : null); periodo.setComentario(rst.getString(7)); periodo.setEstatus(rst.getString(6)); periodo.setIdentificador(rst.getString(1)); periodo.setPeriodo(rst.getInt(3)); periodo.setMonto(rst.getDouble(8)); periodo.setCuenta(rst.getInt(9)); periodos.add(periodo); } } return periodos; } catch (SQLException e) { throw new GeDocDAOException("La consulta de datos de periodo genero una excepción, revise el log para más detalles.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public Periodo abrir(Periodo periodo) throws GeDocDAOException { try { conexion = Conexion.getConexion(); String sp = "abrirPeriodo"; SpParams params = new SpParams(); params.add(new SpParam(1, Types.INTEGER, periodo.getAny())); params.add(new SpParam(2, Types.INTEGER, periodo.getPeriodo())); params.add(new SpParam(3, Types.VARCHAR, null, true)); // nuevo periodo params.add(new SpParam(4, Types.VARCHAR, null, true)); // fecha apertura params.add(new SpParam(5, Types.VARCHAR, null, true)); // estatus params.add(new SpParam(6, Types.VARCHAR, null, true)); // comentarios params.add(new SpParam(7, Types.VARCHAR, null, true)); // error Object[] vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, sp, params); if (vuelta != null && vuelta.length == 5) { if (vuelta[4] != null && !((String) vuelta[4]).isEmpty()) { throw new GeDocDAOException((String) vuelta[4]); } else { System.out.println("Quedo abierto el nuevo periodo: " + (String) vuelta[0]); periodo.setIdentificador((String) vuelta[0]); String sFecha = (String) vuelta[1]; Date fecha; try { fecha = Comunes.toFechaSQL(sFecha, "-"); } catch (Exception e) { System.out.println("=====> Error en la transformacion de fecha de aperetura del periodo." + e.getMessage()); e.printStackTrace(System.out); //en caso de que no se logre tomar la fecha de la base de datos, colocamos la fecha del appServer fecha = new Date(Calendar.getInstance().getTimeInMillis()); } periodo.setApertura(fecha); periodo.setEstatus((String) vuelta[2]); periodo.setComentario((String) vuelta[3]); } } else { throw new GeDocDAOException("El procedimiento para abrir un nuevo periodo no retorno los parametros necesarios. Se esperaban 5 regreso " + vuelta.length); } return periodo; } catch (SQLException e) { throw new GeDocDAOException("La qpertura de periodo genero una excepción, revise el log para más detalles.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public Periodo cerrar(Periodo periodo) throws GeDocDAOException { try { conexion = Conexion.getConexion(); String sp = "cerrarPeriodo"; SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, periodo.getIdentificador())); params.add(new SpParam(2, Types.VARCHAR, periodo.getComentario())); params.add(new SpParam(3, Types.DATE, null, true)); // fecha cierre params.add(new SpParam(4, Types.VARCHAR, null, true)); // estatus params.add(new SpParam(5, Types.VARCHAR, null, true)); // error Object[] vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, sp, params); if (vuelta != null && vuelta.length == 3) { if (vuelta[2] != null && !((String) vuelta[2]).isEmpty()) { throw new GeDocDAOException((String) vuelta[2]); } else { System.out.println("Quedo cerrado el periodo: " + periodo.getIdentificador()); periodo.setCierre((Date) vuelta[0]); periodo.setEstatus((String) vuelta[1]); } } else { throw new GeDocDAOException("El procedimiento para cerrar un nuevo periodo no retorno los parametros necesarios. Se esperaban 5 regreso " + vuelta.length); } return periodo; } catch (SQLException e) { throw new GeDocDAOException("El cierre de periodo genero una excepción, revise el log para más detalles.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public Periodo activo() throws GeDocDAOException { try { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder(); sql.append("SELECT idnumper, inanyper, innumper, dtfecape, dtfeccie, instatus, dscoment "); sql.append("FROM jctm01t "); sql.append(" WHERE instatus = ?"); sql.append("LIMIT 1;"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, "A"); ResultSet rst = stm.executeQuery(); if (rst != null) { if (rst.next()) { Periodo periodo = new Periodo(); periodo.setAny(rst.getInt(2)); periodo.setApertura(rst.getDate(4)); periodo.setCierre(rst.getDate(5) != null ? rst.getDate(5) : null); periodo.setComentario(rst.getString(7)); periodo.setEstatus(rst.getString(6)); periodo.setIdentificador(rst.getString(1)); periodo.setPeriodo(rst.getInt(3)); return periodo; } } return new Periodo(); } catch (SQLException e) { throw new GeDocDAOException("La consulta de datos de periodo genero una excepción, revise el log para más detalles.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public Periodo encuentraPorId(String id) throws GeDocDAOException { try { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder(); sql.append("SELECT idnumper, inanyper, innumper, dtfecape, dtfeccie, instatus, dscoment "); sql.append("FROM jctm01t "); sql.append(" WHERE idnumper = ?"); sql.append(" ORDER BY inanyper DESC, innumper DESC;"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, id); ResultSet rst = stm.executeQuery(); if (rst != null) { if (rst.next()) { Periodo periodo = new Periodo(); periodo.setAny(rst.getInt(2)); periodo.setApertura(rst.getDate(4)); periodo.setCierre(rst.getDate(5) != null ? rst.getDate(5) : null); periodo.setComentario(rst.getString(7)); periodo.setEstatus(rst.getString(6)); periodo.setIdentificador(rst.getString(1)); periodo.setPeriodo(rst.getInt(3)); return periodo; } } return new Periodo(); } catch (SQLException e) { throw new GeDocDAOException("La consulta de datos de periodo genero una excepción, revise el log para más detalles.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public void eliminaRegistros(String identificador) throws GeDocDAOException { try { conexion = Conexion.getConexion(); SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, identificador)); params.add(new SpParam(2, Types.VARCHAR, null, true)); Object[] vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, "eliminaRegistro", params); if (vuelta != null && vuelta.length == 1) { if (((String) vuelta[0]).isEmpty()) { System.out.println("Se eliminó correctamente el registro."); } else { throw new GeDocDAOException("Fallo al ejecutar el procedimiento. " + (String) vuelta[0]); } } else { throw new GeDocDAOException("Fallo al ejecutar el procedimiento. No se devolvieron los parametros esperados, se esperaba 1 y se encontraron " + (vuelta != null ? vuelta.length : "null")); } } catch (SQLException e) { throw new GeDocDAOException("Hubo un error al eliminar los datos de registro, la operación no se completa.", e); } finally { try { if (conexion != null && !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public List<PeriodoCabecera> listaCabecerasConImporte(String id, String tipoGasto, String... params) throws GeDocDAOException { List<PeriodoCabecera> registros = new ArrayList<PeriodoCabecera>(); try { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder(); sql.append("SELECT A.idreggas, dsasocia, A.idnumper, A.intipgas, A.instatus, fefecreg, dsdocto, dsrefdoc, total, cuenta "); sql.append("FROM jctm09t A LEFT JOIN ("); sql.append(" SELECT idnumper, idreggas, IFNULL(SUM(dbimpreg), 0) AS total, IFNULL(COUNT(idreggas), 0) cuenta FROM jctc10v GROUP BY idnumper,idreggas "); sql.append(") C ON A.idreggas = C.idreggas AND C.idnumper = A.idnumper"); sql.append(" WHERE (A.idnumper = ? AND A.intipgas = ? AND A.instatus = ?) "); if (params.length > 0) { String and = ""; for (String param : params) { if (param.startsWith("fechas:")) { String[] periodo = param.substring(param.indexOf(":") + 1).split(","); if (periodo.length == 2) { Date fecha1 = Comunes.DMAtoFechaSQL(periodo[0]); Date fecha2 = Comunes.DMAtoFechaSQL(periodo[1]); and = String.format(" fefecreg BETWEEN '%s' AND '%s' ", Comunes.date2String(fecha1, -3), Comunes.date2String(fecha2, -3)); } else { System.out.println("Parametro de fechas incorrecto: " + param); } } else if (param.startsWith("asignadoA:")) { and = " dsasocia LIKE ('%" + param.substring(param.indexOf(":") + 1) + "%') "; } else if (param.startsWith("documento:")) { and = " dsdocto LIKE ('%" + param.substring(param.indexOf(":") + 1) + "%') "; } else if (param.startsWith("referencia:")) { and = " dsrefdoc LIKE ('%" + param.substring(param.indexOf(":") + 1) + "%') "; } else if (param.startsWith("estatus:")) { and = String.format(" instatus = '%s' ", param.substring(param.indexOf(":") + 1, param.indexOf(":") + 2)); } else { System.out.println("Parametro no localizado: " + param); } sql.append(" AND").append(and); } } sql.append("ORDER BY dsasocia;"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, id); stm.setString(2, tipoGasto); stm.setString(3, "A"); System.out.println("Consulta de gasto con importe: " + stm.toString()); ResultSet rst = stm.executeQuery(); if (rst != null) { while (rst.next()) { PeriodoCabecera pc = new PeriodoCabecera(); pc.setIdentificador(rst.getString(1)); pc.setAsociadoA(rst.getString(2)); pc.setTipo(rst.getString(4)); pc.setEstatus(rst.getString(5)); pc.setFecha(rst.getDate(6)); pc.setDocumento(rst.getString(7)); pc.setReferencia(rst.getString(8)); pc.setRegistros(new ArrayList<PeriodoRegistro>()); pc.setImporte(rst.getDouble(9)); pc.setCuentaFueraPeriodo(rst.getInt(10)); registros.add(pc); } } return registros; } catch (SQLException e) { //System.out.println("Consulta de gasto con importe: " + e.getMessage() + " -> " + e.getCause()); throw new GeDocDAOException("La consulta de datos de registros en el periodo genero una excepción, revise el log para más detalles.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public List<PeriodoCabecera> listaCabeceras(String id, String tipoGasto) throws GeDocDAOException { List<PeriodoCabecera> registros = new ArrayList<PeriodoCabecera>(); try { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder(); sql.append("SELECT idreggas, dsasocia, idnumper, intipgas, instatus, fefecreg, dsdocto, dsrefdoc "); sql.append("FROM jctm09t "); sql.append("WHERE idnumper = ? AND intipgas = ? "); sql.append("ORDER BY dsasocia;"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, id); stm.setString(2, tipoGasto); ResultSet rst = stm.executeQuery(); if (rst != null) { while (rst.next()) { PeriodoCabecera pc = new PeriodoCabecera(); pc.setAsociadoA(rst.getString(2)); pc.setIdentificador(rst.getString(1)); pc.setTipo(rst.getString(4)); pc.setEstatus(rst.getString(5)); pc.setFecha(rst.getDate(6)); pc.setDocumento(rst.getString(7)); pc.setReferencia(rst.getString(8)); pc.setRegistros(new ArrayList<PeriodoRegistro>()); registros.add(pc); } } return registros; } catch (SQLException e) { throw new GeDocDAOException("La consulta de datos de registros en el periodo genero una excepción, revise el log para más detalles.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } // @Override // public List<PeriodoCabecera> listaCabeceras(String id) throws GeDocDAOException { // List<PeriodoCabecera> registros = new ArrayList<PeriodoCabecera>(); // try { // conexion = Conexion.getConexion(); // StringBuilder sql = new StringBuilder(); // sql.append("SELECT idreggas, dsasocia, idnumper, intipgas, instatus, fefecreg, dsdocto, dsrefdoc, "); // sql.append("(SELECT SUM(B.dbimpreg) AS total FROM jctc10v B WHERE B.idreggas = A.idreggas GROUP BY B.idreggas) AS importe "); // sql.append("FROM jctm09t A"); // sql.append("WHERE idnumper = ? "); // sql.append("ORDER BY dsasocia;"); // PreparedStatement stm = conexion.prepareStatement(sql.toString()); // stm.setString(1, id); // ResultSet rst = stm.executeQuery(); // if (rst != null) { // while (rst.next()) { // PeriodoCabecera pc = new PeriodoCabecera(); // pc.setAsociadoA(rst.getString(2)); // pc.setIdentificador(rst.getString(1)); // pc.setTipo(rst.getString(4)); // pc.setEstatus(rst.getString(5)); // pc.setFecha(rst.getDate(6)); // pc.setDocumento(rst.getString(7)); // pc.setReferencia(rst.getString(8)); // pc.setRegistros(new ArrayList<PeriodoRegistro>()); // registros.add(pc); // } // } // return registros; // } catch (SQLException e) { // throw new GeDocDAOException("La consulta de datos de registros en el periodo genero una excepción, revise el log para más detalles.", e); // } finally { // try { // if (conexion != null & !conexion.isClosed()) { // conexion.close(); // } // } catch (SQLException e) { // } // } // } @Override public List<PeriodoCabecera> listaCabeceras(String id) throws GeDocDAOException { List<PeriodoCabecera> registros = new ArrayList<PeriodoCabecera>(); try { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder(); sql.append("SELECT idreggas, dsasocia, idnumper, intipgas, instatus, fefecreg, dsdocto, dsrefdoc "); sql.append("FROM jctm09t "); sql.append("WHERE idnumper = ? AND instatus = 'A' OR instatus = 'Q' "); // --> cafaray 221217: solo permite los pendientes de autorizar y los que tienen pendiente aplicar ajuste de periodo sql.append("ORDER BY dsasocia;"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, id); // stm.setString(2, "A"); // --> cafaray 221217: solo permite los que están pendientes de autorizar ResultSet rst = stm.executeQuery(); if (rst != null) { while (rst.next()) { PeriodoCabecera pc = new PeriodoCabecera(); pc.setAsociadoA(rst.getString(2)); pc.setIdentificador(rst.getString(1)); pc.setTipo(rst.getString(4)); pc.setEstatus(rst.getString(5)); pc.setFecha(rst.getDate(6)); pc.setDocumento(rst.getString(7)); pc.setReferencia(rst.getString(8)); pc.setRegistros(new ArrayList<PeriodoRegistro>()); registros.add(pc); } } return registros; } catch (SQLException e) { throw new GeDocDAOException("La consulta de datos de registros en el periodo genero una excepción, revise el log para más detalles.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public PeriodoCabecera registraCabecera(String id, PeriodoCabecera periodoCabecera) throws GeDocDAOException { try { conexion = Conexion.getConexion(); String sp = "registraCabecera"; SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, id)); params.add(new SpParam(2, Types.VARCHAR, periodoCabecera.getAsociadoA())); params.add(new SpParam(3, Types.VARCHAR, periodoCabecera.getTipo())); // -> cfa:121115 se solicita la inclusión de tres campos de control para la cabecera de registro params.add(new SpParam(4, Types.DATE, new java.sql.Date(periodoCabecera.getFecha().getTime()))); params.add(new SpParam(5, Types.VARCHAR, periodoCabecera.getDocumento())); params.add(new SpParam(6, Types.VARCHAR, periodoCabecera.getReferencia())); // <- cfa:121115 params.add(new SpParam(7, Types.VARCHAR, null, true)); params.add(new SpParam(8, Types.VARCHAR, null, true)); Object[] vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, sp, params); if (vuelta != null && vuelta.length == 2) { if (((String) vuelta[1]).isEmpty()) { periodoCabecera.setIdentificador((String) vuelta[0]); return periodoCabecera; } else { throw new GeDocDAOException("Fallo al ejecutar el procedimiento: " + (String) vuelta[1]); } } else { throw new GeDocDAOException("Fallo al ejecutar el procedimiento. No se devolvieron los parametros esperados, se esperaba 2 y se encontraron " + (vuelta != null ? vuelta.length : "null")); } } catch (SQLException e) { throw new GeDocDAOException("No se logró crear la cabecera del registro de gasto: " + e.getSQLState(), e); } catch (NullPointerException e) { throw new GeDocDAOException("No se logró crearla cabecera del registro de gasto. Se encontro un apuntador nulo " + e.getMessage(), e); } finally { try { if (conexion != null && !conexion.isClosed()) { conexion.close(); } } catch (SQLException ex) { Logger.getLogger(PeriodoDAOImpl.class.getName()).log(Level.SEVERE, null, ex); } } } @Override public void eliminaCabecera(PeriodoCabecera periodoCabecera) throws GeDocDAOException { eliminaCabecera(periodoCabecera.getIdentificador()); } @Override public void eliminaCabecera(String idCabecera) throws GeDocDAOException { try { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder(); sql.append("DELETE FROM jctm09t WHERE idreggas = ?"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, idCabecera); stm.executeUpdate(); } catch (SQLException e) { throw new GeDocDAOException("No se logró eliminar la cabecera del registro de gasto.", e); } finally { try { if (conexion != null && !conexion.isClosed()) { conexion.close(); } } catch (SQLException ex) { Logger.getLogger(PeriodoDAOImpl.class.getName()).log(Level.SEVERE, null, ex); } } } @Override public void eliminaCabeceras(String id) throws GeDocDAOException { try { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder(); sql.append("DELETE FROM jctm09t WHERE idnumper = ?"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, id); stm.executeUpdate(); } catch (SQLException e) { throw new GeDocDAOException("No se logró eliminar la cabecera del registro de gasto.", e); } finally { try { if (conexion != null && !conexion.isClosed()) { conexion.close(); } } catch (SQLException ex) { Logger.getLogger(PeriodoDAOImpl.class.getName()).log(Level.SEVERE, null, ex); } } } @Override public PeriodoCabecera encuentraCabeceraPorId(String idCabecera) throws GeDocDAOException { try { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder(); sql.append("SELECT idreggas, dsasocia, idnumper, intipgas, instatus, fefecreg, dsdocto, dsrefdoc "); sql.append(" FROM jctm09t "); sql.append(" WHERE idreggas = ? ORDER BY tmstmp, dsasocia;"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, idCabecera); ResultSet rst = stm.executeQuery(); PeriodoCabecera pc = new PeriodoCabecera(); if (rst != null && rst.next()) { pc.setAsociadoA(rst.getString(2)); pc.setIdentificador(rst.getString(1)); pc.setTipo(rst.getString(4)); pc.setEstatus(rst.getString(5)); pc.setFecha(rst.getDate(6)); pc.setDocumento(rst.getString(7)); pc.setReferencia(rst.getString(8)); } else { throw new GeDocDAOException("No se localizo la cabecera indicada [" + idCabecera + "]."); } return pc; } catch (SQLException e) { throw new GeDocDAOException("No se logró eliminar la cabecera del registro de gasto.", e); } finally { try { if (conexion != null && !conexion.isClosed()) { conexion.close(); } } catch (SQLException ex) { Logger.getLogger(PeriodoDAOImpl.class.getName()).log(Level.SEVERE, null, ex); } } } @Override public PeriodoRegistro encuentraRegistroPorId(String idRegistro) throws GeDocDAOException { try { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder(); sql.append("SELECT idregper, dsregper, intipreg, dtfecreg, dbimpreg, dbimpues, instatus, dsnotreg, iddocele "); sql.append(" FROM jctc10v "); sql.append(" WHERE idregper = ? "); sql.append(" ORDER BY dtfecreg;"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, idRegistro); ResultSet rst = stm.executeQuery(); PeriodoRegistro pr = new PeriodoRegistro(); if (rst != null) { if (rst.next()) { pr.setEstatus(rst.getString(7)); Documento documento = new Documento(); pr.setEvidencia(rst.getString(9)); pr.setFecha(rst.getDate(4)); pr.setImporte(rst.getDouble(5)); pr.setImpuesto(rst.getDouble(6)); pr.setNota(rst.getString(2)); pr.setRegistro(rst.getString(1)); pr.setTipo(rst.getString(3)); pr.setDescripcion(rst.getString(8)); } } return pr; } catch (SQLException e) { throw new GeDocDAOException("La consulta de datos de registros en el periodo genero una excepción, revise el log para más detalles.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public List<PeriodoRegistro> listaRegistrosPeriodo(Periodo periodo) throws GeDocDAOException { List<PeriodoRegistro> registros = new ArrayList<PeriodoRegistro>(); try { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder(); sql.append("SELECT idregper, intipreg, dtfecreg, dbimpreg, dbimpues, instatus, dsnotreg, iddocele, dsautori, dsregper "); sql.append(" FROM jctc10v "); sql.append("WHERE idreggas IN (SELECT idreggas FROM jctm09t WHERE idnumper = ?) "); sql.append("ORDER BY dtfecreg;"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, periodo.getIdentificador()); ResultSet rst = stm.executeQuery(); if (rst != null) { while (rst.next()) { PeriodoRegistro pr = new PeriodoRegistro(); pr.setEstatus(rst.getString(6)); pr.setFecha(rst.getDate(3) != null ? new java.util.Date(rst.getDate(3).getTime()) : new java.util.Date()); pr.setImporte(rst.getDouble(4)); pr.setImpuesto(rst.getDouble(5)); pr.setNota(rst.getString(7)); pr.setRegistro(rst.getString(1)); pr.setTipo(rst.getString(2)); pr.setDescripcion(rst.getString(10)); pr.setAutoriza(rst.getString(9)); pr.setEvidencia(rst.getString(8)); registros.add(pr); } } return registros; } catch (SQLException e) { throw new GeDocDAOException("La consulta de datos de registros en el periodo genero una excepción, revise el log para más detalles.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public CifraControl cierreCifraControl(Periodo periodo) throws GeDocDAOException { try { conexion = Conexion.getConexion(); String sp = "cifrasControl"; SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, periodo.getIdentificador())); params.add(new SpParam(2, Types.VARCHAR, null, true)); // manejador de error Object[] vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, sp, params); if (vuelta != null && vuelta.length == 1) { String error = vuelta[0] == null ? "" : (String) vuelta[0]; if (error.isEmpty()) { return getCifraControl(periodo.getIdentificador()); } else { throw new GeDocDAOException("Fallo al ejecutar el procedimiento. " + error); } } else { throw new GeDocDAOException("Fallo al ejecutar el procedimiento. No se devolvieron los parametros esperados, se esperaba 1 y se encontraron " + (vuelta != null ? vuelta.length : "null")); } } catch (SQLException e) { throw new GeDocDAOException("La ejecución del procedimiento de cifras de control del periodo generó una excepción, revise el log para más detalles.", e); } catch (NullPointerException e) { throw new GeDocDAOException("La ejecución del procedimiento de cifras de control del periodo generó una excepción, al parecer el valor es nulo en el retorno.", e); } finally { try { conexion.setAutoCommit(true); if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public String cierreCifraControlAjuste(Periodo periodo) throws GeDocDAOException { try { conexion = Conexion.getConexion(); String sp = "cifrasControlAjuste"; SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, periodo.getIdentificador())); params.add(new SpParam(2, Types.VARCHAR, null, true)); // identificador de ajuste params.add(new SpParam(3, Types.VARCHAR, null, true)); // manejador de error Object[] vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, sp, params); if (vuelta != null && vuelta.length == 2) { String idAjuste = (vuelta[0] == null ? "" : (String) vuelta[0]); String error = vuelta[1] == null ? "" : (String) vuelta[1]; if (error.isEmpty()) { if (!idAjuste.isEmpty()) { return idAjuste; } else { throw new GeDocDAOException("El procedimiento se ha ejecutado correctamente, pero no se identifico el numero del ajuste.. " + idAjuste); } } else { throw new GeDocDAOException("Fallo al ejecutar el procedimiento. " + error); } } else { throw new GeDocDAOException("Fallo al ejecutar el procedimiento. No se devolvieron los parametros esperados, se esperaba 2 y se encontraron " + (vuelta != null ? vuelta.length : "null")); } } catch (SQLException e) { throw new GeDocDAOException("La ejecución del procedimiento de cifras de control de ajuste en el periodo generó una excepción, revise el log para más detalles.", e); } catch (NullPointerException e) { throw new GeDocDAOException("La ejecución del procedimiento de cifras de control de ajuste en el periodo generó una excepción, al parecer el valor es nulo en el retorno.", e); } finally { try { conexion.setAutoCommit(true); if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public List<PeriodoRegistro> listaRegistros(String id) throws GeDocDAOException { List<PeriodoRegistro> registros = new ArrayList<PeriodoRegistro>(); try { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder(); sql.append("SELECT idregper, intipreg, dtfecreg, dbimpreg, dbimpues, instatus, dsnotreg, iddocele, dsautori, dsregper "); sql.append(" FROM jctc10v "); sql.append("WHERE idreggas = ? "); sql.append("ORDER BY dtfecreg;"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, id); ResultSet rst = stm.executeQuery(); if (rst != null) { while (rst.next()) { PeriodoRegistro pr = new PeriodoRegistro(); pr.setEstatus(rst.getString(6)); Documento documento = new Documento(); pr.setFecha(rst.getDate(3) != null ? new java.util.Date(rst.getDate(3).getTime()) : new java.util.Date()); pr.setImporte(rst.getDouble(4)); pr.setImpuesto(rst.getDouble(5)); pr.setNota(rst.getString(7)); pr.setRegistro(rst.getString(1)); pr.setTipo(rst.getString(2)); pr.setDescripcion(rst.getString(10)); pr.setAutoriza(rst.getString(9)); pr.setEvidencia(rst.getString(8)); registros.add(pr); } } return registros; } catch (SQLException e) { throw new GeDocDAOException("La consulta de datos de registros en el periodo genero una excepción, revise el log para más detalles.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public PeriodoCifraControl obtieneCifraControl(String id) throws GeDocDAOException { try { conexion = Conexion.getConexion(); String sql = "SELECT innumreg, dbmonto FROM jctm02t WHERE idnumper = ?;"; PreparedStatement stm = conexion.prepareStatement(sql); ResultSet rst = stm.executeQuery(); if (rst != null) { if (rst.next()) { PeriodoCifraControl cifraControl = new PeriodoCifraControl(); cifraControl.setRegistros(rst.getInt(1)); cifraControl.setMonto(rst.getDouble(2)); return cifraControl; } } throw new GeDocDAOException("El elemento de cifra control para el periodo no se encuentra registrado."); } catch (SQLException e) { throw new GeDocDAOException("La consulta de datos de registros en el periodo genero una excepción, revise el log para más detalles.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public PeriodoCifraControl insertaCifraControl(String id, PeriodoCifraControl cifraControl) throws GeDocDAOException { try { conexion = Conexion.getConexion(); String sp = "registraCifraControl"; SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, id)); params.add(new SpParam(2, Types.INTEGER, cifraControl.getRegistros())); params.add(new SpParam(3, Types.DOUBLE, cifraControl.getMonto())); params.add(new SpParam(4, Types.VARCHAR, null, true)); //error Object vuelta[] = Conexion.ejecutaStoreProcedureConSalida(conexion, sp, params); if (vuelta != null && vuelta.length == 1) { if (((String) vuelta[0]).isEmpty()) { return cifraControl; } else { throw new GeDocDAOException((String) vuelta[0]); } } else if (vuelta != null) { throw new GeDocDAOException("Los parametros de salida del procedimiento son inconrrectos, se esperaban 1 se tienen " + vuelta.length); } else { throw new GeDocDAOException("No se obtuevo respuesta desde la base de datos."); } } catch (SQLException e) { throw new GeDocDAOException("La actualizacion de cifras control genero una excepción, revise el log para más detalles.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public PeriodoCifraControl actualizaCifraControl(String id, PeriodoCifraControl cifraControl) throws GeDocDAOException { return insertaCifraControl(id, cifraControl); } @Override public List<PeriodoRegistro> insertaRegistros(String id, List<PeriodoRegistro> periodoRegistros) throws GeDocDAOException { for (PeriodoRegistro periodoRegistro : periodoRegistros) { insertaRegistro(id, periodoRegistro); } return periodoRegistros; } @Override public PeriodoRegistro insertaRegistro(String id, PeriodoRegistro periodoRegistro) throws GeDocDAOException { try { conexion = Conexion.getConexion(); String sp = "insertaRegistro"; SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, id)); params.add(new SpParam(2, Types.VARCHAR, periodoRegistro.getDescripcion())); params.add(new SpParam(3, Types.VARCHAR, periodoRegistro.getTipo())); params.add(new SpParam(4, Types.DATE, new java.sql.Date(periodoRegistro.getFecha().getTime()))); params.add(new SpParam(5, Types.DOUBLE, periodoRegistro.getImporte())); params.add(new SpParam(6, Types.DOUBLE, periodoRegistro.getImpuesto())); params.add(new SpParam(7, Types.VARCHAR, periodoRegistro.getNota())); params.add(new SpParam(8, Types.VARCHAR, periodoRegistro.getAutoriza())); params.add(new SpParam(9, Types.VARCHAR, null, true)); // identificador de registro params.add(new SpParam(10, Types.VARCHAR, null, true)); // Error Object[] vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, sp, params); if (vuelta != null && vuelta.length == 2) { if (vuelta[1] != null && !((String) vuelta[1]).isEmpty()) { throw new GeDocDAOException((String) vuelta[1]); } else { periodoRegistro.setRegistro((String) vuelta[0]); if (periodoRegistro.getEvidencia() != null) { //registra la evidencia params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, id)); params.add(new SpParam(2, Types.VARCHAR, periodoRegistro.getRegistro())); params.add(new SpParam(3, Types.VARCHAR, periodoRegistro.getEvidencia())); params.add(new SpParam(4, Types.VARCHAR, null, true)); // identificador de registro params.add(new SpParam(5, Types.VARCHAR, null, true)); // Error vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, "insertaEvidencia", params); if (vuelta != null && vuelta.length == 2) { if (vuelta[1] != null && !((String) vuelta[1]).isEmpty()) { throw new GeDocDAOException((String) vuelta[1]); } } } return periodoRegistro; } } else if (vuelta != null) { throw new GeDocDAOException("El procedimiento de registros no devolvio los parametros esperados, se peraban 2 y se tienen " + vuelta.length); } else { throw new GeDocDAOException("El procedimiento de registros no devolvio resultados"); } } catch (SQLException e) { throw new GeDocDAOException("El procedimiento de registros devolvio una excepcion.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public void eliminaRegistro(PeriodoRegistro periodoRegistro) throws GeDocDAOException { try { conexion = Conexion.getConexion(); String sp = "eliminaRegistro"; SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, periodoRegistro.getRegistro())); params.add(new SpParam(2, Types.VARCHAR, null, true)); Object[] vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, sp, params); if (vuelta != null && vuelta.length == 1) { System.out.println("=====> Se ha eliminado el registro " + periodoRegistro.getRegistro() + " correctamente."); } else { throw new GeDocDAOException("El procedimiento de registros no devolvio los parametros esperados, se peraban 1 y se tienen " + (vuelta != null ? vuelta.length : "null")); } } catch (SQLException e) { throw new GeDocDAOException("El procedimiento de registros devolvio una excepcion.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public List<String> listaAsignados() throws GeDocDAOException { try { conexion = Conexion.getConexion(); List<String> listado = new ArrayList<String>(); String sql = "SELECT dsasocia FROM jctm09t ORDER BY dsasocia;"; PreparedStatement stm = conexion.prepareStatement(sql); ResultSet rst = stm.executeQuery(); if (rst != null) { while (rst.next()) { listado.add(rst.getString(1)); } } return listado; } catch (SQLException e) { throw new GeDocDAOException("El procedimiento de registros devolvio una excepcion.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public Map<String, String> listadoTipoComprobante(String tipoGasto) throws GeDocDAOException { Map<String, String> listado = TipoComprobante.listaTipoComprobante(tipoGasto); return listado; } @Override public List<PeriodoRegistro> pendientesAprobacion(String idCabecera) throws GeDocDAOException { try { conexion = Conexion.getConexion(); List<PeriodoRegistro> listado = new ArrayList<PeriodoRegistro>(); StringBuilder sql = new StringBuilder("SELECT "); sql.append(" idreggas,idregper,intipreg,dtfecreg,dbimpreg,dbimpues,dsautori,instatus,dsnotreg,dsfiles "); sql.append(" FROM jctc10v "); sql.append(" WHERE dsautori = '';"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); ResultSet rst = stm.executeQuery(); if (rst != null) { while (rst.next()) { PeriodoRegistro pr = new PeriodoRegistro(); pr.setAutoriza(""); pr.setDescripcion(rst.getString(9)); pr.setEstatus("8"); pr.setFecha(new java.util.Date(rst.getDate(4).getTime())); pr.setImporte(rst.getDouble(5)); pr.setImpuesto(rst.getDouble(6)); pr.setNota(rst.getString(9)); pr.setRegistro(rst.getString(1)); pr.setTipo(rst.getString(3)); pr.setEvidencia(rst.getString(10)); listado.add(pr); } } return listado; } catch (SQLException e) { throw new GeDocDAOException("El procedimiento de registros devolvio una excepcion.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public PeriodoRegistro actualizaTipoComprobanteRegistro(PeriodoRegistro periodoRegistro, String tipoComprobante) throws GeDocDAOException { try { if (periodoRegistro != null) { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder("UPDATE jctm10t SET "); sql.append(" intipreg = ?"); sql.append(" WHERE idregper = ? AND instatus = 'A';"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, tipoComprobante); stm.setString(2, periodoRegistro.getRegistro()); int x = stm.executeUpdate(); if (x > 0) { periodoRegistro.setTipo(tipoComprobante); } } return periodoRegistro; } catch (SQLException e) { throw new GeDocDAOException("La actualización del registro devolvio una excepcion.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public PeriodoRegistro actualizaAutorizaRegistro(PeriodoRegistro periodoRegistro, String autoriza) throws GeDocDAOException { try { if (periodoRegistro != null) { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder("UPDATE jctm10t SET "); sql.append(" dsautori = ?, instatus = 'A' "); sql.append(" WHERE idregper = ? AND instatus = 'P';"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, autoriza); stm.setString(2, periodoRegistro.getRegistro()); System.out.println("Ready to update Autoriza in records: " + stm.toString()); int x = stm.executeUpdate(); if (x > 0) { periodoRegistro.setAutoriza(autoriza); } } return periodoRegistro; } catch (SQLException e) { throw new GeDocDAOException("La actualización del registro devolvio una excepcion.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public PeriodoRegistro actualizaEstadoRegistro(PeriodoRegistro periodoRegistro, String estado) throws GeDocDAOException { try { if (periodoRegistro != null) { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder("UPDATE jctm10t SET "); sql.append(" instatus = ?"); sql.append(" WHERE idregper = ? AND instatus = 'A';"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, estado); stm.setString(2, periodoRegistro.getRegistro()); int x = stm.executeUpdate(); if (x > 0) { periodoRegistro.setEstatus(estado); } } return periodoRegistro; } catch (SQLException e) { throw new GeDocDAOException("La actualización del registro devolvio una excepcion.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public PeriodoCabecera cierraCabecera(String idCabecera) throws GeDocDAOException { try { PeriodoCabecera cabecera = encuentraCabeceraPorId(idCabecera); if (cabecera != null) { conexion = Conexion.getConexion(); String sp = "cierraRegistroCabecera"; SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, cabecera.getIdentificador())); params.add(new SpParam(2, Types.VARCHAR, null, true)); //nuevo estatus params.add(new SpParam(3, Types.VARCHAR, null, true)); //error en caso de que ocurra Object[] vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, sp, params); if (vuelta != null && vuelta.length == 2) { if (vuelta[1].toString().isEmpty()) { cabecera.setEstatus((String) vuelta[0]); } else { throw new GeDocDAOException((String) vuelta[1]); } } else { throw new GeDocDAOException("El llamado al procedimiento genero una respuesta no esperada, se esperaban 2 y se tienen " + (vuelta != null ? vuelta.length : "null")); } return cabecera; } else { throw new GeDocDAOException("El registro de control de gasto no existe, verifique " + idCabecera); } } catch (SQLException e) { throw new GeDocDAOException("Ocurrio un error al actualizar el estado del registro de control de gasto.", e); } finally { try { if (conexion != null && !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { e.printStackTrace(System.out); } } } @Override public PeriodoCabecera cierraCabeceraAjuste(String idCabecera) throws GeDocDAOException { try { System.out.printf("Prepara llamada a proceso: cierraRegistroCabeceraAjuste (%s)", idCabecera); PeriodoCabecera cabecera = encuentraCabeceraPorId(idCabecera); System.out.printf("\t\t\t[OK:%s]\n",cabecera.getIdentificador()); if (cabecera != null) { System.out.printf("Ejecuta proceso: cierraRegistroCabeceraAjuste (%s)", cabecera.getIdentificador()); conexion = Conexion.getConexion(); String sp = "cierraRegistroCabeceraAjuste"; SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, cabecera.getIdentificador())); params.add(new SpParam(2, Types.VARCHAR, null, true)); //nuevo estatus params.add(new SpParam(3, Types.VARCHAR, null, true)); //error en caso de que ocurra Object[] vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, sp, params); if (vuelta != null && vuelta.length == 2) { if (vuelta[1].toString().isEmpty()) { cabecera.setEstatus((String) vuelta[0]); } else { throw new GeDocDAOException((String) vuelta[1]); } } else { throw new GeDocDAOException("El llamado al procedimiento genero una respuesta no esperada, se esperaban 2 y se tienen " + (vuelta != null ? vuelta.length : "null")); } return cabecera; } else { throw new GeDocDAOException("El registro de control de gasto no existe, verifique " + idCabecera); } } catch (SQLException e) { throw new GeDocDAOException("Ocurrio un error al actualizar el estado del registro de control de gasto.", e); } finally { try { if (conexion != null && !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { e.printStackTrace(System.out); } } } @Override public PeriodoCabecera encuentraPorRegisro(String idRegistro) throws GeDocDAOException { try { PeriodoRegistro registro = encuentraRegistroPorId(idRegistro); if (registro != null) { conexion = Conexion.getConexion(); String sql = "SELECT idreggas, dsasocia, idnumper, intipgas, instatus FROM jctm09t WHERE idreggas = (SELECT idreggas FROM jctm10t WHERE idregper = ?);"; PreparedStatement stm = conexion.prepareStatement(sql); stm.setString(1, idRegistro); ResultSet rst = stm.executeQuery(); PeriodoCabecera cabecera = null; if (rst.next()) { cabecera = new PeriodoCabecera(); cabecera.setAsociadoA(rst.getString(2)); cabecera.setEstatus(rst.getString(5)); cabecera.setIdentificador(rst.getString(1)); cabecera.setTipo(rst.getString(4)); } return cabecera; } else { throw new GeDocDAOException("El registro con el detalle de control de gasto no existe, verifique " + idRegistro); } } catch (SQLException e) { throw new GeDocDAOException("Ocurrio un error al obtener el registro de control de gasto.", e); } finally { try { if (conexion != null && !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { e.printStackTrace(System.out); } } } @Override public CifraControl getCifraControl(String idPeriodo) throws GeDocDAOException { try { CifraControl cifraControl = new CifraControl(); if (conexion != null && conexion.isClosed()) { conexion = Conexion.getConexion(); } String sql = "SELECT idnumper, dbmonto, inregaso, inregtip, dbmaxaso, dbminaso, dbmaxtip, dbmintip, ajustes " + " FROM jctm02t A LEFT JOIN (" + " SELECT COUNT(idajuste) ajustes, idnumper periodo FROM jctm03t GROUP BY idnumper " + " ) B ON A.idnumper = B.periodo " + " WHERE idnumper = ?;"; PreparedStatement stm = conexion.prepareStatement(sql); stm.setString(1, idPeriodo); ResultSet rst = stm.executeQuery(); if (rst.next()) { cifraControl.setMonto(rst.getDouble(2)); cifraControl.setRegistrosAsociado(rst.getInt(3)); cifraControl.setRegistrosTipo(rst.getInt(4)); cifraControl.setMaxAsociado(rst.getDouble(5)); cifraControl.setMinAsociado(rst.getDouble(6)); cifraControl.setMaxTipo(rst.getDouble(7)); cifraControl.setMinTipo(rst.getDouble(8)); cifraControl.setAjustes(rst.getInt(9)); } return cifraControl; } catch (SQLException e) { throw new GeDocDAOException("Ocurrio un error al obtener las cifras control del registro de gasto.", e); } finally { try { if (conexion != null && !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { e.printStackTrace(System.out); } } } @Override public List<CifraControlAjuste> getCifraControlAjuste(String idPeriodo) throws GeDocDAOException { try { List<CifraControlAjuste> ajustes = new ArrayList<CifraControlAjuste>(); if (conexion != null && conexion.isClosed()) { conexion = Conexion.getConexion(); } String sql = "SELECT A.idajuste, B.dbmonto, B.inregaso, B.inregtip, B.dbmaxaso, B.dbminaso, B.dbmaxtip, B.dbmintip, A.fefecaju " + " FROM jctm03t A INNER JOIN jctm3ct B " + " ON A.idajuste = B.idajuste " + " WHERE idnumper = ?;"; PreparedStatement stm = conexion.prepareStatement(sql); stm.setString(1, idPeriodo); ResultSet rst = stm.executeQuery(); while (rst.next()) { CifraControlAjuste cifraControl = new CifraControlAjuste(); cifraControl.setAjuste(rst.getString(1)); cifraControl.setMonto(rst.getDouble(2)); cifraControl.setRegistrosAsociado(rst.getInt(3)); cifraControl.setRegistrosTipo(rst.getInt(4)); cifraControl.setMaxAsociado(rst.getDouble(5)); cifraControl.setMinAsociado(rst.getDouble(6)); cifraControl.setMaxTipo(rst.getDouble(7)); cifraControl.setMinTipo(rst.getDouble(8)); cifraControl.setFechaAjuste(rst.getDate(9)); cifraControl.setAjustes(0); ajustes.add(cifraControl); } return ajustes; } catch (SQLException e) { throw new GeDocDAOException("Ocurrio un error al obtener el registro de cifras control del ajuste de gasto.", e); } finally { try { if (conexion != null && !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { e.printStackTrace(System.out); } } } @Override public Periodo encuentraPorFecha(String valor) throws GeDocDAOException { try { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder(); sql.append("SELECT idnumper, inanyper, innumper, dtfecape, dtfeccie, instatus, dscoment "); sql.append("FROM jctm01t "); sql.append(" WHERE inanyper = ? AND innumper = ? "); sql.append("ORDER BY inanyper DESC, innumper DESC;"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); String anyper = valor.substring(0, 4); String numper = valor.substring(4); stm.setInt(1, Integer.valueOf(anyper)); stm.setInt(2, Integer.valueOf(numper)); ResultSet rst = stm.executeQuery(); if (rst != null) { if (rst.next()) { Periodo periodo = new Periodo(); periodo.setAny(rst.getInt(2)); periodo.setApertura(rst.getDate(4)); periodo.setCierre(rst.getDate(5) != null ? rst.getDate(5) : null); periodo.setComentario(rst.getString(7)); periodo.setEstatus(rst.getString(6)); periodo.setIdentificador(rst.getString(1)); periodo.setPeriodo(rst.getInt(3)); return periodo; } } return new Periodo(); } catch (NumberFormatException e) { throw new GeDocDAOException("La consulta de datos de periodo genero una excepción, el valor de periodo no es valido " + valor, e); } catch (SQLException e) { throw new GeDocDAOException("La consulta de datos de periodo genero una excepción, revise el log para más detalles.", e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public Periodo getPeriodo(int any, int mes) throws GeDocDAOException { try { conexion = Conexion.getConexion(); String sql = "SELECT idnumper, inanyper, innumper, dtfecape, dtfeccie, instatus, dscoment FROM jctm01t WHERE inanyper = ? AND innumper = ?;"; PreparedStatement stm = conexion.prepareStatement(sql); stm.setInt(1, any); stm.setInt(2, mes); ResultSet rst = stm.executeQuery(); if (rst != null) { if (rst.next()) { Periodo periodo = new Periodo(); periodo.setIdentificador(rst.getString(1)); periodo.setAny(any); periodo.setPeriodo(mes); periodo.setApertura(rst.getDate(4)); periodo.setCierre(rst.getDate(5)); periodo.setEstatus(rst.getString(6)); periodo.setComentario(rst.getString(7)); return periodo; } } return null; } catch (SQLException e) { throw new GeDocDAOException("La consulta de datos de periodo genero una excepción, el valor de periodo no es valido " + String.valueOf(any) + String.valueOf(mes), e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public List<PeriodoCabecera> getCabecerasAgrupadasPorAsociado(String idPeriodo) throws GeDocDAOException { try { conexion = Conexion.getConexion(); String sql = "SELECT dsasocia, SUM(importe) " + " FROM jctm09t A LEFT JOIN ( " + " SELECT idreggas, dbimpreg importe FROM jctm10t " + " ) B ON A.idreggas = B.idreggas " + " WHERE idnumper = ? " + " GROUP BY dsasocia;"; PreparedStatement stm = conexion.prepareStatement(sql); stm.setString(1, idPeriodo); System.out.println("Consulta de cabeceras: " + stm.toString()); ResultSet rst = stm.executeQuery(); if (rst != null) { List<PeriodoCabecera> cabeceras = new ArrayList<PeriodoCabecera>(); while (rst.next()) { PeriodoCabecera pc = new PeriodoCabecera(); pc.setAsociadoA(rst.getString(1)); pc.setImporte(rst.getDouble(2)); cabeceras.add(pc); } return cabeceras; } return null; } catch (SQLException e) { throw new GeDocDAOException("La consulta de cabeceras en el periodo especificado genero una execepción " + idPeriodo + ", " + e.getSQLState(), e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public List<PeriodoCabecera> getCabecerasPorAsociado(String idPeriodo, String asociado) throws GeDocDAOException { List<PeriodoCabecera> cabeceras = new ArrayList<PeriodoCabecera>(); try { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder(); sql.append("SELECT A.idreggas, dsasocia, idnumper, A.intipgas, A.instatus, fefecreg, dsdocto, dsrefdoc, total, cuenta "); sql.append(" FROM jctm09t A LEFT JOIN ("); sql.append(" SELECT idreggas, IFNULL(SUM(dbimpreg), 0) AS total, IFNULL(COUNT(idreggas), 0) cuenta FROM jctc10v GROUP BY idreggas "); sql.append(" ) C ON A.idreggas = C.idreggas "); sql.append(" WHERE (idnumper = ? AND dsasocia LIKE(?));"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.setString(1, idPeriodo); stm.setString(2, "%".concat(asociado).concat("%")); System.out.println("Consulta de cabeceras por asociado: " + stm.toString()); ResultSet rst = stm.executeQuery(); if (rst != null) { while (rst.next()) { PeriodoCabecera pc = new PeriodoCabecera(); pc.setIdentificador(rst.getString(1)); pc.setAsociadoA(rst.getString(2)); pc.setTipo(rst.getString(4)); pc.setEstatus(rst.getString(5)); pc.setFecha(rst.getDate(6)); pc.setDocumento(rst.getString(7)); pc.setReferencia(rst.getString(8)); pc.setRegistros(new ArrayList<PeriodoRegistro>()); pc.setImporte(rst.getDouble(9)); pc.setCuentaFueraPeriodo(rst.getInt(10)); cabeceras.add(pc); } } return cabeceras; } catch (SQLException e) { //System.out.println("Consulta de cabeceras de gasto: " + e.getMessage() + " -> " + e.getCause()); throw new GeDocDAOException("Excepción al consultar registros de cabecera de gastos en el periodo: " + idPeriodo + ", " + asociado + " - " +e.getSQLState(), e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } @Override public List<PeriodoCabecera> getCabeceras(String...params) throws GeDocDAOException { List<PeriodoCabecera> cabeceras = new ArrayList<PeriodoCabecera>(); try { conexion = Conexion.getConexion(); StringBuilder sql = new StringBuilder(); sql.append("SELECT A.idreggas, dsasocia, idnumper, A.intipgas, A.instatus, fefecreg, dsdocto, dsrefdoc, total, cuenta "); sql.append(" FROM jctm09t A LEFT JOIN ("); sql.append(" SELECT idreggas, IFNULL(SUM(dbimpreg), 0) AS total, IFNULL(COUNT(idreggas), 0) cuenta FROM jctc10v GROUP BY idreggas "); sql.append(" ) C ON A.idreggas = C.idreggas "); if (params.length>0){ sql.append(" WHERE "); boolean started = false; for (String param: params){ if (param.startsWith("fechaInicia")){ // set condition to fechaInicia sql.append(" fefecreg >= '").append(Comunes.DMAtoFechaSQL(param.substring(param.indexOf(":")+1))).append("' "); started=true; } else if(param.startsWith("fechaTermina")){ if (started) sql.append(" AND "); sql.append(" fefecreg <= '").append(Comunes.DMAtoFechaSQL(param.substring(param.indexOf(":")+1))).append("' "); started=true; } else if(param.startsWith("asignadoA")){ if (started) sql.append(" AND "); sql.append(" dsasocia LIKE '%").append(param.substring(param.indexOf(":")+1)).append("%' "); started=true; } else if(param.startsWith("documento")){ if (started) sql.append(" AND "); sql.append(" dsdocto LIKE '%").append(param.substring(param.indexOf(":")+1)).append("%' "); started=true; } else if(param.startsWith("referencia")){ if (started) sql.append(" AND "); sql.append(" dsrefdoc = '%").append(param.substring(param.indexOf(":")+1)).append("%' "); started=true; } else if(param.startsWith("estatus")){ if (started) sql.append(" AND "); sql.append(" A.instatus = '").append(param.substring(param.indexOf(":")+1)).append("' "); started=true; } } } sql.append(" ORDER BY fefecreg desc, A.intipgas desc, dsasocia"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); System.out.println("Consulta de cabeceras: " + stm.toString()); ResultSet rst = stm.executeQuery(); if (rst != null) { while (rst.next()) { PeriodoCabecera pc = new PeriodoCabecera(); pc.setIdentificador(rst.getString(1)); pc.setAsociadoA(rst.getString(2)); pc.setTipo(rst.getString(4)); pc.setEstatus(rst.getString(5)); pc.setFecha(rst.getDate(6)); pc.setDocumento(rst.getString(7)); pc.setReferencia(rst.getString(8)); pc.setRegistros(new ArrayList<PeriodoRegistro>()); pc.setImporte(rst.getDouble(9)); pc.setCuentaFueraPeriodo(rst.getInt(10)); cabeceras.add(pc); } } return cabeceras; } catch (SQLException e) { throw new GeDocDAOException("Excepción al consultar las cabeceras de gastos: " + e.getSQLState(), e); } finally { try { if (conexion != null & !conexion.isClosed()) { conexion.close(); } } catch (SQLException e) { } } } } <file_sep>/test2.sh #!/bin/bash MY_PROMPT='$ ' #while : #do echo -n "$MY_PROMPT" read line echo "$line" # done exit 0 <file_sep>/src/java/com/ftc/webcom/servlets/ExpenditurePersonas.java package com.ftc.webcom.servlets; import com.ftc.aq.Comunes; import com.ftc.gedoc.bo.ContactoBO; import com.ftc.gedoc.bo.PeriodoBo; import com.ftc.gedoc.bo.impl.ContactoBOImpl; import com.ftc.gedoc.bo.impl.PeriodoBOImpl; import com.ftc.gedoc.exceptions.GeDocBOException; import com.ftc.gedoc.utiles.Contacto; import java.io.IOException; import java.io.PrintWriter; import java.util.List; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; public class ExpenditurePersonas extends HttpServlet { @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { response.setContentType("text/html;charset=ISO-8859-1"); PrintWriter out = response.getWriter(); String jsonPoutput = ""; HttpSession session = request.getSession(); try { String filtro = request.getParameter("term"); String persona = session.getAttribute("persona")!=null?(String)session.getAttribute("persona"):""; //PeriodoBo bo = new PeriodoBOImpl(); //List<String> usuarios = bo.filtraPersonasAsociadas(filtro); ContactoBO bo = new ContactoBOImpl(); List<Contacto> contactos = bo.contactoPorEmpresa(persona, filtro); String callBackJavaScripMethodName = request.getParameter("callback"); String json = "["; for (Contacto contacto: contactos) { StringBuilder nombre = new StringBuilder(); nombre.append(contacto.getNombre()).append(" ").append(contacto.getApellido()); json += String.format("{label:'%s', usuario:'%s'},",nombre.toString(), contacto.getCorreo()); } json += "]"; jsonPoutput = callBackJavaScripMethodName + "(" + json + ");"; } catch (GeDocBOException e) { jsonPoutput = "No hay resultado. " + e.getMessage(); Comunes.escribeLog(getServletContext().getInitParameter("logLocation"), e, (String) request.getSession().getAttribute("usuario")); } finally { out.print(jsonPoutput); } } @Override protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { //processRequest(request, response); } @Override public String getServletInfo() { return "AutoComplete para las personas asociadas a Gastos"; } } <file_sep>/src/java/com/ftc/gedoc/utiles/comparators/ExpensesComparatorPorReferencia.java package com.ftc.gedoc.utiles.comparators; import com.ftc.gedoc.utiles.PeriodoCabecera; import java.util.Comparator; public class ExpensesComparatorPorReferencia implements Comparator<PeriodoCabecera>{ @Override public int compare(PeriodoCabecera o1, PeriodoCabecera o2) { return o1.getReferencia().compareTo(o2.getReferencia()); } } <file_sep>/src/java/com/ftc/gedoc/dao/CEPArchivoDAO.java package com.ftc.gedoc.dao; import com.ftc.gedoc.exceptions.GeDocDAOException; import com.ftc.services.invoice.modelo.CEPArchivo; import java.util.List; public interface CEPArchivoDAO { CEPArchivo registraCEP(CEPArchivo archivo) throws GeDocDAOException; CEPArchivo actualizaCEP(CEPArchivo archivo) throws GeDocDAOException; CEPArchivo obtieneCEP(String identificador) throws GeDocDAOException; boolean eliminaCEP(String identificador) throws GeDocDAOException; List<CEPArchivo> listar(String proveedor) throws GeDocDAOException; List<CEPArchivo> listar(String proveedor, String fechaInicial, String fechaFinal) throws GeDocDAOException; } <file_sep>/src/java/com/ftc/webcom/servlets/UploadFileTemp.java package com.ftc.webcom.servlets; import com.ftc.services.invoice.CFDIReader; import com.ftc.services.invoice.modelo.CFDICabecera; import java.io.File; import java.io.IOException; import java.io.PrintWriter; import java.util.Iterator; import java.util.List; import javax.servlet.ServletContext; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.commons.fileupload.FileItem; import org.apache.commons.fileupload.disk.DiskFileItemFactory; import org.apache.commons.fileupload.servlet.ServletFileUpload; public class UploadFileTemp extends HttpServlet { protected void processRequest(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { response.setContentType("text/html;charset=UTF-8"); PrintWriter out = response.getWriter(); try { File file; int maxFileSize = 200 * 1024; int maxMemSize = 2000 * 1024; ServletContext context = getServletContext(); String filePath = "/tmp/"; //context.getInitParameter("factura"); String contentType = request.getContentType(); if ((contentType.indexOf("multipart/form-data") >= 0)) { DiskFileItemFactory factory = new DiskFileItemFactory(); factory.setSizeThreshold(maxMemSize); factory.setRepository(new File(filePath)); ServletFileUpload upload = new ServletFileUpload(factory); upload.setSizeMax(maxFileSize); try { List fileItems = upload.parseRequest(request); Iterator i = fileItems.iterator(); while (i.hasNext()) { FileItem fi = (FileItem) i.next(); if (!fi.isFormField()) { String fieldName = fi.getFieldName(); String fileName = fi.getName(); boolean isInMemory = fi.isInMemory(); long sizeInBytes = fi.getSize(); file = new File(filePath + fileName.substring(fileName.lastIndexOf("/") + 1)); fi.write(file); StringBuilder archivo = new StringBuilder(); archivo.append(filePath).append(fileName); System.out.println("Uploaded temp file: " + archivo.toString()); //lee el archivo xml CFDICabecera xmlDoc = CFDIReader.procesaXML(archivo.toString()); if (xmlDoc != null) { out.print("fecha:"+xmlDoc.getStrFecha()+",importe:"+xmlDoc.getTotal()+",impuestos:"+xmlDoc.getTotalImpuestosTrasladados()); } else { out.printf("No se logro leer el archivo %s como un CFD/CFDI valido.", fileName); } } } } catch (Exception ex) { System.out.println(ex); } } else { out.println("No file to upload."); } } finally { out.close(); } } @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { //processRequest(request, response); } @Override protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { processRequest(request, response); } @Override public String getServletInfo() { return "Short description"; } } <file_sep>/src/java/com/ftc/gedoc/bo/impl/ContactoBOImpl.java package com.ftc.gedoc.bo.impl; import com.ftc.gedoc.bo.ContactoBO; import com.ftc.gedoc.dao.ContactoDAO; import com.ftc.gedoc.dao.impl.ContactoDAOImpl; import com.ftc.gedoc.exceptions.GeDocBOException; import com.ftc.gedoc.exceptions.GeDocDAOException; import com.ftc.gedoc.utiles.Contacto; import java.util.List; public class ContactoBOImpl implements ContactoBO{ private ContactoDAO dao; public ContactoBOImpl(){ dao = new ContactoDAOImpl(); } @Override public Contacto buscarPorCorreo(String correo) throws GeDocBOException { try{ return dao.encuentraPorCorreo(correo); }catch(GeDocDAOException e){ throw new GeDocBOException(e); } } @Override public List<Contacto> contactoPorEmpresa(String idEmpresa) throws GeDocBOException { try{ return dao.listarContactosPorEmpresa(idEmpresa); }catch(GeDocDAOException e){ throw new GeDocBOException(e); } } @Override public Contacto buscarPorId(String id) throws GeDocBOException { try{ return dao.encuentraPorId(id); }catch(GeDocDAOException e){ throw new GeDocBOException(e); } } @Override public List<Contacto> contactoPorEmpresa(String idEmpresa, String filtro) throws GeDocBOException { try{ return dao.listarContactosPorEmpresa(idEmpresa, filtro); }catch(GeDocDAOException e){ throw new GeDocBOException(e); } } @Override public List<Contacto> obtieneContactos(String sesion) throws GeDocBOException { try { return dao.obtieneContactos(sesion); } catch(GeDocDAOException e){ throw new GeDocBOException(e); } } @Override public List<Contacto> obtieneContactos(String empresa, String tipo, String sesion) throws GeDocBOException { try { return dao.obtieneContactos(empresa, tipo, sesion); } catch(GeDocDAOException e){ throw new GeDocBOException(e); } } @Override public Contacto buscaContactoPorCorreo(String empresa, String correo, String sesion) throws GeDocBOException { try { return dao.buscaContactoPorCorreo(empresa, correo, sesion); } catch(GeDocDAOException e){ throw new GeDocBOException(e); } } @Override public List<Contacto> listaContactos(String sesion) throws GeDocBOException { try { return dao.listaContactos(sesion); } catch(GeDocDAOException e){ throw new GeDocBOException(e); } } @Override public List<Contacto> listaContactosSuspendidos(String sesion) throws GeDocBOException { try { return dao.listaContactosSuspendidos(sesion); } catch(GeDocDAOException e){ throw new GeDocBOException(e); } } @Override public Contacto insertaContacto(Contacto contacto, String contrasenia, String sesion) throws GeDocBOException { try { return dao.insertaContacto(contacto, contrasenia, sesion); } catch(GeDocDAOException e){ throw new GeDocBOException(e); } } @Override public Contacto actualizarContacto(Contacto contacto, String sesion) throws GeDocBOException { try { return dao.actualizarContacto(contacto, sesion); } catch(GeDocDAOException e){ throw new GeDocBOException(e); } } @Override public boolean eliminarContacto(Contacto contacto, String sesion) throws GeDocBOException { try { return dao.eliminarContacto(contacto, sesion); } catch(GeDocDAOException e){ throw new GeDocBOException(e); } } } <file_sep>/src/java/com/ftc/webcom/servlets/PersonasRFC.java package com.ftc.webcom.servlets; import com.ftc.aq.Comunes; import com.ftc.aq.Conexion; import com.ftc.gedoc.bo.PersonaBO; import com.ftc.gedoc.bo.impl.PersonaBOImpl; import com.ftc.gedoc.exceptions.GeDocBOException; import com.ftc.gedoc.utiles.Persona; import java.io.IOException; import java.io.PrintWriter; import java.sql.Connection; import java.sql.SQLException; import java.util.Collection; import java.util.Iterator; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; public class PersonasRFC extends HttpServlet { protected void processRequest(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { response.setContentType("text/html;charset=ISO-8859-1"); PrintWriter out = response.getWriter(); String jsonPoutput = ""; String sesion = request.getSession().getId(); String tipo = request.getParameter("tipo"); try { PersonaBO bo = new PersonaBOImpl(); Collection<Persona> rst = bo.localizaPersonasPorRFC(tipo.charAt(0), request.getParameter("term"), sesion); Iterator<Persona> personas = rst.iterator(); String callBackJavaScripMethodName = request.getParameter("callback"); String json = "["; while (personas.hasNext()) { Persona persona = personas.next(); json += "{ label: '" + persona.getRfc() + "', id: '" + persona.getIdentificador() + "', nombre: '" + persona.getNombre() + "' },"; } json += "]"; jsonPoutput = callBackJavaScripMethodName + "(" + json + ");"; } catch (GeDocBOException e) { jsonPoutput = "error:"+e.getMessage(); Comunes.escribeLog(getServletContext().getInitParameter("logLocation"), e,(String)request.getSession().getAttribute("usuario")); } finally { out.print(jsonPoutput); } } @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { processRequest(request, response); } @Override protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { //processRequest(request, response); } /** * Returns a short description of the servlet. * * @return a String containing servlet description */ @Override public String getServletInfo() { return "Locate a person by tipo and RFC"; }// </editor-fold> } <file_sep>/src/java/com/ftc/webcom/servlets/Contactos.java package com.ftc.webcom.servlets; import com.ftc.aq.Comunes; import com.ftc.aq.Conexion; import com.ftc.gedoc.bo.ContactoBO; import com.ftc.gedoc.bo.impl.ContactoBOImpl; import com.ftc.gedoc.utiles.Contacto; import java.io.IOException; import java.io.PrintWriter; import java.sql.Connection; import java.sql.SQLException; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; public class Contactos extends HttpServlet { @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { //processRequest(request, response); } @Override protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { response.setContentType("text/html;charset=ISO-8859-1"); PrintWriter out = response.getWriter(); Connection conexion = null; String mensaje = ""; try { String comando = request.getParameter("cmd"); String sesion = request.getSession().getId(); if (comando.equals(Comunes.toMD5("registraContacto-" + sesion))) { conexion = Conexion.getConexion(); String persona = request.getParameter("persona"); String nombre = request.getParameter("nombre"); String apellido = request.getParameter("apellido"); String correo = request.getParameter("correo"); String telefono = request.getParameter("telefono"); String movil = request.getParameter("movil"); String contrasenia = request.getParameter("contrasenia"); String grupo = request.getParameter("grupo"); Contacto contacto = new Contacto(persona, nombre, apellido, correo, telefono, movil, grupo); ContactoBO bo = new ContactoBOImpl(); contacto = bo.insertaContacto(contacto, contrasenia, sesion); if (contacto!=null) { mensaje = "Registro generado correctamente."; }else{ throw new Exception("No se logró determinar el estado del registro."); } } else { throw new Exception("No se identifico la operación solicitada."); } } catch (SQLException sqle) { mensaje = sqle.getSQLState().equals("0") ? sqle.getMessage() : "Excepción al registrar la persona. " + sqle.getSQLState() + "-" + sqle.getErrorCode(); Comunes.escribeLog(getServletContext().getInitParameter("logLocation"), sqle, (String) request.getSession().getAttribute("usuario")); } catch (Exception e) { mensaje = "Excepción al registrar la persona. " + e.getMessage(); Comunes.escribeLog(getServletContext().getInitParameter("logLocation"), e, (String) request.getSession().getAttribute("usuario")); } finally { if (conexion != null) { try { if (!conexion.isClosed()) { conexion.close(); } } catch (SQLException sqle) { //NOTHING TO DO ... } } out.println(mensaje); out.flush(); out.close(); } } @Override public String getServletInfo() { return "Registrar contactos"; } } <file_sep>/src/java/com/ftc/gedoc/utiles/FacturaCabecera.java package com.ftc.gedoc.utiles; public class FacturaCabecera { private String persona; private String cdDocumento; private String archivo; private String locacion; private String serie; private String folio; private String tipo; private String strFecha; private String formaDePago; private String strSubTotal; private double subTotal; private String strDescuento; private double descuento; private String tipoCambio; private String moneda; private String strTotal; private double total; private String metodoDePago; private String lugarExpedicion; private String rfc; private String nombre; private String strTotalImpuestosTrasladados; private double totalImpuestosTrasladados; private String uuid; private String strFechaTimbrado; private String rfcReceptor; private String nombreReceptor; private String iva_strTasa; private double iva_tasa; private String iva_strImporte; private double iva_importe; private String ieps_strTasa; private double ieps_tasa; private String ieps_strImporte; private double ieps_importe; public FacturaCabecera(){} public String getCdDocumento() { return cdDocumento; } public void setCdDocumento(String cdDocumento) { this.cdDocumento = cdDocumento; } public String getPersona() { return persona; } public void setPersona(String persona) { this.persona = persona; } public String getArchivo() { return archivo; } public void setArchivo(String archivo) { this.archivo = archivo; } public String getLocacion() { return locacion; } public void setLocacion(String locacion) { this.locacion = locacion; } public String getSerie() { return serie; } public void setSerie(String serie) { this.serie = serie; } public String getFolio() { return folio; } public void setFolio(String folio) { this.folio = folio; } public String getTipo() { return tipo; } public void setTipo(String tipo) { this.tipo = tipo; } public String getStrFecha() { return strFecha; } public void setStrFecha(String strFecha) { this.strFecha = strFecha; } public String getFormaDePago() { return formaDePago; } public void setFormaDePago(String formaDePago) { this.formaDePago = formaDePago; } public String getStrSubTotal() { return strSubTotal; } public void setStrSubTotal(String strSubTotal) { this.strSubTotal = strSubTotal; } public double getSubTotal() { return subTotal; } public void setSubTotal(double subTotal) { this.subTotal = subTotal; } public String getStrDescuento() { return strDescuento; } public void setStrDescuento(String strDescuento) { this.strDescuento = strDescuento; } public double getDescuento() { return descuento; } public void setDescuento(double descuento) { this.descuento = descuento; } public String getTipoCambio() { if(tipoCambio!=null && !tipoCambio.isEmpty()){ return tipoCambio; }else{ return "0"; } } public void setTipoCambio(String tipoCambio) { this.tipoCambio = tipoCambio; } public String getMoneda() { return moneda; } public void setMoneda(String moneda) { this.moneda = moneda; } public String getStrTotal() { return strTotal; } public void setStrTotal(String strTotal) { this.strTotal = strTotal; } public double getTotal() { return total; } public void setTotal(double total) { this.total = total; } public String getMetodoDePago() { return metodoDePago; } public void setMetodoDePago(String metodoDePago) { this.metodoDePago = metodoDePago; } public String getLugarExpedicion() { return lugarExpedicion; } public void setLugarExpedicion(String lugarExpedicion) { this.lugarExpedicion = lugarExpedicion; } public String getRfc() { return rfc; } public void setRfc(String rfc) { this.rfc = rfc; } public String getNombre() { return nombre; } public void setNombre(String nombre) { this.nombre = nombre; } public String getStrTotalImpuestosTrasladados() { return strTotalImpuestosTrasladados; } public void setStrTotalImpuestosTrasladados(String strTotalImpuestosTrasladados) { this.strTotalImpuestosTrasladados = strTotalImpuestosTrasladados; } public double getTotalImpuestosTrasladados() { return totalImpuestosTrasladados; } public void setTotalImpuestosTrasladados(double totalImpuestosTrasladados) { this.totalImpuestosTrasladados = totalImpuestosTrasladados; } public String getUuid() { return uuid; } public void setUuid(String uuid) { this.uuid = uuid; } public String getStrFechaTimbrado() { return strFechaTimbrado; } public void setStrFechaTimbrado(String strFechaTimbrado) { this.strFechaTimbrado = strFechaTimbrado; } public String getRfcReceptor() { return rfcReceptor; } public void setRfcReceptor(String rfcReceptor) { this.rfcReceptor = rfcReceptor; } public String getNombreReceptor() { return nombreReceptor; } public void setNombreReceptor(String nombreReceptor) { this.nombreReceptor = nombreReceptor; } public String getIva_strTasa() { return iva_strTasa; } public void setIva_strTasa(String iva_strTasa) { this.iva_strTasa = iva_strTasa; } public double getIva_tasa() { return iva_tasa; } public void setIva_tasa(double iva_tasa) { this.iva_tasa = iva_tasa; } public String getIva_strImporte() { return iva_strImporte; } public void setIva_strImporte(String iva_strImporte) { this.iva_strImporte = iva_strImporte; } public double getIva_importe() { return iva_importe; } public void setIva_importe(double iva_importe) { this.iva_importe = iva_importe; } public String getIeps_strTasa() { return ieps_strTasa; } public void setIeps_strTasa(String ieps_strTasa) { this.ieps_strTasa = ieps_strTasa; } public double getIeps_tasa() { return ieps_tasa; } public void setIeps_tasa(double ieps_tasa) { this.ieps_tasa = ieps_tasa; } public String getIeps_strImporte() { return ieps_strImporte; } public void setIeps_strImporte(String ieps_strImporte) { this.ieps_strImporte = ieps_strImporte; } public double getIeps_importe() { return ieps_importe; } public void setIeps_importe(double ieps_importe) { this.ieps_importe = ieps_importe; } } <file_sep>/src/java/com/ftc/gedoc/dao/GrupoDAO.java package com.ftc.gedoc.dao; import com.ftc.gedoc.exceptions.GeDocDAOException; import com.ftc.gedoc.utiles.Grupo; import java.util.List; public interface GrupoDAO { List<Grupo> listado() throws GeDocDAOException; Grupo registrar(Grupo grupo) throws GeDocDAOException; Grupo actualizaPermisosPorId(String id, long permisos) throws GeDocDAOException; Grupo actualizar(Grupo grupo) throws GeDocDAOException; void eliminar(Grupo grupo) throws GeDocDAOException; void eliminarPorId(String id) throws GeDocDAOException; Grupo encuentraPorId(String id) throws GeDocDAOException; } <file_sep>/src/java/com/ftc/gedoc/dao/impl/DocumentoDAOImpl.java package com.ftc.gedoc.dao.impl; import com.ftc.aq.Conexion; import com.ftc.gedoc.dao.DocumentoDAO; import com.ftc.gedoc.exceptions.GeDocDAOException; import com.ftc.gedoc.utiles.Documento; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.List; public class DocumentoDAOImpl implements DocumentoDAO { Connection conexion; @Override public Documento recuperaDocumento(String identificador) throws GeDocDAOException { try { conexion = Conexion.getConexion(); String sql = "SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, cddocele, dsfiles, dstitle,dsobserv, instatus, DATE(A.tmstmp) " + " FROM jdem10t A INNER JOIN jpem00t B ON A.cdperson = B.cdperson " + " WHERE cddocele = ?;"; PreparedStatement statement = conexion.prepareStatement(sql); statement.setString(1, identificador); ResultSet rst = statement.executeQuery(); if (rst != null) { if (rst.first()) { Documento documento = new Documento(); documento.setPersona(rst.getString(1)); documento.setEmpresa(rst.getString(2)); documento.setIdentificador(rst.getString(3)); documento.setArchivos(rst.getString(4)); documento.setTitulo(rst.getString(5)); documento.setObservaciones(rst.getString(6)); documento.setFecha(rst.getDate(8)); return documento; } } return null; } catch (SQLException e) { throw new GeDocDAOException("Imposible recuperar el documento. " + e.getMessage()); } finally { if (conexion != null) { try { conexion.close(); } catch (SQLException e) { } } } } @Override public List<Documento> recuperaDocumentos(String elementos) throws GeDocDAOException { try { conexion = Conexion.getConexion(); String[] cadenaIdentificador = elementos.split(","); StringBuilder identificadores = new StringBuilder(); for (String id : cadenaIdentificador) { identificadores.append("'").append(id).append("',"); } String sql = "SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, cddocele, dsfiles, dstitle,dsobserv, instatus, DATE(A.tmstmp) " + " FROM jdem10t A INNER JOIN jpem00t B ON A.cdperson = B.cdperson " + " WHERE cddocele IN (" + identificadores.substring(0, identificadores.length() - 1) + ");"; PreparedStatement statement = conexion.prepareStatement(sql); //statement.setString(1, identificador); List<Documento> documentos = new ArrayList<Documento>(); ResultSet rst = statement.executeQuery(); while (rst.next()) { Documento documento = new Documento(); documento.setPersona(rst.getString(1)); documento.setEmpresa(rst.getString(2)); documento.setIdentificador(rst.getString(3)); documento.setArchivos(rst.getString(4)); documento.setTitulo(rst.getString(5)); documento.setObservaciones(rst.getString(6)); documento.setFecha(rst.getDate(8)); documentos.add(documento); } return documentos; } catch (SQLException e) { throw new GeDocDAOException("Imposible recuperar los documentos, " + e.getMessage()); } finally { if (conexion != null) { try { conexion.close(); } catch (SQLException e) { } } } } @Override public int eliminaDocumento(Documento documento) throws GeDocDAOException { try { conexion = Conexion.getConexion(); String sql = "DELETE FROM jdem10t WHERE cddocele = ?;"; PreparedStatement statement = conexion.prepareStatement(sql); statement.setString(1, documento.getIdentificador()); return statement.executeUpdate(); } catch (SQLException e) { throw new GeDocDAOException("Imposible eliminar el documento, " + e.getMessage()); } finally { if (conexion != null) { try { conexion.close(); } catch (SQLException e) { } } } } @Override public Documento findById(String id) throws GeDocDAOException { try { conexion = Conexion.getConexion(); PreparedStatement stm = conexion.prepareStatement("SELECT A.cdperson, A.cddocele, A.dsfiles, A.dstitle, A.dsobserv, A.instatus, A.tmstmp, " + " (SELECT dsrazsoc FROM jpem00t WHERE cdperson = A.cdperson)" + " FROM jdem10t A WHERE A.cddocele = ?;"); stm.setString(1, id); ResultSet rst = stm.executeQuery(); Documento documento = new Documento(); if (rst.next()) { documento.setArchivos(rst.getString(3)); documento.setEmpresa(rst.getString(8)); documento.setFecha(rst.getTimestamp(7)); documento.setIdentificador(rst.getString(2)); documento.setObservaciones(rst.getString(5)); documento.setPersona(rst.getString(1)); documento.setTitulo(rst.getString(4)); } return documento; } catch (SQLException e) { throw new GeDocDAOException("Imposible encontrar el documento, " + e.getMessage()); } finally { if (conexion != null) { try { conexion.close(); } catch (SQLException e) { } } } } } <file_sep>/src/java/com/ftc/webcom/servlets/GrupoSeguridad.java package com.ftc.webcom.servlets; import com.ftc.aq.Comunes; import com.ftc.gedoc.bo.GrupoBO; import com.ftc.gedoc.bo.impl.GrupoBOImpl; import com.ftc.gedoc.exceptions.GeDocBOException; import java.io.IOException; import java.io.PrintWriter; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; public class GrupoSeguridad extends HttpServlet { protected void processRequest(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { response.setContentType("text/html;charset=UTF-8"); PrintWriter out = response.getWriter(); HttpSession session = request.getSession(); try { String idgrupo = request.getParameter("idgrupo")==null?"":request.getParameter("idgrupo"); if(idgrupo!=null && !idgrupo.isEmpty() && idgrupo.startsWith((Comunes.toMD5("chmod")+session.getId()).toUpperCase())){ idgrupo = idgrupo.substring((Comunes.toMD5("chmod")+session.getId()).length()); String usuarios = request.getParameter("usuarios")==null?"0":request.getParameter("usuarios"); String cambiar = request.getParameter("cambiar")==null?"0":request.getParameter("cambiar"); String suspender = request.getParameter("suspender")==null?"0":request.getParameter("suspender"); String activar = request.getParameter("activar")==null?"0":request.getParameter("activar"); String grupos = request.getParameter("grupos")==null?"0":request.getParameter("grupos"); String pregistro = request.getParameter("pregistro")==null?"0":request.getParameter("pregistro"); String pcontacto = request.getParameter("pcontacto")==null?"0":request.getParameter("pcontacto"); String pverdoc = request.getParameter("pverdoc")==null?"0":request.getParameter("pverdoc"); String psubirdoc = request.getParameter("psubirdoc")==null?"0":request.getParameter("psubirdoc"); String pnotificaciones = request.getParameter("pnotificaciones")==null?"0":request.getParameter("pnotificaciones"); String pestado = request.getParameter("pestado")==null?"0":request.getParameter("pestado"); String peliminar = request.getParameter("peliminar")==null?"0":request.getParameter("peliminar"); String pdescarga = request.getParameter("pdescarga")==null?"0":request.getParameter("pdescarga"); String cregistro = request.getParameter("cregistro")==null?"0":request.getParameter("cregistro"); String ccontacto = request.getParameter("ccontacto")==null?"0":request.getParameter("ccontacto"); String cverdoc = request.getParameter("cverdoc")==null?"0":request.getParameter("cverdoc"); String csubirdoc = request.getParameter("csubirdoc")==null?"0":request.getParameter("csubirdoc"); String cnotificaciones = request.getParameter("cnotificaciones")==null?"0":request.getParameter("cnotificaciones"); String cestado = request.getParameter("cestado")==null?"0":request.getParameter("cestado"); String celiminar = request.getParameter("celiminar")==null?"0":request.getParameter("celiminar"); String cdescarga = request.getParameter("cdescarga")==null?"0":request.getParameter("cdescarga"); String viaticos = request.getParameter("viaticos")==null?"0":request.getParameter("viaticos"); String caja = request.getParameter("caja")==null?"0":request.getParameter("caja"); String aduanales = request.getParameter("aduanales")==null?"0":request.getParameter("aduanales"); String administrador = request.getParameter("administrador")==null?"0":request.getParameter("administrador"); StringBuilder sModo = new StringBuilder(); sModo.append(pregistro).append(pcontacto).append(pverdoc).append(psubirdoc).append(pnotificaciones).append(pestado).append(peliminar).append(pdescarga); sModo.append(cregistro).append(ccontacto).append(cverdoc).append(csubirdoc).append(cnotificaciones).append(cestado).append(celiminar).append(cdescarga); sModo.append(viaticos).append(caja).append(aduanales).append(administrador); sModo.append(usuarios).append(cambiar).append(suspender).append(activar).append(grupos); int modo = Integer.parseInt(sModo.toString(), 2); GrupoBO bo = new GrupoBOImpl(); bo.asignarPermisos(idgrupo.toLowerCase(), modo); /* TODO output your page here. You may use following sample code. */ out.println("<!DOCTYPE html>"); out.println("<html>"); out.println("<head>"); out.println("<title>::: ctrldoce.asignación de permisos :::</title>"); out.println("</head>"); out.println("<body>"); out.println("Se actualizaron correctamente los permisos"); out.println("<script>setTimeout('location.replace(\"../../xuser/grupo.jsp\")',1521);</script>"); out.println("</body>"); out.println("</html>"); } else { throw new GeDocBOException("No se localizo un grupo valido."); } }catch(GeDocBOException e){ out.println("<!DOCTYPE html>"); out.println("<html>"); out.println("<head>"); out.println("<title>::: ctrldoce.asignación de permisos :::</title>"); out.println("</head>"); out.println("<body>"); out.println(e.getMessage()); out.println("<script>setTimeout('location.replace(\"../../xuser/grupo.jsp\")',1521);</script>"); out.println("</body>"); out.println("</html>"); } finally { out.close(); } } @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { //processRequest(request, response); } @Override protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { processRequest(request, response); } @Override public String getServletInfo() { return "::: ctrldoce.asignacion de permisos :::"; } } <file_sep>/src/java/com/ftc/gedoc/dao/CEPConceptoDAO.java package com.ftc.gedoc.dao; import com.ftc.gedoc.exceptions.GeDocDAOException; import com.ftc.services.invoice.modelo.CEPConcepto; import java.util.List; public interface CEPConceptoDAO { CEPConcepto registraCEPConcepto(CEPConcepto cepConcepto) throws GeDocDAOException; CEPConcepto actualizaCEPConcepto(CEPConcepto cepConcepto) throws GeDocDAOException; int eliminaCEPConcepto(String claveProductoServicio) throws GeDocDAOException; CEPConcepto obtieneCEPConcepto(String claveProductoServicio) throws GeDocDAOException; List<CEPConcepto> listaCEPConcepto() throws GeDocDAOException; } <file_sep>/src/java/com/ftc/gedoc/utiles/comparators/ExpensesComparatorPorDocumento.java package com.ftc.gedoc.utiles.comparators; import com.ftc.gedoc.utiles.PeriodoCabecera; import java.util.Comparator; public class ExpensesComparatorPorDocumento implements Comparator<PeriodoCabecera>{ @Override public int compare(PeriodoCabecera o1, PeriodoCabecera o2) { return o1.getDocumento().compareTo(o2.getDocumento()); } } <file_sep>/src/java/com/ftc/gedoc/utiles/TipoComprobante.java package com.ftc.gedoc.utiles; import com.ftc.gedoc.exceptions.GeDocDAOException; import java.util.Map; import java.util.TreeMap; public class TipoComprobante { private String tipoGasto; private String codigo; private String descripcion; private final static String[][] VENDEDORES = {{"VH","Hospedaje"}, {"VA","Alimentos"}, {"VP","Peajes"}, {"VN","No deducibles"}, {"VT","Transporte local"},{"VD","Diversos"}}; private final static String[][] CAJA = {{"CF","Fletes"}, {"CP","Papelería"}, {"CQ","Pquetería"}, {"CS","Despensa"}, {"CN","No deducibles"},{"CM","Manto bodega"},{"CT","Manto eq transp"},{"CA","Alimentos"},{"CH","Hospedaje"},{"CJ","Peaje"},{"CD","Diversos"}}; private final static String[][] AGENTES = {{"AH","Honorarios"}, {"AA","Almacenaje"}, {"AM","Maniobras"}, {"AF","Fumigaciones"}, {"AC","Fletes y casetas"},{"AT","DTA"},{"AP","PRV"},{"AV","Reconocimiento previo"},{"AS","Servicios complementarios"}, {"AE","Procesamiento electrónico"},{"AD","Otros gastos"}}; private TipoComprobante() {} public final static Map<String, String> listaTipoComprobante(String tipoGasto) throws GeDocDAOException{ Map<String, String> mapa = new TreeMap<String, String>(); if (tipoGasto.equals("h")){ for(String[] elemento:CAJA){ mapa.put(elemento[0], elemento[1]); } return mapa; } else if(tipoGasto.equals("t")){ for(String[] elemento:VENDEDORES){ mapa.put(elemento[0], elemento[1]); } return mapa; } else if(tipoGasto.equals("a")){ for(String[] elemento:AGENTES){ mapa.put(elemento[0], elemento[1]); } return mapa; } else { throw new GeDocDAOException(String.format("El tipo de gasto %s no se identifico, por lo que no hay listado.", tipoGasto)); } } public final static String getDescripcion(String codigo){ for(String[] tipo:VENDEDORES){ if(tipo[0].equals(codigo)){ return tipo[1]; } } for(String[] tipo:CAJA){ if(tipo[0].equals(codigo)){ return tipo[1]; } } for(String[] tipo:AGENTES){ if(tipo[0].equals(codigo)){ return tipo[1]; } } return ""; } public String getTipoGasto() { return tipoGasto; } public void setTipoGasto(String tipoGasto) { this.tipoGasto = tipoGasto; } public String getCodigo() { return codigo; } public void setCodigo(String codigo) { this.codigo = codigo; } public String getDescripcion(){ return this.descripcion; } public void setDescripcion(String descripcion){ this.descripcion = descripcion; } } <file_sep>/src/java/com/ftc/services/invoice/modelo/CEPArchivo.java package com.ftc.services.invoice.modelo; import java.util.Date; public class CEPArchivo { private String identificador; private String persona; private String titulo; private String observaciones; private String archivos; private String uuid; private Date fecha; private String estatus; private String usuario; private String nombre; public String getIdentificador() { return identificador; } public void setIdentificador(String identificador) { this.identificador = identificador; } public String getPersona(){ return this.persona; } public void setPersona(String persona){ this.persona = persona; } public String getTitulo() { return titulo; } public void setTitulo(String titulo) { this.titulo = titulo; } public String getObservaciones() { return observaciones; } public void setObservaciones(String observaciones) { this.observaciones = observaciones; } public String getArchivos() { return archivos; } public void setArchivos(String archivos) { this.archivos = archivos; } public String getUuid() { return uuid; } public void setUuid(String uuid) { this.uuid = uuid; } public Date getFecha() { return fecha; } public void setFecha(Date fecha) { this.fecha = fecha; } public String getEstatus() { return estatus; } public void setEstatus(String estatus) { this.estatus = estatus; } public String getUsuario() { return usuario; } public void setUsuario(String usuario) { this.usuario = usuario; } public String getNombre() { return nombre; } public void setNombre(String nombre) { this.nombre = nombre; } @Override public String toString() { return "CEPArchivo{" + "identificador=" + identificador + ", titulo=" + titulo + ", observaciones=" + observaciones + ", archivos=" + archivos + '}'; } } <file_sep>/db/procedures.sql DELIMITER // DROP FUNCTION IF EXISTS getUser; CREATE FUNCTION getUser() RETURNS VARCHAR(32) DETERMINISTIC RETURN SUBSTRING(user(),1,INSTR(user(),'@')-1); DROP PROCEDURE IF EXISTS ingresaSolicitudReset; CREATE PROCEDURE ingresaSolicitudReset(IN correo VARCHAR(120), IN rfc VARCHAR(13), IN agente VARCHAR(500), OUT referencia VARCHAR(64), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = correo AND cdperson = (SELECT cdperson FROM jpem00t WHERE dsrfc = rfc); IF(existe>0)THEN IF (agente='')THEN SET error = 'Error 50051: No se han especificado todos los valores requeridos.'; ELSE SELECT COUNT(cdsolres) INTO existe FROM jsegsot WHERE dsmail = correo AND DATE(tsfecdwn) <= DATE(CURRENT_TIMESTAMP); IF(existe>0)THEN SET error = 'Error 50054: Hay una solicitud pendiente, no se puede registrar otra.'; ELSE SELECT CURRENT_TIMESTAMP INTO cts; SELECT CONCAT(MD5(CONCAT(cts,'zreset')),MD5(CONCAT(cts,correo))) INTO referencia; INSERT INTO jsegsot (cdsolres,dsmail,dsrfc,tsfecsol,tsfecdwn,instatus,dsagent,tmstmp) VALUES (referencia,correo,rfc,CURRENT_TIMESTAMP,ADDTIME(CURRENT_TIMESTAMP, '24:00:000.0'),'A',agente,cts); SET error = ''; END IF; END IF; ELSE SET error = 'Error 50050: Este usuario no se encuentra registrado.'; END IF; END; // DROP PROCEDURE IF EXISTS aplicaSolicitudReset; CREATE PROCEDURE aplicaSolicitudReset(IN identificador VARCHAR(64), IN valenc VARCHAR(16), IN ipfrom VARCHAR(15), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE contacto CHAR(16) DEFAULT ''; DECLARE rfc VARCHAR(13) DEFAULT ''; DECLARE usuario VARCHAR(100) DEFAULT ''; SELECT COUNT(cdsolres) INTO existe FROM jsegsot WHERE cdsolres = identificador AND DATE(tsfecdwn) <= DATE(CURRENT_TIMESTAMP) ; IF(existe>0)THEN SELECT COUNT(cdsolres) INTO existe FROM jsegsot WHERE cdsolres = identificador AND DATE(tsfecdwn) <= DATE(CURRENT_TIMESTAMP) AND instatus = 'A'; IF(existe>0)THEN SELECT dsmail, dsrfc INTO usuario, rfc FROM jsegsot WHERE cdsolres = identificador; SELECT cdcontac INTO contacto FROM jpem10t WHERE dsmail = usuario; UPDATE jusm01t SET dsvalenc = CONCAT(MD5(CONCAT(contacto,tmstmp)),MD5(valenc)), programa = CONCAT('RSPWD',ipfrom) WHERE cdusulog = usuario AND cdperson = (SELECT cdperson FROM jpem00t WHERE dsrfc = rfc); SET error = ''; ELSE SET error = 'Error 50053: Esta solicitud ya fue aplicada.'; END IF; ELSE SET error = 'Error 50052: Esta solicitud ya no existe.'; END IF; END; // DROP PROCEDURE IF EXISTS listaDocumentos; CREATE PROCEDURE listaDocumentos(IN persona CHAR(16), IN tipo CHAR(1), IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0) THEN IF (persona = '*') THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, cddocele, dsfiles, dstitle,dsobserv, instatus, DATE(A.tmstmp) FROM jdem10t A INNER JOIN jpem00t B ON A.cdperson = B.cdperson WHERE intipprs = tipo ORDER BY empresa, A.tmstmp desc; ELSE SELECT cdperson, nombreEmpresa(cdperson) empresa, cddocele, dsfiles, dstitle,dsobserv, instatus, DATE(tmstmp) FROM jdem10t WHERE cdperson = persona ORDER BY tmstmp desc; END IF; ELSE SELECT 'Sesi�n no valida.','','','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS listaDocumentosFiltro; CREATE PROCEDURE listaDocumentosFiltro(IN persona CHAR(16), IN tipo CHAR(1), IN fechai DATE, IN fechaf DATE, IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0) THEN IF (persona = '*') THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, cddocele, dsfiles, dstitle,dsobserv, instatus, DATE(A.tmstmp) FROM jdem10t A INNER JOIN jpem00t B ON A.cdperson = B.cdperson WHERE intipprs = tipo AND DATE(A.tmstmp) BETWEEN fechai AND fechaf ORDER BY empresa, A.tmstmp desc; ELSE SELECT cdperson, nombreEmpresa(cdperson) empresa, cddocele, dsfiles, dstitle,dsobserv, instatus, DATE(tmstmp) FROM jdem10t WHERE cdperson = persona AND DATE(tmstmp) BETWEEN fechai AND fechaf ORDER BY tmstmp desc; END IF; ELSE SELECT 'Sesi�n no valida.','','','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS registraDocumento; CREATE PROCEDURE registraDocumento(IN persona CHAR(16), IN archivo VARCHAR(500), IN titulo VARCHAR(35), IN observaciones VARCHAR(2000), IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT getCodigoApp('DE') INTO referencia; INSERT INTO jdem10t (cdperson,cddocele,dsfiles,dstitle,dsobserv,instatus,cdusuari,dsipfrom,programa,tmstmp) VALUES (persona,referencia,archivo,titulo,observaciones,'A',obtieneUsuario(sesion),obtieneIp(sesion),'registraDocumento',CURRENT_TIMESTAMP); SET error = ''; ELSE SET error = 'Error 50039: La sesi�n no es correcta. Restablezca la aplicaic�n.'; END IF; END; // DROP PROCEDURE IF EXISTS registraCabeceraFactura; CREATE PROCEDURE registraCabeceraFactura(IN persona CHAR(16),IN documento VARCHAR(35),IN archivo VARCHAR(500),IN tipo VARCHAR(100),IN serie VARCHAR(60),IN folio INT, IN fecha VARCHAR(30), IN formaPago VARCHAR(250), IN subTotal DOUBLE(13,2), IN descuento DOUBLE(13,2), IN tipoCambio DOUBLE(13,2), IN total DOUBLE(16,2), IN moneda VARCHAR(30), IN metodo VARCHAR(250), IN expedicion VARCHAR(250), IN rfcemisor VARCHAR(13), IN emisor VARCHAR(120), IN rfcreceptor VARCHAR(13), IN receptor VARCHAR(120),IN impuesto DOUBLE(13,2), IN uuid VARCHAR(40),IN timbre VARCHAR(21), IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE isdupkey INT DEFAULT 0; BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; DECLARE EXIT HANDLER FOR 1062 SET isdupkey = 1; SELECT COUNT(cdfile) INTO existe FROM jdem20t WHERE cdperson=persona AND cddocele=documento; IF (existe>0)THEN SET error = 'Error 50031: Ya existe un registro asociado al documento. No se puede registrar.'; SET referencia = ''; ELSE SELECT getCodigoApp('FL') INTO referencia; SELECT CURRENT_TIMESTAMP INTO cts; INSERT INTO jdem20t (cdperson,cddocele,cdfile,dsfile,dstipo,dsserie,dsfolio,dsfecha,dsfrmpag,dbsubtot,dbdescto,dbtipcam,dbtotal,dsmoneda,dsmetpag,dslugexp,dsrfcemi,dsnomemi,dsrfcrec,dsnomrec, dbimptra,dsuuid,dsfectim,programa,tmstmp,cdusuari) VALUES (persona,documento,referencia,archivo,tipo,serie,folio,fecha,formaPago,subtotal,descuento,tipocambio,total,moneda,metodo,expedicion,rfcemisor,emisor,rfcreceptor,receptor,impuesto,uuid,timbre,'spInsertaFactura',CURRENT_TIMESTAMP,obtieneUsuario(sesion)); SET error = ''; END IF; END; IF (isdupkey>0)THEN SET referencia = ''; SET error = 'Error 50030: Existe una llave d�plicada en facturas. Notifique a sistemas.'; END IF; END; // DROP PROCEDURE IF EXISTS getCodigo; CREATE PROCEDURE getCodigo(IN var_objeto char(2), IN var_programa char(12), OUT var_codigo char(16)) BEGIN DECLARE lon int; DECLARE inprestado int; SELECT COUNT(inactivo) INTO inprestado FROM kaqcidt WHERE inactivo='n' AND cdobjeto=var_objeto; WHILE (inprestado=0) DO SELECT COUNT(inactivo) INTO inprestado FROM kaqcidt WHERE inactivo='n' AND cdobjeto=var_objeto; END WHILE; IF (inprestado>0) THEN SELECT (CAST(SUBSTRING(idactual,1,16) AS UNSIGNED) + CAST(SUBSTRING(SIN(CAST(SUBSTRING(idactual,1,16) AS UNSIGNED)),4,1) AS UNSIGNED))+3 INTO var_codigo FROM kaqcidt WHERE cdobjeto = var_objeto; SELECT CONCAT(REPEAT('0', 16-LENGTH(var_codigo)),var_codigo) INTO var_codigo; UPDATE kaqcidt SET inactivo = 's' , idactual = var_codigo, cdusuari = getUser(), programa = var_programa WHERE cdobjeto = var_objeto; END IF; END; // DROP PROCEDURE IF EXISTS setCodigoApp; CREATE PROCEDURE setCodigoApp(var_cdobjeto char(2)) BEGIN UPDATE kaqcidt SET inactivo = 'n' WHERE cdobjeto = var_cdobjeto; END // DROP FUNCTION IF EXISTS getCodigoApp; CREATE FUNCTION getCodigoApp(tipo char(2)) RETURNS CHAR(16) DETERMINISTIC BEGIN CALL getCodigo(tipo,'f(getCodigo)', @x); CALL setCodigoApp(tipo); RETURN @x; END // DROP FUNCTION IF EXISTS obtieneUsuario; CREATE FUNCTION obtieneUsuario(sesion VARCHAR(32)) RETURNS CHAR(16) DETERMINISTIC BEGIN RETURN (SELECT cdusulog FROM jpem90t WHERE idsesion = sesion); END // DROP FUNCTION IF EXISTS obtieneIP; CREATE FUNCTION obtieneIp(sesion CHAR(32)) RETURNS VARCHAR(15) DETERMINISTIC BEGIN RETURN (SELECT dsipfrom FROM jpem90t WHERE idsesion = sesion); END; // DROP PROCEDURE IF EXISTS registraPersona; CREATE PROCEDURE registraPersona(IN razonsocial VARCHAR(120), IN rfc VARCHAR(13), IN tipo ENUM('C','P'), IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE isdupkey INT DEFAULT 0; BEGIN DECLARE existe INT DEFAULT 0; DECLARE EXIT HANDLER FOR 1062 SET isdupkey = 1; SELECT COUNT(dsrfc) INTO existe FROM jpem00t WHERE dsrfc = rfc; IF(existe>0)THEN SET error = 'Error 50031: Este RFC ya se encuentra registrado, no se puede volver a registrar.'; SET referencia = ''; ELSE SELECT getCodigoApp('PR') INTO referencia; INSERT INTO jpem00t (cdperson,dsrazsoc,dsrfc,dsfolder,dslogo,isowner,intipprs,cdusuari,programa,tmstmp) VALUE (referencia,razonsocial,rfc,referencia,'','N',tipo,obtieneUsuario(sesion),'registraPersona',CURRENT_TIMESTAMP); SET error = ''; END IF; END; IF (isdupkey>0)THEN SET referencia = ''; SET error = 'Error 50030: Existe una llave d�plicada en personas. Notifique a sistemas.'; END IF; END; // DROP FUNCTION IF EXISTS obtienePropietario; CREATE FUNCTION obtienePropietario(sesion VARCHAR(32)) RETURNS CHAR(16) DETERMINISTIC BEGIN DECLARE existe INT DEFAULT 0; DECLARE persona CHAR(16) DEFAULT ''; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT cdusulog INTO persona FROM jpem90t WHERE idsesion = sesion; RETURN (SELECT cdperson FROM jpem10t WHERE cdcontac = persona); ELSE RETURN ''; END IF; END; // DROP PROCEDURE IF EXISTS obtienePersonas; CREATE PROCEDURE obtienePersonas(IN tipo CHAR(1), IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN IF (tipo= '-') THEN SELECT COUNT(cdperson) INTO existe FROM jpem00t WHERE isowner = 'S' AND cdperson = obtienePropietario(sesion); IF (existe=1)THEN SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE cdperson = obtienePropietario(sesion) AND isowner = 'S'; ELSE SELECT 'No se permite ','esta ', 'consulta.','-'; END IF; ELSE SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE intipprs = tipo AND isowner = 'N'; END IF; ELSE SELECT 'Sesi�n ','no ', 'valida.',''; END IF; END; // DROP PROCEDURE IF EXISTS localizaPersonasPorRFC; CREATE PROCEDURE localizaPersonasPorRFC(IN tipo CHAR(1), IN rfc VARCHAR(35), IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE condicion VARCHAR(37) DEFAULT ''; DECLARE micodigo CHAR(16) DEFAULT ''; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT cdperson INTO micodigo FROM jpem10t WHERE cdcontac = (SELECT cdusulog FROM jpem90t WHERE idsesion = sesion); SELECT COUNT(isowner) INTO existe FROM jpem00t WHERE cdperson = micodigo AND isowner = 'S'; IF(existe>0)THEN SELECT CONCAT('%',rfc,'%') INTO condicion; IF(tipo='*') THEN SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE isowner = 'N' AND dsrfc LIKE (condicion); ELSE SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE intipprs = tipo AND isowner = 'N' AND dsrfc LIKE (condicion); END IF; ELSE SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE cdperson = micodigo; END IF; ELSE SELECT 'Sesi�n ','no ', 'valida.',''; END IF; END; // DROP PROCEDURE IF EXISTS localizaPersonas; CREATE PROCEDURE localizaPersonas(IN tipo CHAR(1), IN nombre VARCHAR(35), IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE condicion VARCHAR(37) DEFAULT ''; DECLARE micodigo CHAR(16) DEFAULT ''; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT cdperson INTO micodigo FROM jpem10t WHERE cdcontac = (SELECT cdusulog FROM jpem90t WHERE idsesion = sesion); SELECT COUNT(isowner) INTO existe FROM jpem00t WHERE cdperson = micodigo AND isowner = 'S'; IF(existe>0)THEN SELECT CONCAT('%',nombre,'%') INTO condicion; IF(tipo='*') THEN SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE isowner = 'N' AND dsrazsoc LIKE (condicion); ELSE SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE intipprs = tipo AND isowner = 'N' AND dsrazsoc LIKE (condicion); END IF; ELSE SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE cdperson = micodigo; END IF; ELSE SELECT 'Sesi�n ','no ', 'valida.',''; END IF; END; // DROP PROCEDURE IF EXISTS preRegistroContacto; CREATE PROCEDURE preRegistroContacto(IN persona CHAR(16), IN primero VARCHAR(35), IN segundo VARCHAR(60), IN correo VARCHAR(100), in telefono VARCHAR(60), IN movil VARCHAR(14), IN grupo VARCHAR(1), IN valenc VARCHAR(16),IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE referencia VARCHAR(32); DECLARE error VARCHAR(250); CALL registraContacto(persona,primero,segundo,correo,telefono,movil,grupo,valenc,'preregistro',@referencia,@error); IF(referencia<>'')THEN SET referencia = @referencia; UPDATE jpem01t SET instatus = 'P' WHERE cdperson = persona AND cdcontac = @referencia; ELSE SET error = @error; END IF; END; // DROP PROCEDURE IF EXISTS registraContacto; CREATE PROCEDURE registraContacto(IN persona CHAR(16), IN primero VARCHAR(35), IN segundo VARCHAR(60), IN correo VARCHAR(100), in telefono VARCHAR(60), IN movil VARCHAR(14), IN grupo VARCHAR(32), IN valenc VARCHAR(16),IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE isdupkey INT DEFAULT 0; BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; DECLARE EXIT HANDLER FOR 1062 SET isdupkey = 1; SELECT COUNT(dsmail) INTO existe FROM jpem10t WHERE dsmail = correo; IF (existe>0)THEN SET error = 'Error 50031: Este correo electr�nico ya se encuentra registrado, no se puede volver a registrar.'; SET referencia = ''; ELSE SELECT getCodigoApp('CT') INTO referencia; SELECT CURRENT_TIMESTAMP INTO cts; INSERT INTO jpem10t (cdperson,cdcontac,dsfirst,dslast,dsmail,dstelloc,dstelmov,cdusuari,programa,tmstmp,dsipfrom) VALUES (persona,referencia,primero,segundo,correo,telefono,movil,obtieneUsuario(sesion),'registraContacto',cts,obtieneIp(sesion)); INSERT INTO jusm01t (cdperson,cdcontac,cdidegrp,cdusulog,dsvalenc,instatus,inusumod,dsipfrom,cdusuari,programa,tmstmp) VALUES (persona,referencia,grupo,correo,CONCAT(MD5(CONCAT(referencia,cts)),MD5(valenc)),'A',0,obtieneIp(sesion),obtieneUsuario(sesion),'registraContacto',cts); SET error = ''; END IF; END; IF (isdupkey>0)THEN SET referencia = ''; SET error = 'Error 50030: Existe una llave d�plicada en personas. Notifique a sistemas.'; END IF; END; // DROP PROCEDURE IF EXISTS actualizaContacto; CREATE PROCEDURE actualizaContacto(IN identificador CHAR(16), IN primero VARCHAR(35), IN segundo VARCHAR(60), IN correo VARCHAR(100), IN telefono VARCHAR(60), IN movil VARCHAR(14), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; SELECT COUNT(dsmail) INTO existe FROM jpem10t WHERE dsmail = correo AND cdcontac <> identificador; IF (existe>0)THEN SET error = 'Error 50031: Este correo electr�nico ya se encuentra registrado, no se puede volver a registrar.'; ELSE UPDATE jpem10t SET dsfirst = primero, dslast = segundo, dsmail = correo, dstelloc = telefono, dstelmov = movil, programa = CONCAT('ACT',sesion) WHERE cdcontac = identificador; UPDATE jusm01t SET cdusulog = correo, programa = CONCAT('ACT',sesion) WHERE cdcontac = identificador; SET error = ''; END IF; END; // DROP PROCEDURE IF EXISTS eliminaContacto; CREATE PROCEDURE eliminaContacto(IN identificador CHAR(16), IN correo VARCHAR(100), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; SELECT COUNT(dsmail) INTO existe FROM jpem10t WHERE dsmail = correo AND cdcontac = identificador; IF (existe<=0)THEN SET error = 'Error 50032: El correo electr�nico no se encuentra registrado, no se puede eliminar el registro.'; ELSE DELETE FROM jusm01t WHERE cdusulog = correo AND cdcontac = (SELECT cdcontac FROM jpem10t WHERE dsmail = correo AND cdcontac = identificador); DELETE FROM jpem10t WHERE dsmail = correo AND cdcontac = identificador; SET error = ''; END IF; END; // DROP PROCEDURE IF EXISTS obtieneContactos; CREATE PROCEDURE obtieneContactos(IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE persona CHAR(16) DEFAULT ''; DECLARE tipo CHAR(1) DEFAULT ''; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT A.cdperson, intipprs INTO persona, tipo FROM jpem10t A INNER JOIN jpem00t B ON A.cdperson = B.cdperson WHERE cdcontac = (SELECT cdusulog FROM jpem90t WHERE idsesion = sesion); CALL obtieneContactosPersona(persona, tipo, sesion); ELSE SELECT '','','Sesi�n no valida.','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS obtieneContactosPersona; CREATE PROCEDURE obtieneContactosPersona(IN persona VARCHAR(16), IN tipo CHAR(1), IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN IF (persona = '*') THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, A.cdcontac, dsfirst, dslast, dsmail, dstelloc, dstelmov, cdidegrp FROM jpem10t A INNER JOIN jusm01t B ON A.cdperson = B.cdperson AND A.cdcontac = B.cdcontac INNER JOIN jpem00t C ON A.cdperson = C.cdperson WHERE intipprs = tipo; ELSE SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, A.cdcontac, dsfirst, dslast, dsmail, dstelloc, dstelmov, cdidegrp FROM jpem10t A INNER JOIN jusm01t B ON A.cdperson = B.cdperson AND A.cdcontac = B.cdcontac WHERE A.cdperson = persona AND persona NOT IN (SELECT cdperson FROM jpem00t WHERE isowner = 'S'); END IF; ELSE SELECT '','','Sesi�n no valida.','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS listaContactos; CREATE PROCEDURE listaContactos(IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, A.cdcontac, dsfirst, dslast, dsmail, dstelloc, dstelmov, cdidegrp FROM jpem10t A INNER JOIN jusm01t B ON A.cdperson = B.cdperson AND A.cdcontac = B.cdcontac WHERE A.cdperson NOT IN (SELECT cdperson FROM jpem00t WHERE isowner = 'S') AND instatus = 'A'; ELSE SELECT '','','Sesi�n no valida.','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS listaContactosSuspendidos; CREATE PROCEDURE listaContactosSuspendidos(IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, A.cdcontac, dsfirst, dslast, dsmail, dstelloc, dstelmov, cdidegrp FROM jpem10t A INNER JOIN jusm01t B ON A.cdperson = B.cdperson AND A.cdcontac = B.cdcontac WHERE A.cdperson NOT IN (SELECT cdperson FROM jpem00t WHERE isowner = 'S') AND instatus = 'S'; ELSE SELECT '','','Sesi�n no valida.','','','','',''; END IF; END; // DROP FUNCTION IF EXISTS nombreEmpresa; CREATE FUNCTION nombreEmpresa(empresa CHAR(16)) RETURNS VARCHAR(100) DETERMINISTIC BEGIN RETURN (SELECT dsrazsoc FROM jpem00t WHERE cdperson = empresa); END; // DROP PROCEDURE IF EXISTS actualizaGrupoContacto; CREATE PROCEDURE actualizaGrupoContacto(IN contacto CHAR(16), IN grupo VARCHAR(32), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); IF(existe>0)THEN UPDATE jusm01t SET cdidegrp = grupo, programa = CONCAT('NWGRP',grupo,obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); SET error = ''; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS actualizaAccesoContacto; CREATE PROCEDURE actualizaAccesoContacto(IN usuario VARCHAR(120), IN valenc VARCHAR(16), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE contacto CHAR(16) DEFAULT ''; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = usuario; IF(existe>0)THEN SELECT cdcontac INTO contacto FROM jpem10t WHERE dsmail = usuario; SELECT COUNT(isowner) INTO existe FROM jpem00t WHERE isowner = 'S' AND cdperson = (SELECT cdperson FROM jpem10t WHERE cdcontac = (SELECT cdusulog FROM jpem90t WHERE idsesion = sesion)); IF (existe>0)THEN UPDATE jusm01t SET dsvalenc = CONCAT(MD5(CONCAT(contacto,tmstmp)),MD5(valenc)), programa = CONCAT('NWPWD',obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = usuario; SET error = ''; ELSE SET error = 'Error 50040: Acceso a operaci�n denegada.'; END IF; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS suspendeAccesoContacto; CREATE PROCEDURE suspendeAccesoContacto(IN contacto CHAR(16), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); IF(existe>0)THEN UPDATE jusm01t SET instatus = 'S', dsvalenc = CONCAT(MD5(CONCAT(contacto,CURRENT_TIMESTAMP)),MD5('USUARIO-SUSPENDIDO')), programa = CONCAT('MDSUS',obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); SET error = ''; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS eliminaAccesoContacto; CREATE PROCEDURE eliminaAccesoContacto(IN contacto CHAR(16), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); IF(existe>0)THEN UPDATE jusm01t SET instatus = 'E', dsvalenc = CONCAT(MD5(CONCAT(CURRENT_TIMESTAMP,'0000000000000000')),MD5('USUARIO-ELIMINADO')), programa = CONCAT('MDELM',obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); SET error = ''; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS activaAccesoContacto; CREATE PROCEDURE activaAccesoContacto(IN contacto CHAR(16), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); IF(existe>0)THEN UPDATE jusm01t SET instatus = 'A', programa = CONCAT('MDACT',obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); SET error = ''; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS whois; CREATE PROCEDURE whois(IN usuario VARCHAR(100), IN valor VARCHAR(16), IN rfc VARCHAR(13), IN ipfrom VARCHAR(15), IN sesion VARCHAR(32), OUT seguridad INT, OUT referencia CHAR(16), OUT propietario CHAR(1), OUT persona CHAR(16), OUT error VARCHAR(255)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(dsrfc) INTO existe FROM jpem00t WHERE dsrfc = rfc; IF (existe>0)THEN -- SELECT cdperson INTO persona FROM jpem00t WHERE dsrfc = rfc; SELECT COUNT(cdcontac) INTO existe FROM jusm01t WHERE cdusulog = usuario AND dsvalenc = CONCAT(MD5(CONCAT(cdcontac,tmstmp)),MD5(valor)) AND cdperson = (SELECT cdperson FROM jpem00t WHERE dsrfc = rfc); IF(existe>0)THEN SELECT cdcontac, cdperson INTO referencia, persona FROM jusm01t WHERE cdusulog = usuario AND dsvalenc = CONCAT(MD5(CONCAT(cdcontac,tmstmp)),MD5(valor)) AND cdperson = (SELECT cdperson FROM jpem00t WHERE dsrfc = rfc); -- SELECT COUNT(cdperson) INTO existe FROM jpem10t WHERE cdperson = persona AND cdcontac = referencia; -- IF(existe>0)THEN SELECT ingrpmod INTO seguridad FROM jgrm01t WHERE cdidegrp = (SELECT cdidegrp FROM jusm01t WHERE cdcontac = referencia); SELECT isowner INTO propietario FROM jpem00t WHERE cdperson = persona; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT COUNT(dsipfrom) INTO existe FROM jpem90t WHERE idsesion = sesion AND dsipfrom = ipfrom; IF(existe>0)THEN SET error = ''; ELSE SET error = 'Error 50043: No se puede validar la sesi�n. Espere unos minutos para volver a intentarlo.'; END IF; ELSE INSERT INTO jpem90t (idsesion,cdusulog,dsusulog,dsipfrom,cdusuari,programa,tmstmp) VALUES (sesion,referencia,usuario,ipfrom,getUser(),'insertaSesion',CURRENT_TIMESTAMP); SET error = ''; END IF; -- ELSE -- SET error = 'Error 50042:Este usuario no esta asociado al cliente.'; -- END IF; ELSE SET error = (SELECT CONCAT('Error 50041:El usuario o la contrase�a son incorrectos en: ', rfc)); END IF; ELSE SET error = 'Error 50040:El registro de empresa no existe.'; END IF; END; // DROP PROCEDURE IF EXISTS listaGrupos; CREATE PROCEDURE listaGrupos(IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT cdidegrp, dsidegrp FROM jgrm01t WHERE cdidegrp NOT IN ('*','A') ORDER BY dsidegrp; ELSE SELECT '','La sesion no existe'; END IF; END; // DROP PROCEDURE IF EXISTS consultaValoresSesion; CREATE PROCEDURE consultaValoresSesion(IN sesion VARCHAR(32), OUT identificador CHAR(16), OUT dominio CHAR(16), OUT razonsocial VARCHAR(100), OUT folder CHAR(16), OUT tipoPersona CHAR(1), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT A.cdperson, B.cdcontac, A.dsrazsoc, A.dsfolder, A.intipprs INTO dominio, identificador, razonsocial, folder, tipoPersona FROM jpem00t A INNER JOIN jpem10t B ON A.cdperson = B.cdperson INNER JOIN jpem90t C ON B.cdcontac = C.cdusulog WHERE idsesion = sesion; SET error = ''; ELSE SET error = 'Error 50039: La sesi�n no existe.'; END IF; END; // DROP PROCEDURE IF EXISTS registraGrupo; CREATE PROCEDURE registraGrupo(IN grupo VARCHAR(32), OUT referencia VARCHAR(32), OUT error VARCHAR(200)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdidegrp) INTO existe FROM jgrm01t WHERE dsidegrp = grupo; SET referencia = ''; SET error = ''; IF(existe>0)THEN SET error = 'El registro ya existe, no se puede ingresar.'; ELSE SELECT MD5(grupo) INTO referencia; INSERT INTO jgrm01t VALUES (referencia,grupo,0,'sysadmindoce','sp_',CURRENT_TIMESTAMP); SET error = ''; END IF; END; // DROP PROCEDURE IF EXISTS actualizaGrupo; CREATE PROCEDURE actualizaGrupo(IN identificador VARCHAR(32), IN grupo VARCHAR(35), IN modo INT, OUT error VARCHAR(200)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdidegrp) INTO existe FROM jgrm01t WHERE dsidegrp = grupo; SET error = ''; IF(existe<=0)THEN SET error = 'El registro no existe, no se puede actualizar.'; ELSE UPDATE jgrm01t SET dsidegrp = grupo, ingrpmod = modo WHERE cdidegrp = identificador; SET error = ''; END IF; END; // DROP PROCEDURE IF EXISTS eliminaGrupo; CREATE PROCEDURE eliminaGrupo(IN identificador VARCHAR(32), OUT error VARCHAR(200)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdidegrp) INTO existe FROM jgrm01t WHERE cdidegrp = identificador; SET error = ''; IF(existe<=0)THEN SET error = 'El registro no existe, no se puede eliminar.'; ELSE DELETE FROM jgrm01t WHERE cdidegrp = identificador; SET error = ''; END IF; END; // DROP PROCEDURE IF EXISTS actualizaGrupoContacto; CREATE PROCEDURE actualizaGrupoContacto(IN contacto CHAR(16), IN grupo VARCHAR(32), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); IF(existe>0)THEN UPDATE jusm01t SET cdidegrp = grupo, programa = CONCAT('NWGRP',grupo,obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); SET error = ''; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS preRegistroContacto; CREATE PROCEDURE preRegistroContacto(IN persona CHAR(16), IN primero VARCHAR(35), IN segundo VARCHAR(60), IN correo VARCHAR(100), in telefono VARCHAR(60), IN movil VARCHAR(14), IN grupo VARCHAR(32), IN valenc VARCHAR(16),IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE referencia VARCHAR(32); DECLARE error VARCHAR(250); CALL registraContacto(persona,primero,segundo,correo,telefono,movil,grupo,valenc,'preregistro',@referencia,@error); IF(referencia<>'')THEN SET referencia = @referencia; UPDATE jpem01t SET instatus = 'P' WHERE cdperson = persona AND cdcontac = @referencia; ELSE SET error = @error; END IF; END; // DROP PROCEDURE IF EXISTS registraContacto; CREATE PROCEDURE registraContacto(IN persona CHAR(16), IN primero VARCHAR(35), IN segundo VARCHAR(60), IN correo VARCHAR(100), in telefono VARCHAR(60), IN movil VARCHAR(14), IN grupo VARCHAR(32), IN valenc VARCHAR(16),IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE isdupkey INT DEFAULT 0; BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; DECLARE EXIT HANDLER FOR 1062 SET isdupkey = 1; SELECT COUNT(dsmail) INTO existe FROM jpem10t WHERE dsmail = correo; IF (existe>0)THEN SET error = 'Error 50031: Este correo electrónico ya se encuentra registrado, no se puede volver a registrar.'; SET referencia = ''; ELSE SELECT getCodigoApp('CT') INTO referencia; SELECT CURRENT_TIMESTAMP INTO cts; INSERT INTO jpem10t (cdperson,cdcontac,dsfirst,dslast,dsmail,dstelloc,dstelmov,cdusuari,programa,tmstmp,dsipfrom) VALUES (persona,referencia,primero,segundo,correo,telefono,movil,obtieneUsuario(sesion),'registraContacto',cts,obtieneIp(sesion)); INSERT INTO jusm01t (cdperson,cdcontac,cdidegrp,cdusulog,dsvalenc,instatus,inusumod,dsipfrom,cdusuari,programa,tmstmp) VALUES (persona,referencia,grupo,correo,CONCAT(MD5(CONCAT(referencia,cts)),MD5(valenc)),'A',0,obtieneIp(sesion),obtieneUsuario(sesion),'registraContacto',cts); SET error = ''; END IF; END; IF (isdupkey>0)THEN SET referencia = ''; SET error = 'Error 50030: Existe una llave dúplicada en personas. Notifique a sistemas.'; END IF; END; // DROP PROCEDURE IF EXISTS registraCabecera; CREATE PROCEDURE registraCabecera(IN periodo VARCHAR(32), IN asociadoA VARCHAR(250), IN tipo VARCHAR(2), IN fecha DATE, IN documento VARCHAR(30), IN referenciaDocumento VARCHAR(60), OUT referencia CHAR(32), OUT error VARCHAR(250)) BEGIN SELECT MD5(getCodigoApp('PC')) INTO referencia; INSERT INTO jctm09t (idreggas,dsasocia,idnumper,intipgas,fefecreg,dsdocto,dsrefdoc,instatus,cdusuari,tmstmp) VALUES (referencia,asociadoA,periodo,tipo,fecha,documento,referenciaDocumento,'A',getUser(),CURRENT_TIMESTAMP); SET error = ''; END; // DROP PROCEDURE IF EXISTS cierraRegistroCabecera; CREATE PROCEDURE cierraRegistroCabecera(IN cabecera VARCHAR(32), OUT estatus CHAR(1), OUT error CHAR(1)) BEGIN DECLARE existe INT; SELECT COUNT(idreggas) INTO existe FROM jctm09t WHERE idreggas = cabecera AND instatus = 'A'; IF(existe>0) THEN UPDATE jctm09t SET instatus = 'C' WHERE idreggas = cabecera; SET estatus = 'C'; SET error = ''; ELSE SET estatus = ' '; SET error = 'El registro no existe. Verifique.'; END IF; END // DROP VIEW IF EXISTS jctc10v; CREATE VIEW jctc10v AS SELECT A.idreggas, A.idregper, A.intipreg, A.dtfecreg, A.dbimpreg, A.dbimpues, A.dsautori, A.instatus, A.dsnotreg, B.iddocele, A.dsregper FROM jctm10t A LEFT JOIN jctm11t B ON A.idreggas = B.idreggas AND A.idregper = B.idregper ORDER BY A.dtfecreg; DROP PROCEDURE IF EXISTS insertaRegistro; CREATE PROCEDURE insertaRegistro(IN cabecera VARCHAR(32), IN descripcion VARCHAR(250), IN tipo VARCHAR(2), IN fecha DATE, IN importe DOUBLE, IN impuesto DOUBLE, IN nota VARCHAR(30), IN autoriza VARCHAR(120), OUT referencia VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE cts DATETIME; SELECT CURRENT_TIMESTAMP INTO cts; SELECT MD5(CONCAT(getCodigoApp('RC'),cts)) INTO referencia; INSERT INTO jctm10t (idreggas,idregper,dsregper,intipreg,dtfecreg,dbimpreg,dbimpues,instatus,dsautori,dsnotreg,cdusuari,tmstmp) VALUES (cabecera,referencia,descripcion,tipo,fecha,importe,impuesto,'P',autoriza,nota,getUser(),CURRENT_TIMESTAMP); SET error = ''; END; // DROP PROCEDURE IF EXISTS insertaEvidencia; CREATE PROCEDURE insertaEvidencia(IN cabecera VARCHAR(32), IN registro VARCHAR(32), IN evidencia VARCHAR(32), OUT referencia VARCHAR(32), OUT error VARCHAR(250)) BEGIN INSERT INTO jctm11t (idreggas,idregper,iddocele,cdusuari,tmstmp) VALUES (cabecera,registro,evidencia,getUser(),CURRENT_TIMESTAMP); SET error = ''; END; // DROP PROCEDURE IF EXISTS eliminaRegistro; CREATE PROCEDURE eliminaRegistro(IN registro VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE hayEvidencia INT; SELECT COUNT(iddocele) INTO hayEvidencia FROM jctm11t WHERE idregper = registro; IF (hayEvidencia>0) THEN DELETE FROM jdem20t WHERE cddocele IN (SELECT iddocele FROM jctm11t WHERE idregper = registro); DELETE FROM jctm11t WHERE idregper = registro; END IF; DELETE FROM jctm10t WHERE idregper = registro; SET error = ''; END; // DROP PROCEDURE IF EXISTS registraCifraControl; CREATE PROCEDURE registraCifraControl(IN periodo VARCHAR(16), IN registros INT, IN monto DOUBLE, OUT error VARCHAR(200)) BEGIN DECLARE existePeriodoActivo INT; SELECT COUNT(idnumper) INTO existePeriodoActivo FROM jctm01t WHERE idnumper = periodo AND instatus = 'A'; IF(existePeriodoActivo > 0)THEN SELECT COUNT(idnumper) INTO existePeriodoActivo FROM jctm02t WHERE idnumper = periodo; IF(existePeriodoActivo) THEN UPDATE jctm02t SET innumreg = registros, dbmonto = monto WHERE idnumper = periodo; SET error = ''; ELSE INSERT INTO jctm02t VALUES (periodo, registros,monto,getUser(), CURRENT_TIMESTAMP); SET error = ''; END IF; ELSE SET error = 'El periodo no existe o no esta activo'; END IF; END; // DROP PROCEDURE IF EXISTS cerrarPeriodo; CREATE PROCEDURE cerrarPeriodo(IN periodo VARCHAR(16), IN comentario VARCHAR(2000), OUT fecha DATE, OUT estatus CHAR(1), OUT error VARCHAR(200)) BEGIN DECLARE existePeriodoActivo INT; SELECT COUNT(idnumper) INTO existePeriodoActivo FROM jctm01t WHERE idnumper = periodo AND instatus = 'A'; IF(existePeriodoActivo > 0)THEN SELECT CURRENT_DATE INTO fecha; SELECT 'C' INTO estatus; UPDATE jctm01t SET dscoment = comentario, dtfeccie = fecha, instatus = estatus WHERE idnumper = periodo; SET error = ''; ELSE SET error = 'El periodo no existe o no esta activo'; END IF; END; // cerrarPeriodo('0000000000000234', 'Cierre manual desde SP', @fecha, @estatus, @error) DROP PROCEDURE IF EXISTS abrirPeriodo; CREATE PROCEDURE abrirPeriodo(IN anio INT, IN periodo INT, OUT identificador VARCHAR(32), OUT fecha DATE, OUT estatus CHAR(1), OUT comentarios VARCHAR(2000), OUT error VARCHAR(200)) BEGIN DECLARE existePeriodoActivo INT; DECLARE fechaHora DATETIME; SELECT COUNT(idnumper) INTO existePeriodoActivo FROM jctm01t WHERE instatus = 'A'; IF(existePeriodoActivo > 0)THEN SET error = 'Existe un periodo activo, no se puede abrir otro, primero cierre el anterior.'; ELSE -- cafaray -> 030918 Por alguna razon Java siempre manda mal el periodo :(, colocamos en automatico -- IF (periodo>=1 AND periodo <= 12) THEN SELECT IF(innumper=12, 1, innumper+1) INTO existePeriodoActivo FROM jctm01t ORDER BY dtfeccie DESC LIMIT 1; -- IF (existePeriodoActivo = periodo) THEN SELECT MD5(getCodigoApp('PD')) INTO identificador; SELECT CURRENT_DATE INTO fecha; SELECT 'A' INTO estatus; SELECT CURRENT_TIMESTAMP INTO fechaHora; SELECT CONCAT('Se abre el periodo con exito a las ', fechaHora) INTO comentarios; INSERT INTO jctm01t VALUES (identificador, anio, existePeriodoActivo, fecha, NULL, estatus, comentarios, getUser(), CURRENT_TIMESTAMP); SET error = ''; -- ELSE -- SET error = (SELECT CONCAT('Periodo fuera de rango, se espera el valor ', existePeriodoActivo, ' se recibe ', periodo)); -- END IF; -- ELSE -- SET error = (SELECT CONCAT('Periodo fuera de rango, se espera un valor entre 1 y 12, se recibe ', periodo)); -- END IF; END IF; END; // CALL abrirPeriodo(2018, 1, @identificador, @fecha, @estatus, @comentarios, @error) DROP PROCEDURE IF EXISTS registraEstatusDocumento; CREATE PROCEDURE registraEstatusDocumento(IN persona VARCHAR(16), IN documento VARCHAR(16), IN estatus CHAR(1), IN comentario VARCHAR(2000), IN sesion VARCHAR(32), OUT identificador INT, OUT fecha TIMESTAMP, OUT error VARCHAR(200)) BEGIN DECLARE existeDocumento INT; SELECT COUNT(cddocele) INTO existeDocumento FROM jdem10t WHERE cdperson = persona AND cddocele = documento; IF(existeDocumento > 0)THEN SELECT COUNT(cddocele) + 1 INTO identificador FROM jdem30t WHERE cdperson = persona AND cddocele = documento; SELECT CURRENT_TIMESTAMP INTO fecha; INSERT INTO jdem30t VALUES (persona, documento, identificador, estatus, fecha, sesion, comentario); UPDATE jdem10t SET instatus = estatus WHERE cdperson = persona AND cddocele = documento; SET error = ''; ELSE SET error = 'El documento solicitado no existe en el dominio actual.'; END IF; END; // DROP PROCEDURE IF EXISTS actualizaEstatusDocumento; CREATE PROCEDURE actualizaEstatusDocumento(IN persona VARCHAR(16), IN documento VARCHAR(16), IN estatus CHAR(1), IN comentario VARCHAR(2000), IN sesion VARCHAR(32), OUT identificador INT, OUT fecha TIMESTAMP, OUT error VARCHAR(200)) BEGIN DECLARE existeDocumento INT; SELECT COUNT(cddocele) INTO existeDocumento FROM jdem30t WHERE cdperson = persona AND cddocele = documento; IF(existeDocumento > 0)THEN SELECT COUNT(cddocele) + 1 INTO identificador FROM jdem30t WHERE cdperson = persona AND cddocele = documento; SELECT TIMESTAMP INTO fecha; INSERT INTO jdem30t VALUES (persona, documento, identificador, estatus, fecha, sesion, comentario); SET error = ''; ELSE SET error = 'El documento solicitado no existe en el dominio actual.'; END IF; END; // DROP PROCEDURE IF EXISTS registraPromesaPago; CREATE PROCEDURE registraPromesaPago(IN persona VARCHAR(16), IN documento VARCHAR(16), IN fecha TIMESTAMP, IN sesion VARCHAR(32), OUT error VARCHAR(200)) BEGIN DECLARE existeDocumento INT; SELECT COUNT(cddocele) INTO existeDocumento FROM jdem10t WHERE cdperson = persona AND cddocele = documento; IF(existeDocumento > 0)THEN INSERT INTO jdem31t VALUES (persona, documento, fecha, sesion, CURRENT_TIMESTAMP); SET error = ''; ELSE SET error = 'El documento solicitado no existe en el dominio actual.'; END IF; END; // DROP PROCEDURE IF EXISTS insertaNotificacion; CREATE PROCEDURE insertaNotificacion(IN persona VARCHAR(16), IN documento VARCHAR(16), IN actualizacion INT, IN mensaje VARCHAR(500), IN concopia VARCHAR(2000), OUT error VARCHAR(200)) BEGIN DECLARE existeDocumento INT; SELECT COUNT(cddocele) INTO existeDocumento FROM jdem30t WHERE cdperson = persona AND cddocele = documento AND idnumact = actualizacion; IF(existeDocumento > 0)THEN INSERT INTO jdem40t VALUES (persona, documento, actualizacion, mensaje, null, 'P', concopia, CURRENT_TIMESTAMP); SET error = ''; ELSE SET error = 'El documento solicitado no existe en el dominio actual.'; END IF; END; // DROP PROCEDURE IF EXISTS actualizaNotificacion; CREATE PROCEDURE actualizaNotificacion(IN persona VARCHAR(16), IN documento VARCHAR(16), IN actualizacion INT, IN fecha DATE, IN estatus CHAR(1), OUT error VARCHAR(200)) BEGIN DECLARE existeDocumento INT; SELECT COUNT(cddocele) INTO existeDocumento FROM jdem40t WHERE cdperson = persona AND cddocele = documento AND idnumact = actualizacion; IF(existeDocumento > 0)THEN UPDATE jdem40t SET dtnotifi = fecha, instatus = estatus WHERE cdperson = persona AND cddocele = documento AND idnumact = actualizacion; SET error = ''; ELSE SET error = 'El documento solicitado no existe en el dominio actual.'; END IF; END; // <file_sep>/src/java/com/ftc/gedoc/bo/GrupoBO.java package com.ftc.gedoc.bo; import com.ftc.gedoc.exceptions.GeDocBOException; import com.ftc.gedoc.utiles.Grupo; import java.util.List; public interface GrupoBO { List<Grupo> listar()throws GeDocBOException; Grupo actualizar(Grupo grupo)throws GeDocBOException; Grupo buscar(String id) throws GeDocBOException; Grupo asignarPermisos(String id, long permisos) throws GeDocBOException; void eliminar(String id)throws GeDocBOException; } <file_sep>/db/ctrldoce_estatus.sql -- ctrldoce:: para el manejo de estatus y notificaciones de facturas de proveedor. -- registro de estatus DROP TABLE IF exists jdem30t; CREATE TABLE jdem30t ( cdperson VARCHAR(32) NOT NULL, cddocele VARCHAR(32) NOT NULL, idnumact INT NOT NULL DEFAULT 0, instatus ENUM('L','D','C','V','R','T','P','X','') NULL DEFAULT '', dtfecsta TIMESTAMP NOT NULL, idnumses VARCHAR(32) NOT NULL, dscoment VARCHAR(2000) NOT NULL, PRIMARY KEY(cdperson, cddocele, idnumact), FOREIGN KEY(cdperson, cddocele) REFERENCES jdem10t(cdperson, cddocele) ON DELETE CASCADE ON UPDATE CASCADE )Engine=INNODB; DROP PROCEDURE IF EXISTS registraEstatusDocumento; CREATE PROCEDURE registraEstatusDocumento(IN persona VARCHAR(16), IN documento VARCHAR(16), IN estatus CHAR(1), IN comentario VARCHAR(2000), IN sesion VARCHAR(32), OUT identificador INT, OUT fecha TIMESTAMP, OUT error VARCHAR(200)) BEGIN DECLARE existeDocumento INT; SELECT COUNT(cddocele) INTO existeDocumento FROM jdem10t WHERE cdperson = persona AND cddocele = documento; IF(existeDocumento > 0)THEN SELECT COUNT(cddocele) + 1 INTO identificador FROM jdem30t WHERE cdperson = persona AND cddocele = documento; SELECT CURRENT_TIMESTAMP INTO fecha; INSERT INTO jdem30t VALUES (persona, documento, identificador, estatus, fecha, sesion, comentario); UPDATE jdem10t SET instatus = estatus WHERE cdperson = persona AND cddocele = documento; SET error = ''; ELSE SET error = 'El documento solicitado no existe en el dominio actual.'; END IF; END; // DROP PROCEDURE IF EXISTS actualizaEstatusDocumento; CREATE PROCEDURE actualizaEstatusDocumento(IN persona VARCHAR(16), IN documento VARCHAR(16), IN estatus CHAR(1), IN comentario VARCHAR(2000), IN sesion VARCHAR(32), OUT identificador INT, OUT fecha TIMESTAMO, OUT error VARCHAR(200)) BEGIN DECLARE existeDocumento INT; SELECT COUNT(cddocele) INTO existeDocumento FROM jdem30t WHERE cdperson = persona AND cddocele = documento; IF(existeDocumento > 0)THEN SELECT COUNT(cddocele) + 1 INTO identificador FROM jdem30t WHERE cdperson = persona AND cddocele = documento; SELECT TIMESTAMP INTO fecha; INSERT INTO jdem30t VALUES (persona, documento, identificador, estatus, fecha, sesion, comentario); SET error = ''; ELSE SET error = 'El documento solicitado no existe en el dominio actual.'; END IF; END; // -- registro de la fecha promesa de pago DROP TABLE IF EXISTS jdem31t; CREATE TABLE jdem31t ( cdperson VARCHAR(32) NOT NULL, cddocele VARCHAR(32) NOT NULL, dtfecpag DATE NOT NULL, idnumses VARCHAR(32) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(cdperson, cddocele), FOREIGN KEY(cdperson, cddocele) REFERENCES jdem10t(cdperson, cddocele) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=InnoDB; DROP PROCEDURE IF EXISTS registraPromesaPago; CREATE PROCEDURE registraPromesaPago(IN persona VARCHAR(16), IN documento VARCHAR(16), IN fecha TIMESTAMP, IN sesion VARCHAR(32), OUT error VARCHAR(200)) BEGIN DECLARE existeDocumento INT; SELECT COUNT(cddocele) INTO existeDocumento FROM jdem10t WHERE cdperson = persona AND cddocele = documento; IF(existeDocumento > 0)THEN INSERT INTO jdem31t VALUES (persona, documento, fecha, sesion, CURRENT_TIMESTAMP); SET error = ''; ELSE SET error = 'El documento solicitado no existe en el dominio actual.'; END IF; END; // -- registro de las notificaciones: Estatus: Pendiente, Enviada, Cancelada DROP TABLE IF EXISTS jdem40t; CREATE TABLE jdem40t ( cdperson VARCHAR(32) NOT NULL, cddocele VARCHAR(32) NOT NULL, idnumact INT NOT NULL DEFAULT 0, dsmensaj VARCHAR(500) NOT NULL, dtnotifi DATE NULL, instatus ENUM('P','S','C','') NULL DEFAULT '', dsccmail VARCHAR(2000) NULL DEFAULT '', tmstmp TIMESTAMP NOT NULL, PRIMARY KEY(cdperson, cddocele, idnumact), FOREIGN KEY(cdperson, cddocele, idnumact) REFERENCES jdem30t(cdperson, cddocele, idnumact) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=InnoDB; DROP PROCEDURE IF EXISTS insertaNotificacion; CREATE PROCEDURE insertaNotificacion(IN persona VARCHAR(16), IN documento VARCHAR(16), IN actualizacion INT, IN mensaje VARCHAR(500), IN concopia VARCHAR(2000), OUT error VARCHAR(200)) BEGIN DECLARE existeDocumento INT; SELECT COUNT(cddocele) INTO existeDocumento FROM jdem30t WHERE cdperson = persona AND cddocele = documento AND idnumact = actualizacion; IF(existeDocumento > 0)THEN INSERT INTO jdem40t VALUES (persona, documento, actualizacion, mensaje, null, 'P', concopia, CURRENT_TIMESTAMP); SET error = ''; ELSE SET error = 'El documento solicitado no existe en el dominio actual.'; END IF; END; // DROP PROCEDURE IF EXISTS actualizaNotificacion; CREATE PROCEDURE actualizaNotificacion(IN persona VARCHAR(16), IN documento VARCHAR(16), IN actualizacion INT, IN fecha DATE, IN estatus CHAR(1), OUT error VARCHAR(200)) BEGIN DECLARE existeDocumento INT; SELECT COUNT(cddocele) INTO existeDocumento FROM jdem40t WHERE cdperson = persona AND cddocele = documento AND idnumact = actualizacion; IF(existeDocumento > 0)THEN UPDATE jdem40t SET dtnotifi = fecha, instatus = estatus WHERE cdperson = persona AND cddocele = documento AND idnumact = actualizacion; SET error = ''; ELSE SET error = 'El documento solicitado no existe en el dominio actual.'; END IF; END; // <file_sep>/src/java/com/ftc/services/invoice/modelo/CEPConcepto.java package com.ftc.services.invoice.modelo; public class CEPConcepto { private String claveProdServ; private int cantidad; private String claveUnidad; private String descripcion; private double valorUnitario; private double importe; public CEPConcepto(){ System.out.println("Concepto de pago CEP creado"); } public String getClaveProdServ() { return claveProdServ; } public void setClaveProdServ(String claveProdServ) { this.claveProdServ = claveProdServ; } public int getCantidad() { return cantidad; } public void setCantidad(int cantidad) { this.cantidad = cantidad; } public String getClaveUnidad() { return claveUnidad; } public void setClaveUnidad(String claveUnidad) { this.claveUnidad = claveUnidad; } public String getDescripcion() { return descripcion; } public void setDescripcion(String descripcion) { this.descripcion = descripcion; } public double getValorUnitario() { return valorUnitario; } public void setValorUnitario(double valorUnitario) { this.valorUnitario = valorUnitario; } public double getImporte() { return importe; } public void setImporte(double importe) { this.importe = importe; } @Override public String toString() { return "CEPConcepto{" + "claveProdServ='" + claveProdServ + '\'' + ", cantidad=" + cantidad + ", claveUnidad='" + claveUnidad + '\'' + ", descripcion='" + descripcion + '\'' + ", valorUnitario=" + valorUnitario + ", importe=" + importe + '}'; } } <file_sep>/src/java/com/ftc/gedoc/dao/impl/SeguridadDAOImpl.java package com.ftc.gedoc.dao.impl; import com.ftc.aq.Conexion; import com.ftc.aq.SpParam; import com.ftc.aq.SpParams; import com.ftc.gedoc.dao.SeguridadDAO; import com.ftc.gedoc.exceptions.GeDocDAOException; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Types; import java.util.HashMap; import java.util.Map; public class SeguridadDAOImpl implements SeguridadDAO { Connection conexion; @Override public Map<String, String> listaGrupos(String sesion) throws GeDocDAOException { try { conexion = Conexion.getConexion(); Map<String, String> listado = new HashMap<String, String>(); SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, sesion)); ResultSet rst = Conexion.consultaStoreProcedure(conexion, "listaGrupos", params); while (rst.next()) { listado.put(rst.getString(1), rst.getString(2)); } return listado; } catch(SQLException e){ throw new GeDocDAOException("Imposible determinar los grupos para el usuario, fallo el procedimiento (listaGrupos). " + e.getMessage()); } finally { try { if (conexion != null){ conexion.close(); } }catch(SQLException e){} } } } <file_sep>/src/java/com/ftc/gedoc/dao/SeguridadDAO.java package com.ftc.gedoc.dao; import com.ftc.gedoc.exceptions.GeDocDAOException; import java.util.Map; public interface SeguridadDAO { Map<String,String> listaGrupos(String sesion)throws GeDocDAOException; } <file_sep>/src/java/com/ftc/gedoc/utiles/comparators/ExpensesComparatorPorEstatus.java package com.ftc.gedoc.utiles.comparators; import com.ftc.gedoc.utiles.PeriodoCabecera; import java.util.Comparator; public class ExpensesComparatorPorEstatus implements Comparator<PeriodoCabecera> { @Override public int compare(PeriodoCabecera o1, PeriodoCabecera o2) { return o1.getDsEstatus().compareTo(o2.getDsEstatus()); } } <file_sep>/src/java/com/ftc/services/invoice/modelo/CEPCabecera.java package com.ftc.services.invoice.modelo; import java.util.Date; import java.util.List; public class CEPCabecera { private static final long serialVersionUID = 8372722219280582677L; private String identificador; private String version; private String serie; private String folio; private Date fecha; private double subTotal; private String moneda; private double total; private String tipoDeComprobante; private String lugarExpedicion; private String xmlnsPago10; private String rfcEmisor; private String nombreEmisor; private String regimenFiscalEmisor; private String rfcReceptor; private String nombreReceptor; private String usoCFDIReceptor; private String rfcProvCertif; private String versionTimbreFiscal; private String Uuid; private Date fechaTimbrado; private String noCertificadoSAT; private String versionPagos; private List<CEPConcepto> conceptos; private List<CEPPago> pagos; public CEPCabecera() { System.out.println("Se genero la cabecera de pago."); } public static long getSerialVersionUID() { return serialVersionUID; } public String getVersion() { return version; } public void setVersion(String version) { this.version = version; } public String getSerie() { return serie; } public void setSerie(String serie) { this.serie = serie; } public String getFolio() { return folio; } public void setFolio(String folio) { this.folio = folio; } public Date getFecha() { return fecha; } public void setFecha(Date fecha) { this.fecha = fecha; } public double getSubTotal() { return subTotal; } public void setSubTotal(double subTotal) { this.subTotal = subTotal; } public String getMoneda() { return moneda; } public void setMoneda(String moneda) { this.moneda = moneda; } public double getTotal() { return total; } public void setTotal(double total) { this.total = total; } public String getTipoDeComprobante() { return tipoDeComprobante; } public void setTipoDeComprobante(String tipoDeComprobante) { this.tipoDeComprobante = tipoDeComprobante; } public String getLugarExpedicion() { return lugarExpedicion; } public void setLugarExpedicion(String lugarExpedicion) { this.lugarExpedicion = lugarExpedicion; } public String getXmlnsPago10() { return xmlnsPago10; } public void setXmlnsPago10(String xmlnsPago10) { this.xmlnsPago10 = xmlnsPago10; } public String getRfcEmisor() { return rfcEmisor; } public void setRfcEmisor(String rfcEmisor) { this.rfcEmisor = rfcEmisor; } public String getNombreEmisor() { return nombreEmisor; } public void setNombreEmisor(String nombreEmisor) { this.nombreEmisor = nombreEmisor; } public String getRegimenFiscalEmisor() { return regimenFiscalEmisor; } public void setRegimenFiscalEmisor(String regimenFiscalEmisor) { this.regimenFiscalEmisor = regimenFiscalEmisor; } public String getRfcReceptor() { return rfcReceptor; } public void setRfcReceptor(String rfcReceptor) { this.rfcReceptor = rfcReceptor; } public String getNombreReceptor() { return nombreReceptor; } public void setNombreReceptor(String nombreReceptor) { this.nombreReceptor = nombreReceptor; } public String getUsoCFDIReceptor() { return usoCFDIReceptor; } public void setUsoCFDIReceptor(String usoCFDIReceptor) { this.usoCFDIReceptor = usoCFDIReceptor; } public String getRfcProvCertif() { return rfcProvCertif; } public void setRfcProvCertif(String rfcProvCertif) { this.rfcProvCertif = rfcProvCertif; } public String getVersionTimbreFiscal() { return versionTimbreFiscal; } public void setVersionTimbreFiscal(String versionTimbreFiscal) { this.versionTimbreFiscal = versionTimbreFiscal; } public String getUuid() { return Uuid; } public void setUuid(String uuid) { Uuid = uuid; } public Date getFechaTimbrado() { return fechaTimbrado; } public void setFechaTimbrado(Date fechaTimbrado) { this.fechaTimbrado = fechaTimbrado; } public String getNoCertificadoSAT() { return noCertificadoSAT; } public void setNoCertificadoSAT(String noCertificadoSAT) { this.noCertificadoSAT = noCertificadoSAT; } public String getVersionPagos() { return versionPagos; } public void setVersionPagos(String versionPagos) { this.versionPagos = versionPagos; } public List<CEPPago> getPagos() { return pagos; } public void setPagos(List<CEPPago> pagos) { this.pagos = pagos; } public List<CEPConcepto> getConceptos(){ return this.conceptos; } public void setConceptos(List<CEPConcepto> conceptos){ this.conceptos = conceptos; } public String getIdentificador() { return identificador; } public void setIdentificador(String identificador) { this.identificador = identificador; } @Override public String toString() { return "CEPCabecera{" + "version='" + version + '\'' + ", serie='" + serie + '\'' + ", folio='" + folio + '\'' + ", fecha=" + fecha + ", subTotal=" + subTotal + ", moneda='" + moneda + '\'' + ", total=" + total + ", tipoDeComprobante='" + tipoDeComprobante + '\'' + ", lugarExpedicion='" + lugarExpedicion + '\'' + ", xmlnsPago10='" + xmlnsPago10 + '\'' + ", rfcEmisor='" + rfcEmisor + '\'' + ", nombreEmisor='" + nombreEmisor + '\'' + ", regimenFiscalEmisor='" + regimenFiscalEmisor + '\'' + ", rfcReceptor='" + rfcReceptor + '\'' + ", nombreReceptor='" + nombreReceptor + '\'' + ", usoCFDIReceptor='" + usoCFDIReceptor + '\'' + ", rfcProvCertif='" + rfcProvCertif + '\'' + ", versionTibreFiscal='" + versionTimbreFiscal + '\'' + ", Uuid='" + Uuid + '\'' + ", fechaTimbrado=" + fechaTimbrado + ", noCertificadoSAT='" + noCertificadoSAT + '\'' + ", versionPagos='" + versionPagos + '\'' + ", pagos=" + pagos.size() + ", identificador=" + identificador + '}'; } } <file_sep>/src/java/com/ftc/gedoc/utiles/PeriodoRegistro.java package com.ftc.gedoc.utiles; import java.util.Date; public class PeriodoRegistro { private String registro; private String descripcion; private Date fecha; private String tipo; private double importe; private double impuesto; private String estatus; private String nota; private String evidencia; private String autoriza; public PeriodoRegistro() {} public String getRegistro() { return registro; } public void setRegistro(String registro) { this.registro = registro; } public Date getFecha() { return fecha; } public void setFecha(Date fecha) { this.fecha = fecha; } public String getTipo() { return tipo; } public void setTipo(String tipo) { this.tipo = tipo; } public double getImporte() { return importe; } public void setImporte(double importe) { this.importe = importe; } public double getImpuesto() { return impuesto; } public void setImpuesto(double impuesto) { this.impuesto = impuesto; } public String getEstatus() { return estatus; } public void setEstatus(String estatus) { this.estatus = estatus; } public String getNota() { return nota; } public void setNota(String nota) { this.nota = nota; } public String getEvidencia() { return evidencia; } public void setEvidencia(String evidencia) { this.evidencia = evidencia; } public String getDescripcion() { return descripcion; } public void setDescripcion(String descripcion) { this.descripcion = descripcion; } public String getAutoriza() { return autoriza; } public void setAutoriza(String autoriza) { this.autoriza = autoriza; } } <file_sep>/src/java/com/ftc/webcom/servlets/Expenditure.java package com.ftc.webcom.servlets; import com.ftc.aq.Comunes; import com.ftc.gedoc.bo.DocumentoBO; import com.ftc.gedoc.bo.PeriodoBo; import com.ftc.gedoc.bo.impl.DocumentoBOImpl; import com.ftc.gedoc.bo.impl.PeriodoBOImpl; import com.ftc.gedoc.exceptions.GeDocBOException; import com.ftc.gedoc.utiles.CifraControl; import com.ftc.gedoc.utiles.Documento; import com.ftc.gedoc.utiles.Periodo; import com.ftc.gedoc.utiles.PeriodoCabecera; import com.ftc.gedoc.utiles.PeriodoRegistro; import java.io.File; import java.io.IOException; import java.io.PrintWriter; import java.util.List; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; public class Expenditure extends HttpServlet { protected void processRequest(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { response.setContentType("text/html;charset=UTF-8"); PrintWriter out = response.getWriter(); String logLocation = getServletContext().getInitParameter("logLocation"); try { String cmd = request.getParameter("cmd") == null ? "" : request.getParameter("cmd"); DocumentoBO documentoBO = new DocumentoBOImpl(); if (cmd.startsWith(Comunes.toMD5("v-Cifra" + request.getSession().getId()))) { String tipo = request.getParameter("tipo"); String fecha = request.getParameter("fecha"); String importe = request.getParameter("importe"); String impuesto = request.getParameter("impuesto"); String comprobante = request.getParameter("comprobante"); String tipoGasto = request.getParameter("tipo_gasto"); PeriodoRegistro registro = new PeriodoRegistro(); registro.setFecha(Comunes.DMAtoFecha(fecha)); registro.setTipo(tipo); registro.setImporte(Double.parseDouble(importe)); registro.setImpuesto(Double.parseDouble(impuesto)); if (comprobante.startsWith("xml:")) { Documento documento = new Documento(); //mover el documento a } else { registro.setNota(comprobante); } } else if (cmd.startsWith(Comunes.toMD5("registra-Cabecera" + request.getSession().getId()))) { String asignar = request.getParameter("asignar"); String tipo = request.getParameter("tipo"); String documento = request.getParameter("documento"); String referencia = request.getParameter("referencia"); String fecha = request.getParameter("fechaGasto"); PeriodoBo bo = new PeriodoBOImpl(); PeriodoCabecera pc = new PeriodoCabecera(); pc.setAsociadoA(asignar); pc.setTipo(tipo); pc.setDocumento(documento); pc.setReferencia(referencia); pc.setFecha(Comunes.DMAtoFecha(fecha)); // --> cafaray 281217 - manejo de registros fuera de periodo: pc = bo.insertaCabecera(bo.actual().getIdentificador(), pc); if (pc.getIdentificador().isEmpty()) { out.print("Algo ocurrio y no se ingresó el nuevo registro para gasto."); } else { out.print("Se actualizo con éxito el registro."); } } else if (cmd.startsWith(Comunes.toMD5("actualizaNota-" + request.getSession().getId()).toUpperCase())) { String documento = request.getParameter("documento"); String importe = request.getParameter("importe"); String impuesto = request.getParameter("impuesto"); String fecha = request.getParameter("fecha"); String tipo = request.getParameter("tipo"); String autoriza = request.getParameter("autoriza") == null ? "" : request.getParameter("autoriza"); String cabecera = cmd.substring(Comunes.toMD5("actualizaNota-" + request.getSession().getId()).length()).toLowerCase(); PeriodoBo bo = new PeriodoBOImpl(); PeriodoRegistro pr = new PeriodoRegistro(); pr.setDescripcion("Nota de gasto"); pr.setEstatus("A"); pr.setFecha(Comunes.DMAtoFecha(fecha.replace("-", "/"))); pr.setImporte(Double.parseDouble(importe)); pr.setImpuesto(Double.parseDouble(impuesto)); pr.setNota(documento); pr.setTipo(tipo); pr.setAutoriza(autoriza); pr = bo.insertaRegistro(cabecera, pr); if (pr.getRegistro().isEmpty()) { out.print("Algo ocurrio y no se ingresó el nuevo registro para gasto."); } else { out.print("Se actualizo con éxito el registro."); } } else if (cmd.startsWith(Comunes.toMD5("xdoc-remove".concat(request.getSession().getId())).toUpperCase())) { String registro = cmd.substring(Comunes.toMD5("xdoc-remove".concat(request.getSession().getId())).length()); PeriodoBo bo = new PeriodoBOImpl(); PeriodoRegistro periodoRegistro = bo.encuentraRegistroPorId(registro); if (periodoRegistro != null) { Documento documento = new Documento(); documento = periodoRegistro.getEvidencia() == null ? null : documentoBO.findById(periodoRegistro.getEvidencia()); bo.eliminaRegistro(periodoRegistro); int archivosEliminados = 0; if (documento != null && documento.getArchivos() != null && !documento.getArchivos().isEmpty()) { String carpeta = request.getSession().getAttribute("rfc")!=null?(String)request.getSession().getAttribute("rfc"):""; eliminaArchivo(documento, carpeta); } out.printf("La acción de eliminar ha finalizado con éxito, se eliminaron %d archivos.", archivosEliminados); } else { out.print("El registro indicado no existe en la unidad."); } } else if (cmd.startsWith(Comunes.toMD5("periodoRegistro-update:tipo".concat(request.getSession().getId())).toUpperCase())) { String idRegistro = cmd.substring(Comunes.toMD5("periodoRegistro-update:tipo".concat(request.getSession().getId())).length()); String tipoComprobante = request.getParameter("valor"); PeriodoBo bo = new PeriodoBOImpl(); PeriodoRegistro periodoRegistro = bo.encuentraRegistroPorId(idRegistro); if (periodoRegistro != null) { periodoRegistro = bo.actualizaTipoComprobante(periodoRegistro, tipoComprobante); if (periodoRegistro.getTipo() != null) { out.print("Se actualizo el valor correctamente."); } else { out.print("Algo malo ocurrió al actualizar a " + idRegistro); } } else { out.print("No se localizo el registro especificado."); } } else if (cmd.startsWith(Comunes.toMD5("periodoRegistro-update:autoriza".concat(request.getSession().getId())).toUpperCase())) { String idRegistro = cmd.substring(Comunes.toMD5("periodoRegistro-update:autoriza".concat(request.getSession().getId())).length()); String autoriza = request.getParameter("valor")!=null?request.getParameter("valor"):""; PeriodoBo bo = new PeriodoBOImpl(); PeriodoRegistro periodoRegistro = bo.encuentraRegistroPorId(idRegistro); if (periodoRegistro != null) { periodoRegistro = bo.actualizaAutoriza(periodoRegistro, autoriza.trim()); if (periodoRegistro.getAutoriza() != null) { out.print("Se actualizo el valor correctamente."); } else { out.print("Algo malo ocurrió al actualizar a " + idRegistro); } } else { out.print("No se localizo el registro especificado."); } } else if (cmd.startsWith(Comunes.toMD5("elimina-cabecera".concat(request.getSession().getId())).toUpperCase())) { String idCabecera = cmd.substring(Comunes.toMD5("elimina-cabecera".concat(request.getSession().getId())).length()); String tipoGasto = request.getParameter("tipo_gasto"); PeriodoBo bo = new PeriodoBOImpl(); PeriodoCabecera periodoCabecera = bo.encuentraCabeceraPorId(idCabecera); int eliminados = 0; if (periodoCabecera != null) { List<PeriodoRegistro> registros = bo.listaRegistros(idCabecera); for (PeriodoRegistro registro : registros) { bo.eliminaRegistro(registro); if(registro.getEvidencia()!=null && !registro.getEvidencia().isEmpty()){ Documento documento = new Documento(); documento = documentoBO.findById(registro.getEvidencia()); if(documento!=null){ String carpeta = request.getSession().getAttribute("rfc")!=null?(String)request.getSession().getAttribute("rfc"):""; eliminados = eliminaArchivo(documento, carpeta); } } } bo.eliminaCabecera(periodoCabecera); out.printf("Se borraron los registros correctamente, eliminando %d archivos.", eliminados); } else { out.print("No se localizo el registro especificado."); } } else if(cmd.startsWith(Comunes.toMD5("xpenditures.cerrar-periodo".concat(request.getSession().getId())).toUpperCase())){ PeriodoBo bo = new PeriodoBOImpl(); CifraControl nuevo = bo.cierraPeriodo(bo.actual()); out.print("Se ha cerrado correctamente el periodo y se han generado las cifras control correctamente."); //--> cafaray 211217: Ajuste de periodo: } else if (cmd.startsWith(Comunes.toMD5("xpenditures.ajustar-periodo-".concat(request.getSession().getId())).toUpperCase())){ String periodo = cmd.substring(Comunes.toMD5("xpenditures.ajustar-periodo-".concat(request.getSession().getId())).toUpperCase().length()); PeriodoBo bo = new PeriodoBOImpl(); Periodo obPeriodo = bo.obtenerPeriodoPorId(periodo); if (obPeriodo!=null){ String ajuste = bo.cierraPeriodoAjuste(obPeriodo); if (ajuste.length()>0){ out.print("Se ha cerrado correctamente el ajuste al periodo y se han generado las cifras control correctamente: " + ajuste); } else { out.print("Algo ocurrio y no se logro hacer el cierre del ajuste. Revise el log de operaciones para mas detalle."); } } else { out.print("El periodo especificado no se ha localizado o este periodo no se puede ajustar. "+periodo); } // <-- } else if(cmd.startsWith(Comunes.toMD5("xpenditures.cerrar-registro-".concat(request.getSession().getId())).toUpperCase())){ String idCabecera = cmd.substring(Comunes.toMD5("xpenditures.cerrar-registro-".concat(request.getSession().getId())).length()).toLowerCase(); PeriodoBo bo = new PeriodoBOImpl(); PeriodoCabecera pc = bo.cierraCabcera(idCabecera); if(pc!=null && pc.getEstatus()!=null && pc.getEstatus().equals("C")){ out.print("Se ha cerrado correctamente el registro de gastos."); } else { out.print("Algo ocurrio y no se cerro el registro de gastos."); } } else if(cmd.startsWith(Comunes.toMD5("xpenditures.cerrar-registro-ajuste-".concat(request.getSession().getId())).toUpperCase())){ String idCabecera = cmd.substring(Comunes.toMD5("xpenditures.cerrar-registro-ajuste-".concat(request.getSession().getId())).length()).toLowerCase(); PeriodoBo bo = new PeriodoBOImpl(); PeriodoCabecera pc = bo.cierraCabceraAjuste(idCabecera); if(pc!=null && pc.getEstatus()!=null && pc.getEstatus().equals("Q")){ out.print("Se ha cerrado correctamente el registro de gastos para aplicación de ajuste."); } else { out.print("Algo ocurrio y no se cerro el registro de gastos para el ajuste."); } } else if(cmd.startsWith(Comunes.toMD5(request.getSession().getId().concat("generar-periodo-actual-")))){ PeriodoBo bo = new PeriodoBOImpl(); Periodo actual = bo.abrirPeriodo(); if (actual!=null){ out.print("Se ha generado el periodo correctamente."); } else { out.print("No se ha logrado obtener el periodo, pero al parecer este fue generado. Revise con el administrador de periodos"); } } else { out.print("La operación solicitada no fue identificada."); } } catch(NullPointerException e){ Comunes.escribeLog(logLocation, e, (String) request.getSession().getAttribute("usuario")); out.print("Al parecer no se encontró un registro, por lo que no se puede realizar la operación."); } catch (GeDocBOException e) { Comunes.escribeLog(logLocation, e, (String) request.getSession().getAttribute("usuario")); out.print(e.getMessage()); } catch (Exception e) { Comunes.escribeLog(logLocation, e, (String) request.getSession().getAttribute("usuario")); out.print(e.getMessage()); } finally { out.close(); } } private int eliminaArchivo(Documento documento, String carpeta) throws IOException { //elimina el archivo String archivos = documento.getArchivos(); StringBuilder path = new StringBuilder(getServletContext().getInitParameter("fileLocation")); path.append(carpeta).append("/"); path.append(documento.getPersona()).append("/"); String[] rutas = archivos.split(","); int xRutas = 0; if (rutas.length > 0) { for (String ruta : rutas) { File file = new File(path.toString() + ruta); if (file.exists()) { file.delete(); xRutas++; } else { System.out.printf("El archivo \"%s\" no existe, no se puede eliminar.", file.getAbsolutePath()); } } } else { throw new IOException(String.format("La ruta del documento no es valida [%s]", documento.getArchivos())); } return xRutas; } @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { //processRequest(request, response); } @Override protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { processRequest(request, response); } @Override public String getServletInfo() { return "Short description"; }// </editor-fold> } <file_sep>/src/java/com/ftc/gedoc/bo/impl/PersonaBOImpl.java package com.ftc.gedoc.bo.impl; import com.ftc.gedoc.bo.PersonaBO; import com.ftc.gedoc.dao.PersonaDAO; import com.ftc.gedoc.dao.impl.PersonaDAOImpl; import com.ftc.gedoc.exceptions.GeDocBOException; import com.ftc.gedoc.exceptions.GeDocDAOException; import com.ftc.gedoc.utiles.Documento; import com.ftc.gedoc.utiles.Persona; import java.util.Collection; import java.util.List; public class PersonaBOImpl implements PersonaBO { PersonaDAO dao = new PersonaDAOImpl(); @Override public List<Persona> obtienePersonas(char tipo, String sesion) throws GeDocBOException { try{ return dao.obtienePersonas(tipo, sesion); } catch(GeDocDAOException e) { throw new GeDocBOException(e); } } @Override public Collection<Persona> localizaPersonas(char tipo, String nombre, String sesion) throws GeDocBOException { try{ return dao.localizaPersonas(tipo, nombre, sesion); } catch(GeDocDAOException e) { throw new GeDocBOException(e); } } @Override public Persona localizaPersonasPorIdentificador(String identificador) throws GeDocBOException { try{ return dao.localizaPersonasPorIdentificador(identificador); } catch(GeDocDAOException e) { throw new GeDocBOException(e); } } @Override public Collection<Persona> localizaPersonasPorRFC(char tipo, String rfc, String sesion) throws GeDocBOException { try{ return dao.localizaPersonasPorRFC(tipo, rfc, sesion); } catch(GeDocDAOException e) { throw new GeDocBOException(e); } } @Override public boolean insertaPersona(Persona persona, String sesion) throws GeDocBOException { try{ return dao.insertaPersona(persona, sesion); } catch(GeDocDAOException e) { throw new GeDocBOException(e); } } @Override public List<Documento> listadoDocumentos(String empresa, String tipo, String sesion) throws GeDocBOException { try{ return dao.listadoDocumentos(empresa, tipo, sesion); } catch(GeDocDAOException e) { throw new GeDocBOException(e); } } @Override public List<Documento> listadoDocumentos(String empresa, String tipo, String fechaInicial, String fechaFinal, String sesion) throws GeDocBOException { try{ return dao.listadoDocumentos(empresa, tipo, fechaInicial, fechaFinal, sesion); } catch(GeDocDAOException e) { throw new GeDocBOException(e); } } } <file_sep>/src/java/com/ftc/services/invoice/CEPReader.java package com.ftc.services.invoice; import com.ftc.services.invoice.modelo.CEPCabecera; import com.ftc.services.invoice.modelo.CEPConcepto; import com.ftc.services.invoice.modelo.CEPPago; import com.ftc.services.invoice.modelo.CEPPagoDocumento; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import org.xml.sax.SAXException; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; import java.io.*; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; import java.util.LinkedList; import java.util.List; public class CEPReader { private static final String XML_ATRIBUTO_VERSION = "Version"; private static final String VERSION_33 = "3.3"; public CEPCabecera procesaXML(String file) throws IOException, ParserConfigurationException, SAXException { File fXmlFile = new File(file); DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance(); DocumentBuilder dBuilder = dbFactory.newDocumentBuilder(); Document doc = null; try { doc = dBuilder.parse(fXmlFile); doc.getDocumentElement().normalize(); boolean esCFDI = true; esCFDI = doc.getDocumentElement().getNodeName().toUpperCase().startsWith("CFDI"); String prefijo = (esCFDI ? "cfdi:" : ""); NodeList nList = doc.getElementsByTagName(prefijo + "Comprobante"); CEPCabecera cabecera = new CEPCabecera(); for (int temp = 0; temp < nList.getLength(); temp++) { Node nNode = nList.item(temp); if (nNode.getNodeType() == Node.ELEMENT_NODE) { Element eElement = (Element) nNode; String version = eElement.getAttribute(XML_ATRIBUTO_VERSION); String ns = eElement.getAttribute("xmlns:pago10"); if (version != null && version.equals(VERSION_33)) { cabecera.setVersion(version); cabecera.setSerie(eElement.getAttribute("Serie")); cabecera.setFolio(eElement.getAttribute("Folio")); cabecera.setFecha(parseDate(eElement.getAttribute("Fecha"))); cabecera.setSubTotal(parseDouble(eElement.getAttribute("SubTotal"))); cabecera.setMoneda(eElement.getAttribute("Moneda")); cabecera.setTotal(parseDouble(eElement.getAttribute("Total"))); cabecera.setLugarExpedicion(eElement.getAttribute("LugarExpedicion")); cabecera.setXmlnsPago10(ns); cabecera.setTipoDeComprobante(eElement.getAttribute("TipoDeComprobante")); } } } nList = doc.getElementsByTagName(prefijo + "Emisor"); for (int temp = 0; temp < nList.getLength(); temp++) { Node nNode = nList.item(temp); if (nNode.getNodeType() == Node.ELEMENT_NODE) { Element eElement = (Element) nNode; cabecera.setRfcEmisor(eElement.getAttribute("Rfc")); cabecera.setNombreEmisor(eElement.getAttribute("Nombre")); cabecera.setRegimenFiscalEmisor(eElement.getAttribute("RegimenFiscal")); } } nList = doc.getElementsByTagName(prefijo + "Receptor"); for (int temp = 0; temp < nList.getLength(); temp++) { Node nNode = nList.item(temp); if (nNode.getNodeType() == Node.ELEMENT_NODE) { Element eElement = (Element) nNode; cabecera.setRfcReceptor(eElement.getAttribute("Rfc")); cabecera.setNombreReceptor(eElement.getAttribute("Nombre")); cabecera.setUsoCFDIReceptor(eElement.getAttribute("UsoCFDI")); } } nList = doc.getElementsByTagName("tfd:TimbreFiscalDigital"); for (int temp = 0; temp < nList.getLength(); temp++) { Node nNode = nList.item(temp); if (nNode.getNodeType() == Node.ELEMENT_NODE) { Element eElement = (Element) nNode; cabecera.setRfcProvCertif(eElement.getAttribute("RfcProvCertif")); cabecera.setVersionTimbreFiscal(eElement.getAttribute("Version")); cabecera.setUuid(eElement.getAttribute("UUID")); cabecera.setFechaTimbrado(parseDate(eElement.getAttribute("FechaTimbrado"))); cabecera.setNoCertificadoSAT(eElement.getAttribute("NoCertificadoSAT")); } else if (nNode.getNodeType() == Node.ATTRIBUTE_NODE) { System.out.println(nNode.getNodeName()); } } nList = doc.getElementsByTagName("cfdi:Concepto"); List<CEPConcepto> conceptos = new LinkedList<CEPConcepto>(); for (int temp = 0; temp < nList.getLength(); temp++) { Node nNode = nList.item(temp); if (nNode.getNodeType() == Node.ELEMENT_NODE) { Element eElement = (Element) nNode; CEPConcepto concepto = new CEPConcepto(); concepto.setClaveProdServ(eElement.getAttribute("ClaveProdServ")); concepto.setCantidad(parseIntger(eElement.getAttribute("Cantidad"))); concepto.setClaveUnidad(eElement.getAttribute("ClaveUnidad")); concepto.setDescripcion(eElement.getAttribute("Descripcion")); concepto.setValorUnitario(parseDouble(eElement.getAttribute("ValorUnitario"))); concepto.setImporte(parseDouble(eElement.getAttribute("Importe"))); conceptos.add(concepto); } else if (nNode.getNodeType() == Node.ATTRIBUTE_NODE) { System.out.println(nNode.getNodeName()); } } cabecera.setConceptos(conceptos); nList = doc.getElementsByTagName("pago10:Pagos"); for (int temp = 0; temp < nList.getLength(); temp++) { Node nNode = nList.item(temp); if (nNode.getNodeType() == Node.ELEMENT_NODE) { Element eElement = (Element) nNode; System.out.println("Version : " + eElement.getAttribute("Version")); cabecera.setVersionPagos(eElement.getAttribute("Version")); } else if (nNode.getNodeType() == Node.ATTRIBUTE_NODE) { System.out.println(nNode.getNodeName()); } } nList = doc.getElementsByTagName("pago10:Pago"); List<CEPPago> pagos = new LinkedList<CEPPago>(); for (int temp = 0; temp < nList.getLength(); temp++) { Node nNode = nList.item(temp); if (nNode.getNodeType() == Node.ELEMENT_NODE) { Element eElement = (Element) nNode; CEPPago pago = new CEPPago(); pago.setFechaPago(parseDate(eElement.getAttribute("FechaPago"))); pago.setFormaDePago(eElement.getAttribute("FormaDePagoP")); pago.setMoneda(eElement.getAttribute("MonedaP")); pago.setMonto(parseDouble(eElement.getAttribute("Monto"))); pago.setRfcEmisorCtaOrd(eElement.getAttribute("RfcEmisorCtaOrd")); pago.setCtaOrdenante(eElement.getAttribute("CtaOrdenante")); pago.setRfcEmisorCtaBen(eElement.getAttribute("RfcEmisorCtaBen")); pago.setCtaBeneficiario(eElement.getAttribute("CtaBeneficiario")); //CEPPagoDocumento documento = new CEPPagoDocumento(); //documento.setPartida(temp+1); //pago.setDocumentoRelacionado(documento); pagos.add(pago); } else if (nNode.getNodeType() == Node.ATTRIBUTE_NODE) { System.out.println(nNode.getNodeName()); } } nList = doc.getElementsByTagName("pago10:DoctoRelacionado"); List<CEPPagoDocumento> documentos = new LinkedList<CEPPagoDocumento>(); for (int temp = 0; temp < nList.getLength(); temp++) { Node nNode = nList.item(temp); if (nNode.getNodeType() == Node.ELEMENT_NODE) { Element eElement = (Element) nNode; CEPPagoDocumento documento = new CEPPagoDocumento(); documento.setPartida(temp+1); documento.setIdDocumento(eElement.getAttribute("IdDocumento")); documento.setFolio(eElement.getAttribute("Folio")); documento.setSerie(eElement.getAttribute("Serie")); documento.setMonedaDR(eElement.getAttribute("MonedaDR")); documento.setMetodoDePagoDR(eElement.getAttribute("MetodoDePagoDR")); documento.setNumParcialidad(parseIntger(eElement.getAttribute("NumParcialidad"))); documento.setSaldoAnt(parseDouble(eElement.getAttribute("ImpSaldoAnt"))); documento.setPagado(parseDouble(eElement.getAttribute("ImpPagado"))); documento.setSaldoInsoluto(parseDouble(eElement.getAttribute("ImpSaldoInsoluto"))); documentos.add(documento); } else if (nNode.getNodeType() == Node.ATTRIBUTE_NODE) { System.out.println(nNode.getNodeName()); } } for (CEPPago pago : pagos){ /* for(CEPPagoDocumento documento:documentos){ if (pago.getDocumentoRelacionado().getPartida()==documento.getPartida()){ pago.setDocumentoRelacionado(documento); break; } } */ pago.setDocumentoRelacionado(documentos); } cabecera.setPagos(pagos); return cabecera; } catch (SAXException e) { e.printStackTrace(System.out); throw new SAXException(e); } catch (ParseException e) { e.printStackTrace(System.out); throw new SAXException(e); } catch (NumberFormatException e) { e.printStackTrace(System.out); throw new SAXException(e); } } private Date parseDate(String fecha) throws ParseException { // 2018-04-03T08:58:45 SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); Date myDate = dateFormat.parse(fecha); return myDate; } private double parseDouble(String numero) throws NumberFormatException { return Float.parseFloat(numero); } private int parseIntger(String numero) throws NumberFormatException { return Integer.parseInt(numero); } }<file_sep>/src/java/com/ftc/gedoc/dao/impl/FacturaCabeceraDAOImpl.java package com.ftc.gedoc.dao.impl; import com.ftc.aq.Conexion; import com.ftc.aq.SpParam; import com.ftc.aq.SpParams; import com.ftc.gedoc.utiles.FacturaCabecera; import com.ftc.gedoc.dao.FacturaCabeceraDAO; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Types; public class FacturaCabeceraDAOImpl implements FacturaCabeceraDAO{ Connection conexion; public FacturaCabeceraDAOImpl(){} @Override public String insertar(FacturaCabecera facturaCabecera, String sesion) throws SQLException { try{ conexion = Conexion.getConexion(); SpParams params = new SpParams(); params.add(new SpParam(1, Types.VARCHAR, facturaCabecera.getPersona())); params.add(new SpParam(2, Types.VARCHAR, facturaCabecera.getCdDocumento())); params.add(new SpParam(3, Types.VARCHAR, facturaCabecera.getLocacion())); params.add(new SpParam(4, Types.VARCHAR, facturaCabecera.getTipo())); params.add(new SpParam(5, Types.VARCHAR, facturaCabecera.getSerie())); params.add(new SpParam(6, Types.VARCHAR, facturaCabecera.getFolio())); params.add(new SpParam(7, Types.VARCHAR, facturaCabecera.getStrFecha())); params.add(new SpParam(8, Types.VARCHAR, facturaCabecera.getFormaDePago())); params.add(new SpParam(9, Types.DOUBLE, facturaCabecera.getSubTotal())); params.add(new SpParam(10, Types.DOUBLE, facturaCabecera.getDescuento())); params.add(new SpParam(11, Types.DOUBLE, Double.parseDouble(facturaCabecera.getTipoCambio()))); params.add(new SpParam(12, Types.DOUBLE, facturaCabecera.getTotal())); params.add(new SpParam(13, Types.VARCHAR, facturaCabecera.getMoneda())); params.add(new SpParam(14, Types.VARCHAR, facturaCabecera.getMetodoDePago())); params.add(new SpParam(15, Types.VARCHAR, facturaCabecera.getLugarExpedicion())); params.add(new SpParam(16, Types.VARCHAR, facturaCabecera.getRfc())); params.add(new SpParam(17, Types.VARCHAR, facturaCabecera.getNombre())); params.add(new SpParam(18, Types.VARCHAR, facturaCabecera.getRfcReceptor())); params.add(new SpParam(19, Types.VARCHAR, facturaCabecera.getNombreReceptor())); params.add(new SpParam(20, Types.DOUBLE, facturaCabecera.getTotalImpuestosTrasladados())); params.add(new SpParam(21, Types.VARCHAR, facturaCabecera.getUuid())); params.add(new SpParam(22, Types.VARCHAR, facturaCabecera.getStrFechaTimbrado())); params.add(new SpParam(23, Types.VARCHAR, sesion)); params.add(new SpParam(24, Types.VARCHAR, null, true)); //referencia params.add(new SpParam(25, Types.VARCHAR, null, true)); //error Object[] vuelta = Conexion.ejecutaStoreProcedureConSalida(conexion, "registraCabeceraFactura", params); if (vuelta != null && vuelta.length == 2) { if (String.valueOf(vuelta[1]).length() > 0) { return String.valueOf(vuelta[1]); } else { return String.valueOf(vuelta[0]); } }else{ throw new SQLException("No se logro ejecutar el procedimiento almacenado.", "20000", -5001); } }catch(SQLException e){ e.printStackTrace(System.out); throw e; } } @Override public int eliminar(FacturaCabecera facturaCabecera) throws SQLException { throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates. } @Override public FacturaCabecera findById(String cdfile) throws SQLException { throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates. } @Override public FacturaCabecera removeById(String cdfile) throws SQLException { throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates. } @Override public int actualizar(FacturaCabecera facturaCabecera) throws SQLException { throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates. } } <file_sep>/src/java/com/ftc/gedoc/bo/NotificacionBO.java package com.ftc.gedoc.bo; import com.ftc.gedoc.exceptions.GeDocBOException; import com.ftc.gedoc.utiles.Notificacion; import java.util.List; public interface NotificacionBO { /*** * registra una nueva notificacion dentro del área de persistencia de datos * @param notificacion Objeto que será persistido * @return Notificacion Objeto con el identificador que se asigno * @throws GeDocBOException */ Notificacion registrar(Notificacion notificacion) throws GeDocBOException; /*** * cancela una notificacion a traves de su codigo de identificacion dentro del campo de persistencia * @param notificacion Objeto de notificacion que se cancelara * @return Notificacion con el nuevo estatus de cancelacion * @throws GeDocBOException */ Notificacion cancelarNotificacion(Notificacion notificacion) throws GeDocBOException; /*** * cancela varias notificaciones a la vez, se hace una iteracion sobre el listado de notificaciones enviadas y se va llamando a cancelaNotificacion * @param notificaciones Listado con las notificaciones a cancelar * @return valor entero que indica cuantas notificaciones se cancelaron con exito * @throws GeDocBOException */ int cancelarNotificaciones(List<Notificacion> notificaciones) throws GeDocBOException; /*** * listado de todas las notificaciones con estatus de pendiente dentro del area de persistencia * @param empresa Empresa de la que se recuperaran las notificaciones * @return java.util.List<Notificacion> * @throws GeDocBOException */ List<Notificacion> notificaciones(String empresa) throws GeDocBOException; /*** * listado de todas las notificaciones con estatus de pendiente dentro del ŕrea de persistencia por tipo de notificacion * @param empresa Empresa de la que se recuperaran las notificaciones * @param estatus Se filtra por el estatus que se pide en el listado * @return * @throws GeDocBOException */ List<Notificacion> notificaciones(String empresa, String estatus) throws GeDocBOException; } <file_sep>/db/ctrldoce_casadelpastel.sql -- ----------------------------------------------------- -- -- BASE DE DATOS PARA CONTROL DE DOCUMENTOS ELECTRONICOS -- -- DESARROLLADO POR FARIAS TELECOMUNICACIONES Y COMPUTO -- -- FECHA DE CREACION: 21 julio 2014 -- -- ARCHIVO SCRIPT PARA LA GENERACIÓN DE ENTORNO DE -- -- TRABAJO DE LA BASE DE DATOS. LENGUAJE MYSQL -- -- ----------------------------------------------------- -- DROP DATABASE IF EXISTS ctrldoce_casadelpastel; CREATE DATABASE ctrldoce_casadelpastel CHARACTER SET = 'latin1'; USE ctrldoce_casadelpastel; GRANT SELECT, INSERT, UPDATE, EXECUTE ON ctrldoce_casadelpastel.* TO 'sysadmindoce'@'localhost' IDENTIFIED BY 'Sv6lOu/Vs'; CREATE FUNCTION getUser() RETURNS VARCHAR(32) DETERMINISTIC RETURN SUBSTRING(user(),1,INSTR(user(),'@')-1); CREATE TABLE kaqcidt ( cdobjeto CHAR(2) NOT NULL, feregistro DATETIME NOT NULL, dsobjeto VARCHAR(70) NOT NULL, idactual CHAR(16) NOT NULL, inactivo CHAR(1) NOT NULL, cdusuari CHAR(32) NOT NULL, programa VARCHAR(45) NOT NULL, tmpstmp DATETIME NOT NULL, PRIMARY KEY(cdobjeto) )ENGINE=INNODB; CREATE TABLE jpem90t ( idsesion VARCHAR(32) NOT NULL, cdusulog CHAR(16) NOT NULL, dsipfrom VARCHAR(15) NOT NULL, dsusulog VARCHAR(100) NOT NULL, cdusuari VARCHAR(32) NOT NULL, programa VARCHAR(45) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idsesion) )ENGINE=INNODB; CREATE TABLE jpem00t ( cdperson CHAR(16) NOT NULL, dsrazsoc VARCHAR(120) NOT NULL, dsrfc VARCHAR(13) NOT NULL, dsfolder VARCHAR(16) NOT NULL, dslogo VARCHAR(35) NOT NULL, isowner ENUM('S','N') NOT NULL DEFAULT 'N', intipprs ENUM('-','C','P') NOT NULL DEFAULT '-', cdusuari VARCHAR(16) NOT NULL, programa VARCHAR(45) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(cdperson) )ENGINE=INNODB; CREATE TABLE jpem10t ( cdperson CHAR(16) NOT NULL, cdcontac CHAR(16) NOT NULL, dsfirst VARCHAR(35) NOT NULL, dslast VARCHAR(60) NOT NULL, dsmail VARCHAR(100) NOT NULL, dstelloc VARCHAR(60) NOT NULL DEFAULT '', dstelmov VARCHAR(14) NOT NULL DEFAULT '', cdusuari CHAR(16) NOT NULL, programa VARCHAR(45) NOT NULL, tmstmp DATETIME NOT NULL, dsipfrom VARCHAR(15) NOT NULL, PRIMARY KEY(cdperson,cdcontac), FOREIGN KEY(cdperson) REFERENCES jpem00t(cdperson) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=INNODB; CREATE TABLE jgrm01t ( cdidegrp CHAR(1) NOT NULL, dsidegrp VARCHAR(35) NOT NULL, ingrpmod INT NOT NULL DEFAULT 0, cdusuari VARCHAR(16) NOT NULL, programa VARCHAR(45) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(cdidegrp) )ENGINE=INNODB; CREATE TABLE jusm01t ( cdperson CHAR(16) NOT NULL, cdcontac CHAR(16) NOT NULL, cdidegrp CHAR(1) NOT NULL, cdusulog VARCHAR(100) NOT NULL, dsvalenc VARCHAR(64) NOT NULL, instatus ENUM('A','E','S') NOT NULL DEFAULT 'A', inusumod INT NOT NULL DEFAULT 0, dsipfrom VARCHAR(15) NOT NULL, cdusuari CHAR(16) NOT NULL, programa VARCHAR(45) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(cdperson,cdcontac), FOREIGN KEY(cdperson,cdcontac) REFERENCES jpem10t(cdperson,cdcontac) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=INNODB; CREATE TABLE jdem10t ( cdperson CHAR(16) NOT NULL, cddocele CHAR(16) NOT NULL, dsfiles VARCHAR(500) NOT NULL COMMENT 'Nombre original de los archivos', dstitle VARCHAR(35) NOT NULL DEFAULT 'Documento', dsobserv VARCHAR(2000) NOT NULL DEFAULT '' COMMENT 'Descripción detallada del documento', instatus CHAR(1) NOT NULL, tschgstt DATETIME NULL, cdusuari CHAR(16) NOT NULL, dsipfrom VARCHAR(15) NOT NULL, cdusumod CHAR(16) NULL, programa VARCHAR(45) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(cdperson,cddocele), FOREIGN KEY(cdperson) REFERENCES jpem00t(cdperson) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=INNODB; CREATE TABLE jsegsot ( cdsolres VARCHAR(64) NOT NULL, dsmail VARCHAR(120) NOT NULL, dsrfc VARCHAR(13) NOT NULL, tsfecsol DATETIME NOT NULL, tsfecdwn DATETIME NOT NULL, instatus CHAR(1) NOT NULL, dsagent VARCHAR(500) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(cdsolres) ); -- START PROCEDURES AND FUNCTIONS DELIMITER // DROP PROCEDURE IF EXISTS ingresaSolicitudReset; CREATE PROCEDURE ingresaSolicitudReset(IN correo VARCHAR(120), IN rfc VARCHAR(13), IN agente VARCHAR(500), OUT referencia VARCHAR(64), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = correo AND cdperson = (SELECT cdperson FROM jpem00t WHERE dsrfc = rfc); IF(existe>0)THEN IF (agente='')THEN SET error = 'Error 50051: No se han especificado todos los valores requeridos.'; ELSE SELECT COUNT(cdsolres) INTO existe FROM jsegsot WHERE dsmail = correo AND DATE(tsfecdwn) <= DATE(CURRENT_TIMESTAMP); IF(existe>0)THEN SET error = 'Error 50054: Hay una solicitud pendiente, no se puede registrar otra.'; ELSE SELECT CURRENT_TIMESTAMP INTO cts; SELECT CONCAT(MD5(CONCAT(cts,'zreset')),MD5(CONCAT(cts,correo))) INTO referencia; INSERT INTO jsegsot (cdsolres,dsmail,dsrfc,tsfecsol,tsfecdwn,instatus,dsagent,tmstmp) VALUES (referencia,correo,rfc,CURRENT_TIMESTAMP,ADDTIME(CURRENT_TIMESTAMP, '24:00:000.0'),'A',agente,cts); SET error = ''; END IF; END IF; ELSE SET error = 'Error 50050: Este usuario no se encuentra registrado.'; END IF; END; // DROP PROCEDURE IF EXISTS aplicaSolicitudReset; CREATE PROCEDURE aplicaSolicitudReset(IN identificador VARCHAR(64), IN valenc VARCHAR(16), IN ipfrom VARCHAR(15), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE contacto CHAR(16) DEFAULT ''; DECLARE rfc VARCHAR(13) DEFAULT ''; DECLARE usuario VARCHAR(100) DEFAULT ''; SELECT COUNT(cdsolres) INTO existe FROM jsegsot WHERE cdsolres = identificador AND DATE(tsfecdwn) <= DATE(CURRENT_TIMESTAMP) ; IF(existe>0)THEN SELECT COUNT(cdsolres) INTO existe FROM jsegsot WHERE cdsolres = identificador AND DATE(tsfecdwn) <= DATE(CURRENT_TIMESTAMP) AND instatus = 'A'; IF(existe>0)THEN SELECT dsmail, dsrfc INTO usuario, rfc FROM jsegsot WHERE cdsolres = identificador; SELECT cdcontac INTO contacto FROM jpem10t WHERE dsmail = usuario; UPDATE jusm01t SET dsvalenc = CONCAT(MD5(CONCAT(contacto,tmstmp)),MD5(valenc)), programa = CONCAT('RSPWD',ipfrom) WHERE cdusulog = usuario AND cdperson = (SELECT cdperson FROM jpem00t WHERE dsrfc = rfc); SET error = ''; ELSE SET error = 'Error 50053: Esta solicitud ya fue aplicada.'; END IF; ELSE SET error = 'Error 50052: Esta solicitud ya no existe.'; END IF; END; // DROP PROCEDURE IF EXISTS listaDocumentos; CREATE PROCEDURE listaDocumentos(IN persona CHAR(16), IN tipo CHAR(1), IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0) THEN IF (persona = '*') THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, cddocele, dsfiles, dstitle,dsobserv, instatus, DATE(A.tmstmp) FROM jdem10t A INNER JOIN jpem00t B ON A.cdperson = B.cdperson WHERE intipprs = tipo ORDER BY empresa, A.tmstmp desc; ELSE SELECT cdperson, nombreEmpresa(cdperson) empresa, cddocele, dsfiles, dstitle,dsobserv, instatus, DATE(tmstmp) FROM jdem10t WHERE cdperson = persona ORDER BY tmstmp desc; END IF; ELSE SELECT 'Sesión no valida.','','','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS listaDocumentosFiltro; CREATE PROCEDURE listaDocumentosFiltro(IN persona CHAR(16), IN tipo CHAR(1), IN fechai DATE, IN fechaf DATE, IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0) THEN IF (persona = '*') THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, cddocele, dsfiles, dstitle,dsobserv, instatus, DATE(A.tmstmp) FROM jdem10t A INNER JOIN jpem00t B ON A.cdperson = B.cdperson WHERE intipprs = tipo AND DATE(A.tmstmp) BETWEEN fechai AND fechaf ORDER BY empresa, A.tmstmp desc; ELSE SELECT cdperson, nombreEmpresa(cdperson) empresa, cddocele, dsfiles, dstitle,dsobserv, instatus, DATE(tmstmp) FROM jdem10t WHERE cdperson = persona AND DATE(tmstmp) BETWEEN fechai AND fechaf ORDER BY tmstmp desc; END IF; ELSE SELECT 'Sesión no valida.','','','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS registraDocumento; CREATE PROCEDURE registraDocumento(IN persona CHAR(16), IN archivo VARCHAR(500), IN titulo VARCHAR(35), IN observaciones VARCHAR(2000), IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT getCodigoApp('DE') INTO referencia; INSERT INTO jdem10t (cdperson,cddocele,dsfiles,dstitle,dsobserv,instatus,cdusuari,dsipfrom,programa,tmstmp) VALUES (persona,referencia,archivo,titulo,observaciones,'A',obtieneUsuario(sesion),obtieneIp(sesion),'registraDocumento',CURRENT_TIMESTAMP); SET error = ''; ELSE SET error = 'Error 50039: La sesión no es correcta. Restablezca la aplicaicón.'; END IF; END; // DROP PROCEDURE IF EXISTS getCodigo; CREATE PROCEDURE getCodigo(IN var_objeto char(2), IN var_programa char(12), OUT var_codigo char(16)) BEGIN DECLARE lon int; DECLARE inprestado int; SELECT COUNT(inactivo) INTO inprestado FROM kaqcidt WHERE inactivo='n' AND cdobjeto=var_objeto; WHILE (inprestado=0) DO SELECT COUNT(inactivo) INTO inprestado FROM kaqcidt WHERE inactivo='n' AND cdobjeto=var_objeto; END WHILE; IF (inprestado>0) THEN SELECT (CAST(SUBSTRING(idactual,1,16) AS UNSIGNED) + CAST(SUBSTRING(SIN(CAST(SUBSTRING(idactual,1,16) AS UNSIGNED)),4,1) AS UNSIGNED))+3 INTO var_codigo FROM kaqcidt WHERE cdobjeto = var_objeto; SELECT CONCAT(REPEAT('0', 16-LENGTH(var_codigo)),var_codigo) INTO var_codigo; UPDATE kaqcidt SET inactivo = 's' , idactual = var_codigo, cdusuari = getUser(), programa = var_programa WHERE cdobjeto = var_objeto; END IF; END; // DROP PROCEDURE IF EXISTS setCodigoApp; CREATE PROCEDURE setCodigoApp(var_cdobjeto char(2)) BEGIN UPDATE kaqcidt SET inactivo = 'n' WHERE cdobjeto = var_cdobjeto; END // DROP FUNCTION IF EXISTS getCodigoApp; CREATE FUNCTION getCodigoApp(tipo char(2)) RETURNS CHAR(16) DETERMINISTIC BEGIN CALL getCodigo(tipo,'function.getCodigo', @x); CALL setCodigoApp(tipo); RETURN @x; END // DROP FUNCTION IF EXISTS obtieneUsuario; CREATE FUNCTION obtieneUsuario(sesion VARCHAR(32)) RETURNS CHAR(16) DETERMINISTIC BEGIN RETURN (SELECT cdusulog FROM jpem90t WHERE idsesion = sesion); END // CREATE FUNCTION obtieneIp(sesion CHAR(32)) RETURNS VARCHAR(15) DETERMINISTIC BEGIN RETURN (SELECT dsipfrom FROM jpem90t WHERE idsesion = sesion); END; // DROP PROCEDURE IF EXISTS registraPersona; CREATE PROCEDURE registraPersona(IN razonsocial VARCHAR(120), IN rfc VARCHAR(13), IN tipo ENUM('C','P'), IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE isdupkey INT DEFAULT 0; BEGIN DECLARE existe INT DEFAULT 0; DECLARE EXIT HANDLER FOR 1062 SET isdupkey = 1; SELECT COUNT(dsrfc) INTO existe FROM jpem00t WHERE dsrfc = rfc; IF(existe>0)THEN SET error = 'Error 50031: Este RFC ya se encuentra registrado, no se puede volver a registrar.'; SET referencia = ''; ELSE SELECT getCodigoApp('PR') INTO referencia; INSERT INTO jpem00t (cdperson,dsrazsoc,dsrfc,dsfolder,dslogo,isowner,intipprs,cdusuari,programa,tmstmp) VALUE (referencia,razonsocial,rfc,referencia,'','N',tipo,obtieneUsuario(sesion),'registraPersona',CURRENT_TIMESTAMP); SET error = ''; END IF; END; IF (isdupkey>0)THEN SET referencia = ''; SET error = 'Error 50030: Existe una llave dúplicada en personas. Notifique a sistemas.'; END IF; END; // DROP FUNCTION IF EXISTS obtienePropietario; CREATE FUNCTION obtienePropietario(sesion VARCHAR(32)) RETURNS CHAR(16) DETERMINISTIC BEGIN DECLARE existe INT DEFAULT 0; DECLARE persona CHAR(16) DEFAULT ''; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT cdusulog INTO persona FROM jpem90t WHERE idsesion = sesion; RETURN (SELECT cdperson FROM jpem10t WHERE cdcontac = persona); ELSE RETURN ''; END IF; END; // DROP PROCEDURE IF EXISTS obtienePersonas; CREATE PROCEDURE obtienePersonas(IN tipo CHAR(1), IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN IF (tipo= '-') THEN SELECT COUNT(cdperson) INTO existe FROM jpem00t WHERE isowner = 'S' AND cdperson = obtienePropietario(sesion); IF (existe=1)THEN SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE cdperson = obtienePropietario(sesion) AND isowner = 'S'; ELSE SELECT 'No se permite ','esta ', 'consulta.','-'; END IF; ELSE SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE intipprs = tipo AND isowner = 'N'; END IF; ELSE SELECT 'Sesión ','no ', 'valida.',''; END IF; END; // DROP PROCEDURE IF EXISTS localizaPersonas; CREATE PROCEDURE localizaPersonas(IN tipo CHAR(1), IN nombre VARCHAR(35), IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE condicion VARCHAR(37) DEFAULT ''; DECLARE micodigo CHAR(16) DEFAULT ''; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT cdperson INTO micodigo FROM jpem10t WHERE cdcontac = (SELECT cdusulog FROM jpem90t WHERE idsesion = sesion); SELECT COUNT(isowner) INTO existe FROM jpem00t WHERE cdperson = micodigo AND isowner = 'S'; IF(existe>0)THEN SELECT CONCAT('%',nombre,'%') INTO condicion; IF(tipo='*') THEN SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE isowner = 'N' AND dsrazsoc LIKE (condicion); ELSE SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE intipprs = tipo AND isowner = 'N' AND dsrazsoc LIKE (condicion); END IF; ELSE SELECT cdperson, dsrazsoc, dsrfc, intipprs FROM jpem00t WHERE cdperson = micodigo; END IF; ELSE SELECT 'Sesión ','no ', 'valida.',''; END IF; END; // DROP PROCEDURE IF EXISTS registraContacto; CREATE PROCEDURE registraContacto(IN persona CHAR(16), IN primero VARCHAR(35), IN segundo VARCHAR(60), IN correo VARCHAR(100), in telefono VARCHAR(60), IN movil VARCHAR(14), IN grupo CHAR(1), IN valenc VARCHAR(16),IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE isdupkey INT DEFAULT 0; BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; DECLARE EXIT HANDLER FOR 1062 SET isdupkey = 1; SELECT COUNT(dsmail) INTO existe FROM jpem10t WHERE dsmail = correo; IF (existe>0)THEN SET error = 'Error 50031: Este correo electrónico ya se encuentra registrado, no se puede volver a registrar.'; SET referencia = ''; ELSE SELECT getCodigoApp('CT') INTO referencia; SELECT CURRENT_TIMESTAMP INTO cts; INSERT INTO jpem10t (cdperson,cdcontac,dsfirst,dslast,dsmail,dstelloc,dstelmov,cdusuari,programa,tmstmp,dsipfrom) VALUES (persona,referencia,primero,segundo,correo,telefono,movil,obtieneUsuario(sesion),'registraContacto',cts,obtieneIp(sesion)); INSERT INTO jusm01t (cdperson,cdcontac,cdidegrp,cdusulog,dsvalenc,instatus,inusumod,dsipfrom,cdusuari,programa,tmstmp) VALUES (persona,referencia,grupo,correo,CONCAT(MD5(CONCAT(referencia,cts)),MD5(valenc)),'A',0,obtieneIp(sesion),obtieneUsuario(sesion),'registraContacto',cts); SET error = ''; END IF; END; IF (isdupkey>0)THEN SET referencia = ''; SET error = 'Error 50030: Existe una llave dúplicada en personas. Notifique a sistemas.'; END IF; END; // DROP PROCEDURE IF EXISTS actualizaContacto; CREATE PROCEDURE actualizaContacto(IN identificador CHAR(16), IN primero VARCHAR(35), IN segundo VARCHAR(60), IN correo VARCHAR(100), IN telefono VARCHAR(60), IN movil VARCHAR(14), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; SELECT COUNT(dsmail) INTO existe FROM jpem10t WHERE dsmail = correo AND cdcontac <> identificador; IF (existe>0)THEN SET error = 'Error 50031: Este correo electrónico ya se encuentra registrado, no se puede volver a registrar.'; ELSE UPDATE jpem10t SET dsfirst = primero, dslast = segundo, dsmail = correo, dstelloc = telefono, dstelmov = movil, programa = CONCAT('ACT',sesion) WHERE cdcontac = identificador; UPDATE jusm01t SET cdusulog = correo, programa = CONCAT('ACT',sesion) WHERE cdcontac = identificador; SET error = ''; END IF; END; // DROP PROCEDURE IF EXISTS obtieneContactos; CREATE PROCEDURE obtieneContactos(IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE persona CHAR(16) DEFAULT ''; DECLARE tipo CHAR(1) DEFAULT ''; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT A.cdperson, intipprs INTO persona, tipo FROM jpem10t A INNER JOIN jpem00t B ON A.cdperson = B.cdperson WHERE cdcontac = (SELECT cdusulog FROM jpem90t WHERE idsesion = sesion); CALL obtieneContactosPersona(persona, tipo, sesion); ELSE SELECT '','','Sesión no valida.','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS obtieneContactosPersona; CREATE PROCEDURE obtieneContactosPersona(IN persona VARCHAR(16), IN tipo CHAR(1), IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN IF (persona = '*') THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, A.cdcontac, dsfirst, dslast, dsmail, dstelloc, dstelmov, cdidegrp FROM jpem10t A INNER JOIN jusm01t B ON A.cdperson = B.cdperson AND A.cdcontac = B.cdcontac INNER JOIN jpem00t C ON A.cdperson = C.cdperson WHERE intipprs = tipo; ELSE SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, A.cdcontac, dsfirst, dslast, dsmail, dstelloc, dstelmov, cdidegrp FROM jpem10t A INNER JOIN jusm01t B ON A.cdperson = B.cdperson AND A.cdcontac = B.cdcontac WHERE A.cdperson = persona AND persona NOT IN (SELECT cdperson FROM jpem00t WHERE isowner = 'S'); END IF; ELSE SELECT '','','Sesión no valida.','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS listaContactos; CREATE PROCEDURE listaContactos(IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, A.cdcontac, dsfirst, dslast, dsmail, dstelloc, dstelmov, cdidegrp FROM jpem10t A INNER JOIN jusm01t B ON A.cdperson = B.cdperson AND A.cdcontac = B.cdcontac WHERE A.cdperson NOT IN (SELECT cdperson FROM jpem00t WHERE isowner = 'S') AND instatus = 'A'; ELSE SELECT '','','Sesión no valida.','','','','',''; END IF; END; // DROP PROCEDURE IF EXISTS listaContactosSuspendidos; CREATE PROCEDURE listaContactosSuspendidos(IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT A.cdperson, nombreEmpresa(A.cdperson) empresa, A.cdcontac, dsfirst, dslast, dsmail, dstelloc, dstelmov, cdidegrp FROM jpem10t A INNER JOIN jusm01t B ON A.cdperson = B.cdperson AND A.cdcontac = B.cdcontac WHERE A.cdperson NOT IN (SELECT cdperson FROM jpem00t WHERE isowner = 'S') AND instatus = 'S'; ELSE SELECT '','','Sesión no valida.','','','','',''; END IF; END; // DROP FUNCTION IF EXISTS nombreEmpresa; CREATE FUNCTION nombreEmpresa(empresa CHAR(16)) RETURNS VARCHAR(100) DETERMINISTIC BEGIN RETURN (SELECT dsrazsoc FROM jpem00t WHERE cdperson = empresa); END; // DROP PROCEDURE IF EXISTS actualizaGrupoContacto; CREATE PROCEDURE actualizaGrupoContacto(IN contacto CHAR(16), IN grupo CHAR(1), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); IF(existe>0)THEN UPDATE jusm01t SET cdidegrp = grupo, programa = CONCAT('NWGRP',grupo,obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); SET error = ''; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS actualizaAccesoContacto; CREATE PROCEDURE actualizaAccesoContacto(IN usuario VARCHAR(120), IN valenc VARCHAR(16), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; DECLARE contacto CHAR(16) DEFAULT ''; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = usuario; IF(existe>0)THEN SELECT cdcontac INTO contacto FROM jpem10t WHERE dsmail = usuario; SELECT COUNT(isowner) INTO existe FROM jpem00t WHERE isowner = 'S' AND cdperson = (SELECT cdperson FROM jpem10t WHERE cdcontac = (SELECT cdusulog FROM jpem90t WHERE idsesion = sesion)); IF (existe>0)THEN UPDATE jusm01t SET dsvalenc = CONCAT(MD5(CONCAT(contacto,tmstmp)),MD5(valenc)), programa = CONCAT('NWPWD',obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = usuario; SET error = ''; ELSE SET error = 'Error 50040: Acceso a operación denegada.'; END IF; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS suspendeAccesoContacto; CREATE PROCEDURE suspendeAccesoContacto(IN contacto CHAR(16), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); IF(existe>0)THEN UPDATE jusm01t SET instatus = 'S', dsvalenc = CONCAT(MD5(CONCAT(contacto,CURRENT_TIMESTAMP)),MD5('USUARIO-SUSPENDIDO')), programa = CONCAT('MDSUS',obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); SET error = ''; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS eliminaAccesoContacto; CREATE PROCEDURE eliminaAccesoContacto(IN contacto CHAR(16), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); IF(existe>0)THEN UPDATE jusm01t SET instatus = 'E', dsvalenc = CONCAT(MD5(CONCAT(CURRENT_TIMESTAMP,'0000000000000000')),MD5('USUARIO-ELIMINADO')), programa = CONCAT('MDELM',obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); SET error = ''; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS activaAccesoContacto; CREATE PROCEDURE activaAccesoContacto(IN contacto CHAR(16), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); IF(existe>0)THEN UPDATE jusm01t SET instatus = 'A', programa = CONCAT('MDACT',obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); SET error = ''; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS whois; CREATE PROCEDURE whois(IN usuario VARCHAR(100), IN valor VARCHAR(16), IN rfc VARCHAR(13), IN ipfrom VARCHAR(15), IN sesion VARCHAR(32), OUT seguridad INT, OUT referencia CHAR(16), OUT propietario CHAR(1), OUT persona CHAR(16), OUT error VARCHAR(255)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(dsrfc) INTO existe FROM jpem00t WHERE dsrfc = rfc; IF (existe>0)THEN SELECT cdperson INTO persona FROM jpem00t WHERE dsrfc = rfc; SELECT COUNT(cdcontac) INTO existe FROM jusm01t WHERE cdusulog = usuario AND dsvalenc = CONCAT(MD5(CONCAT(cdcontac,tmstmp)),MD5(valor)); IF(existe>0)THEN SELECT cdcontac INTO referencia FROM jusm01t WHERE cdusulog = usuario AND dsvalenc = CONCAT(MD5(CONCAT(cdcontac,tmstmp)),MD5(valor)); SELECT COUNT(cdperson) INTO existe FROM jpem10t WHERE cdperson = persona AND cdcontac = referencia; IF(existe>0)THEN SELECT ingrpmod INTO seguridad FROM jgrm01t WHERE cdidegrp = (SELECT cdidegrp FROM jusm01t WHERE cdcontac = referencia); SELECT isowner INTO propietario FROM jpem00t WHERE cdperson = persona; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT COUNT(dsipfrom) INTO existe FROM jpem90t WHERE idsesion = sesion AND dsipfrom = ipfrom; IF(existe>0)THEN SET error = ''; ELSE SET error = 'Error 50043: No se puede validar la sesión. Espere unos minutos para volver a intentarlo.'; END IF; ELSE INSERT INTO jpem90t (idsesion,cdusulog,dsusulog,dsipfrom,cdusuari,programa,tmstmp) VALUES (sesion,referencia,usuario,ipfrom,getUser(),'insertaSesion',CURRENT_TIMESTAMP); SET error = ''; END IF; ELSE SET error = 'Error 50042:Este usuario no esta asociado al cliente.'; END IF; ELSE SET error = 'Error 50041:El usuario o la contraseña son incorrectos.'; END IF; ELSE SET error = 'Error 50040:El registro de empresa no existe.'; END IF; END; // -- SELECT COUNT(cdcontac) FROM jusm01t WHERE cdusulog = '<EMAIL>' AND dsvalenc = CONCAT(MD5(CONCAT(cdcontac,tmstmp)),MD5('cfar52Bio+')); -- CALL whois('<EMAIL>','cfar52Bio+','FAAC750415PZ0','127.0.0.1','b30c1016944d4a629f647da5a75e',@1,@2,@3,@4,@5); DROP PROCEDURE IF EXISTS listaGrupos; CREATE PROCEDURE listaGrupos(IN sesion VARCHAR(32)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT cdidegrp, dsidegrp FROM jgrm01t WHERE cdidegrp NOT IN ('*','A') ORDER BY dsidegrp; ELSE SELECT '','La sesion no existe'; END IF; END; // DROP PROCEDURE IF EXISTS consultaValoresSesion; CREATE PROCEDURE consultaValoresSesion(IN sesion VARCHAR(32), OUT identificador CHAR(16), OUT dominio CHAR(16), OUT razonsocial VARCHAR(100), OUT folder CHAR(16), OUT tipoPersona CHAR(1), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(idsesion) INTO existe FROM jpem90t WHERE idsesion = sesion; IF(existe>0)THEN SELECT A.cdperson, B.cdcontac, A.dsrazsoc, A.dsfolder, A.intipprs INTO dominio, identificador, razonsocial, folder, tipoPersona FROM jpem00t A INNER JOIN jpem10t B ON A.cdperson = B.cdperson INNER JOIN jpem90t C ON B.cdcontac = C.cdusulog WHERE idsesion = sesion; SET error = ''; ELSE SET error = 'Error 50039: La sesión no existe.'; END IF; END; // DROP TABLE IF EXISTS jdem20t; CREATE TABLE jdem20t ( cdperson VARCHAR(32) NOT NULL, cddocele VARCHAR(32) NOT NULL, cdfile VARCHAR(32) NOT NULL, dsfile VARCHAR(120) NOT NULL, dstipo VARCHAR(30) NOT NULL, dsserie VARCHAR(30) NOT NULL, dsfolio INT NOT NULL, dsfecha VARCHAR(30) NOT NULL, dsfrmpag VARCHAR(250) NOT NULL, dbsubtot DOUBLE(13,2) NOT NULL DEFAULT 0, dbdescto DOUBLE(13,2) NOT NULL DEFAULT 0, dbtipcam DOUBLE(13,2) NOT NULL DEFAULT 0, dbtotal DOUBLE(16,2) NOT NULL DEFAULT 0, dsmoneda VARCHAR(30) NOT NULL, dsmetpag VARCHAR(250) NOT NULL, dslugexp VARCHAR(250) NOT NULL, dsrfcemi VARCHAR(13) NOT NULL, dsnomemi VARCHAR(120) NOT NULL, dsrfcrec VARCHAR(13) NOT NULL, dsnomrec VARCHAR(120) NOT NULL, dbimptra DOUBLE(13,2) NOT NULL, dsuuid VARCHAR(40) NULL, dsfectim VARCHAR(30) NULL, programa VARCHAR(45) NOT NULL, tmstmp DATETIME NOT NULL, cdusuari VARCHAR(16) NOT NULL, PRIMARY KEY(cdperson,cddocele,cdfile), FOREIGN KEY(cdperson,cddocele) REFERENCES jdem10t(cdperson,cddocele) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=InnoDb; ALTER TABLE jdem20t MODIFY COLUMN dsfrmpag VARCHAR(250) NOT NULL; ALTER TABLE jdem20t MODIFY COLUMN dsmetpag VARCHAR(250) NOT NULL; ALTER TABLE jdem20t MODIFY COLUMN dslugexp VARCHAR(250) NOT NULL; DROP PROCEDURE IF EXISTS registraCabeceraFactura; CREATE PROCEDURE registraCabeceraFactura(IN persona CHAR(16),IN documento VARCHAR(35),IN archivo VARCHAR(500),IN tipo VARCHAR(100),IN serie VARCHAR(60),IN folio INT, IN fecha VARCHAR(30), IN formaPago VARCHAR(60), IN subTotal DOUBLE(13,2), IN descuento DOUBLE(13,2), IN tipoCambio DOUBLE(13,2), IN total DOUBLE(16,2), IN moneda VARCHAR(30), IN metodo VARCHAR(60), IN expedicion VARCHAR(60), IN rfcemisor VARCHAR(13), IN emisor VARCHAR(120), IN rfcreceptor VARCHAR(13), IN receptor VARCHAR(120),IN impuesto DOUBLE(13,2), IN uuid VARCHAR(40),IN timbre VARCHAR(21), IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE isdupkey INT DEFAULT 0; BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; DECLARE EXIT HANDLER FOR 1062 SET isdupkey = 1; SELECT COUNT(cdfile) INTO existe FROM jdem20t WHERE cdperson=persona AND cddocele=documento; IF (existe>0)THEN SET error = 'Error 50031: Ya existe un registro asociado al documento. No se puede registrar.'; SET referencia = ''; ELSE SELECT getCodigoApp('FL') INTO referencia; SELECT CURRENT_TIMESTAMP INTO cts; INSERT INTO jdem20t (cdperson,cddocele,cdfile,dsfile,dstipo,dsserie,dsfolio,dsfecha,dsfrmpag,dbsubtot,dbdescto,dbtipcam,dbtotal,dsmoneda,dsmetpag,dslugexp,dsrfcemi,dsnomemi,dsrfcrec,dsnomrec, dbimptra,dsuuid,dsfectim,programa,tmstmp,cdusuari) VALUES (persona,documento,referencia,archivo,tipo,serie,folio,fecha,formaPago,subtotal,descuento,tipocambio,total,moneda,metodo,expedicion,rfcemisor,emisor,rfcreceptor,receptor,impuesto,uuid,timbre,'spInsertaFactura',CURRENT_TIMESTAMP,obtieneUsuario(sesion)); SET error = ''; END IF; END; IF (isdupkey>0)THEN SET referencia = ''; SET error = 'Error 50030: Existe una llave dúplicada en facturas. Notifique a sistemas.'; END IF; END; // DELIMITER ; INSERT INTO ctrldoce_casadelpastel.kaqcidt VALUES ('PR',CURRENT_TIMESTAMP,'Controlador de identificacion para clientes','0000000000000009','n',getUser(),'START-BATCH',CURRENT_TIMESTAMP); INSERT INTO ctrldoce_casadelpastel.kaqcidt VALUES ('DE',CURRENT_TIMESTAMP,'Controlador de identificacion para documentos','0000000000000007','n',getUser(),'START-BATCH',CURRENT_TIMESTAMP); INSERT INTO ctrldoce_casadelpastel.kaqcidt VALUES ('CT',CURRENT_TIMESTAMP,'Controlador de identificacion para contactos','0000000000000011','n',getUser(),'START-BATCH',CURRENT_TIMESTAMP); INSERT INTO ctrldoce_casadelpastel.kaqcidt VALUES ('FL',CURRENT_TIMESTAMP,'Controlador de identificacion para archivos','0000000000000009','n',getUser(),'START-BATCH',CURRENT_TIMESTAMP); DELETE FROM ctrldoce_casadelpastel.jgrm01t; -- GRUPOS: [000000.000000.0000] 0 -- Proveedores: [011100.000000.0000] 28672 -- Gestor proveedores [111111.000000.0000] 64512 -- Clientes: [000000.001000.0000] 128 -- Gestor clientes: [000000.111111.0000] 1008 -- Administrador: [000000.000000.1111] 15 -- Gestor no administrador: [111111.111111.0000] 65520 -- Control total: [111100.111100.1111] 62407, 62415 INSERT INTO ctrldoce_casadelpastel.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('C', 'Clientes','128','sysadmindoce','manual',CURRENT_TIMESTAMP); INSERT INTO ctrldoce_casadelpastel.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('P', 'Proveedores','28672','sysadmindoce','manual',CURRENT_TIMESTAMP); INSERT INTO ctrldoce_casadelpastel.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('A', 'Administrador','7','sysadmindoce','manual',CURRENT_TIMESTAMP); INSERT INTO ctrldoce_casadelpastel.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('*', 'Control total','62415','sysadmindoce','manual',CURRENT_TIMESTAMP); -- INSERT INTO ctrldoce_casadelpastel.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('v', 'Gestor de Proveedores','64512','sysadmindoce','manual',CURRENT_TIMESTAMP); -- INSERT INTO ctrldoce_casadelpastel.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('t', 'Gestor de clientes','1008','sysadmindoce','manual',CURRENT_TIMESTAMP); -- INSERT INTO ctrldoce_casadelpastel.jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('s', 'Gestor de documentos','65520','sysadmindoce','manual',CURRENT_TIMESTAMP); -- UPDATE ctrldoce_casadelpastel.jgrm01t SET ingrpmod = 62415 WHERE cdidegrp = '*'; -- Insertar usuarios dueños de la base de datos: INSERT INTO ctrldoce_casadelpastel.jpem00t (cdperson,dsrazsoc,dsrfc,dsfolder,dslogo,isowner,intipprs,cdusuari,programa,tmstmp) VALUE ('0000000000000005','La casa del pastelero','_CPA040617SN9','','','S','-',getUser(),'START-BATCH',CURRENT_TIMESTAMP); INSERT INTO ctrldoce_casadelpastel.jpem10t (cdperson,cdcontac,dsfirst,dslast,dsmail,dstelloc,dstelmov,cdusuari,programa,tmstmp,dsipfrom) VALUE ('0000000000000005','0000000000000007','Administrador','Sistema','<EMAIL>','','',getUser(),'START-BATCH',CURRENT_TIMESTAMP,'0.0.0.0'); INSERT INTO ctrldoce_casadelpastel.jusm01t (cdperson,cdcontac,cdidegrp,cdusulog,dsvalenc,instatus,inusumod,dsipfrom,cdusuari,programa,tmstmp) VALUE ('0000000000000005','0000000000000007','*','<EMAIL>',CONCAT(MD5(CONCAT('0000000000000007',CURRENT_TIMESTAMP)),MD5('sys12*CPAsn9')),'A','0','0.0.0.0',getUser(),'START-BATCH',CURRENT_TIMESTAMP); <file_sep>/src/java/com/ftc/gedoc/bo/impl/CEPArchivoBOImpl.java package com.ftc.gedoc.bo.impl; import com.ftc.aq.Comunes; import com.ftc.gedoc.bo.CEPArchivoBO; import com.ftc.gedoc.bo.ContactoBO; import com.ftc.gedoc.dao.CEPArchivoDAO; import com.ftc.gedoc.dao.CEPCabeceraDAO; import com.ftc.gedoc.dao.CEPConceptoDAO; import com.ftc.gedoc.dao.CEPPagoDAO; import com.ftc.gedoc.dao.impl.CEPArchivoDAOImpl; import com.ftc.gedoc.dao.impl.CEPCabeceraDAOImpl; import com.ftc.gedoc.dao.impl.CEPConceptoDAOImpl; import com.ftc.gedoc.dao.impl.CEPPagoDAOImpl; import com.ftc.gedoc.exceptions.GeDocBOException; import com.ftc.gedoc.exceptions.GeDocDAOException; import com.ftc.gedoc.utiles.Contacto; import com.ftc.services.invoice.modelo.CEPArchivo; import com.ftc.services.invoice.modelo.CEPCabecera; import com.ftc.services.invoice.modelo.CEPConcepto; import com.ftc.services.invoice.modelo.CEPPago; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Calendar; import java.util.Date; import java.util.List; public class CEPArchivoBOImpl implements CEPArchivoBO { @Override public CEPArchivo registraCEP(CEPArchivo cep, CEPCabecera xml) throws GeDocBOException { try{ // 1. Insertar la cabecera CEPArchivoDAO dao = new CEPArchivoDAOImpl(); cep.setIdentificador(Comunes.toMD5(cep.getArchivos().concat(String.valueOf(Calendar.getInstance().getTime())))); cep = dao.registraCEP(cep); if(cep==null){ throw new GeDocBOException("No se ha logrado generar el registro del archivo. El resultado es nulo. "); } // 2. Insertar el contenido XML CEPCabeceraDAO daoCabecera = new CEPCabeceraDAOImpl(); xml.setIdentificador(cep.getIdentificador()); xml = daoCabecera.registraCEP(xml); if (xml==null){ dao.eliminaCEP(cep.getIdentificador()); throw new GeDocBOException("No se ha logrado insertar la cabecera del CEP. El resultado fue nulo.".concat(cep.getIdentificador())); } CEPConceptoDAO daoConcepto = new CEPConceptoDAOImpl(xml.getUuid()); for (CEPConcepto concepto: xml.getConceptos()){ daoConcepto.registraCEPConcepto(concepto); } CEPPagoDAO daoPago = new CEPPagoDAOImpl(xml.getUuid()); for (CEPPago pago: xml.getPagos()){ daoPago.registraPago(pago); } return cep; // 3. Exception si alguno falla } catch(GeDocDAOException e){ throw new GeDocBOException(e.getMessage(), e); } } @Override public List<CEPArchivo> listaCEP(String persona) throws GeDocBOException { try{ CEPArchivoDAO dao = new CEPArchivoDAOImpl(); List<CEPArchivo> archivos = dao.listar(persona); return archivos; }catch(GeDocDAOException e){ throw new GeDocBOException(e.getMessage(), e); } } @Override public CEPArchivo obtieneCEP(String identificador) throws GeDocBOException { try{ CEPArchivoDAO dao = new CEPArchivoDAOImpl(); CEPArchivo archivo = dao.obtieneCEP(identificador); return archivo; }catch(GeDocDAOException e){ throw new GeDocBOException(e.getMessage(), e); } } /*** * * @param user Código de usuario de quien sube el documento CEP, y que tiene que ser el asociado a la persona * en caso de que se haya logado un usuario del proveedor * @param fechaInicial fecha desde para búsqueda de archivos entre un rango de fechas * @param fechaFinal fecha hasta para búsqueda de archivos entre un rango de fechas * @return Listado de contenidos del documento CEP.XML * @throws GeDocBOException */ @Override public List<CEPArchivo> listaCEPByUser(String user, String fechaInicial, String fechaFinal) throws GeDocBOException { String cdPersona = ""; Contacto contacto = null; // localiza al cdperson del usuario: ContactoBO bo = new ContactoBOImpl(); contacto = bo.buscarPorCorreo(user); if (contacto!=null){ cdPersona = contacto.getPersona(); } else { cdPersona = "*"; } List<CEPArchivo> ceps = listaCEP(cdPersona, fechaInicial, fechaFinal); List<CEPArchivo> subList = new ArrayList<CEPArchivo>(); for (CEPArchivo cep:ceps){ if (cep.getUsuario().equals(user)){ subList.add(cep); } } return subList; } @Override public List<CEPArchivo> listaCEP(String persona, String fechaInicio, String fechaFinal) throws GeDocBOException { try{ CEPArchivoDAO dao = new CEPArchivoDAOImpl(); SimpleDateFormat dateFormat = new SimpleDateFormat("dd/MM/yyyy"); List<CEPArchivo> cabeceras; if (fechaInicio!=null && fechaInicio.length()>0){ Date fInicial = dateFormat.parse(fechaInicio); fechaInicio = Comunes.formatoFecha(fInicial, -3); Date fFinal = dateFormat.parse(fechaFinal); fechaFinal = Comunes.formatoFecha(fFinal, -3); cabeceras = dao.listar(persona, fechaInicio, fechaFinal); } else { cabeceras = dao.listar(persona); } return cabeceras; }catch(GeDocDAOException e){ throw new GeDocBOException("Ha ocurrido un fallo al listar los archivos CEP." + e.getMessage(), e); } catch(ParseException e){ throw new GeDocBOException("Imposible listar los archivos CEP, lo se reconocen las fechas. " + fechaInicio + "," + fechaFinal, e); } } @Override public CEPArchivo actualizaCEP(String identificador, String estatus) throws GeDocBOException { try{ CEPArchivoDAO dao = new CEPArchivoDAOImpl(); CEPArchivo cep = dao.obtieneCEP(identificador); if (cep!=null){ cep.setEstatus(estatus); cep = dao.actualizaCEP(cep); return cep; } else { throw new GeDocBOException("El CEP no se ha localizado, verifique. "+identificador); } }catch(GeDocDAOException e){ throw new GeDocBOException("Ha ocurrido un fallo al listar los archivos CEP." + e.getMessage(), e); } } } <file_sep>/README.md ftcgedoc ======== Programas de FTC para gestión de documentos factura ftc/files/.config servidor:localhost base:ctrldoce usuario:sysadmindoce contrasenia:Sv6lOu/Vs cliente:_BIO870307QD0 /ftc/ftcgedoc/doce/_BIO870307QD0 <file_sep>/src/java/com/ftc/gedoc/bo/impl/DocumentoBOImpl.java package com.ftc.gedoc.bo.impl; import com.ftc.gedoc.bo.DocumentoBO; import com.ftc.gedoc.dao.DocumentoDAO; import com.ftc.gedoc.dao.impl.DocumentoDAOImpl; import com.ftc.gedoc.exceptions.GeDocBOException; import com.ftc.gedoc.exceptions.GeDocDAOException; import com.ftc.gedoc.utiles.Documento; import java.util.List; public class DocumentoBOImpl implements DocumentoBO { DocumentoDAO dao = new DocumentoDAOImpl(); @Override public Documento recuperaDocumento(String identificador) throws GeDocBOException { try{ return dao.recuperaDocumento(identificador); }catch(GeDocDAOException e){ throw new GeDocBOException(e); } } @Override public List<Documento> recuperaDocumentos(String elementos) throws GeDocBOException { try{ return dao.recuperaDocumentos(elementos); }catch(GeDocDAOException e){ throw new GeDocBOException(e); } } @Override public int eliminaDocumento(Documento documento) throws GeDocBOException { try{ return dao.eliminaDocumento(documento); }catch(GeDocDAOException e){ throw new GeDocBOException(e); } } @Override public Documento findById(String id) throws GeDocBOException { try{ return dao.findById(id); }catch(GeDocDAOException e){ throw new GeDocBOException(e); } } } <file_sep>/src/java/com/ftc/gedoc/dao/impl/CEPArchivoDAOImpl.java package com.ftc.gedoc.dao.impl; import com.ftc.aq.Conexion; import com.ftc.gedoc.dao.CEPArchivoDAO; import com.ftc.gedoc.exceptions.GeDocDAOException; import com.ftc.gedoc.utiles.UtilDAO; import com.ftc.services.invoice.modelo.CEPArchivo; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.Calendar; import java.util.List; public class CEPArchivoDAOImpl implements CEPArchivoDAO { private final static String SELECT = "SELECT idceparc, cdperson, dstitulo, dsobserv, dsrefarc, dsstatus FROM cep_cabecera "; @Override public CEPArchivo registraCEP(CEPArchivo archivo) throws GeDocDAOException { Connection conexion = null; try{ conexion = Conexion.getConexion(); //registra el CEPArchivo StringBuilder sql = new StringBuilder("INSERT INTO cep_cabecera VALUES ("); sql.append(UtilDAO.coverSimpleTildes(archivo.getIdentificador())).append(", "); sql.append(UtilDAO.coverSimpleTildes(archivo.getPersona())).append(", "); sql.append(UtilDAO.coverSimpleTildes(archivo.getTitulo())).append(", "); // ņapa complemento para registrar al usuario correctamente String user = ""; if (archivo.getObservaciones().contains(":")){ user = archivo.getObservaciones().substring(0, archivo.getObservaciones().indexOf(":")); archivo.setObservaciones(archivo.getObservaciones().substring(archivo.getObservaciones().indexOf(":")+1)); } sql.append(UtilDAO.coverSimpleTildes(archivo.getObservaciones())).append(", "); sql.append(UtilDAO.coverSimpleTildes(archivo.getArchivos())).append(", "); sql.append(UtilDAO.coverSimpleTildes("P")).append(", "); sql.append(user.length()>0?UtilDAO.coverSimpleTildes(user):"getUser()").append(", "); sql.append(UtilDAO.coverSimpleTildes(CEPArchivoDAOImpl.class.getCanonicalName())).append(", "); sql.append("CURRENT_TIMESTAMP"); sql.append(");"); System.out.println("SQL FOR HEADER: "+sql.toString()); PreparedStatement statement = conexion.prepareStatement(sql.toString()); int i = statement.executeUpdate(); return archivo; }catch(SQLException e){ throw new GeDocDAOException(e.getMessage(), e); } finally { if (conexion!=null){ try{ conexion.close(); } catch(SQLException e){ } } } } @Override public CEPArchivo actualizaCEP(CEPArchivo archivo) throws GeDocDAOException { Connection conexion = null; try{ conexion = Conexion.getConexion(); //CEPArchivo almacenado = obtieneCEP(archivo.getIdentificador()); //if (almacenado!=null){ StringBuilder sql = new StringBuilder("UPDATE cep_cabecera SET "); sql.append("dstitulo = ").append(UtilDAO.coverSimpleTildes(archivo.getTitulo())).append(", "); sql.append("dsobserv = ").append(UtilDAO.coverSimpleTildes(archivo.getObservaciones())).append(", "); sql.append("dsrefarc = ").append(UtilDAO.coverSimpleTildes(archivo.getArchivos())).append(", "); sql.append("dsstatus = ").append(UtilDAO.coverSimpleTildes(archivo.getEstatus())).append(", "); sql.append("programa = ").append(UtilDAO.coverSimpleTildes("UPD:".concat(String.valueOf(Calendar.getInstance().getTimeInMillis())))); sql.append(" WHERE idceparc = ").append(UtilDAO.coverSimpleTildes(archivo.getIdentificador())); sql.append(" AND cdperson = ").append(UtilDAO.coverSimpleTildes(archivo.getPersona())); sql.append(";"); PreparedStatement stm = conexion.prepareStatement(sql.toString()); int res = stm.executeUpdate(); return res>0?archivo:null; //} else { // throw new GeDocDAOException("Imposible actualizar un registro que no existe en la base de datos, verifique el identificador. " + archivo.getIdentificador()); //} } catch(SQLException e){ throw new GeDocDAOException("Imposible actualizar la referencia del archivo CEP", e); } finally { if (conexion!=null){ try{ conexion.close(); } catch(SQLException e){} } } } @Override public boolean eliminaCEP(String identificador) throws GeDocDAOException { StringBuilder sql = new StringBuilder("DELETE FROM cep_cabecera WHERE "); sql.append("idceparc = ").append(UtilDAO.coverSimpleTildes(identificador)); Connection conexion = null; try{ conexion = Conexion.getConexion(); PreparedStatement stm = conexion.prepareStatement(sql.toString()); return stm.execute(); } catch (SQLException e){ throw new GeDocDAOException(e.getMessage(), e); } finally { if (conexion!=null){ try{ conexion.close(); } catch(SQLException e){ } } } } /*** * Lista los registros de archivos CEP asociados al proveedor. * @param proveedor Codigo de la persona proveedor * @return Lista de archivos CEP registrados * @throws GeDocDAOException */ @Override public List<CEPArchivo> listar(String proveedor) throws GeDocDAOException { StringBuilder sql = new StringBuilder("SELECT "); sql.append("A.idceparc, A.cdperson, dstitulo, dsobserv, dsrefarc, dsstatus, B.fecha, B.uuid, A.cdusuari, dsrazsoc "); sql.append(" FROM cep_cabecera A "); sql.append(" INNER JOIN cep B "); sql.append(" ON B.idceparc = A.idceparc "); sql.append(" INNER JOIN jpem00t C "); sql.append(" ON A.cdperson = C.cdperson "); if (!proveedor.equals("*")){ sql.append(" WHERE A.cdperson = ").append(UtilDAO.coverSimpleTildes(proveedor)); } sql.append(";"); System.out.println("-----> GET CEP ".concat(sql.toString())); Connection conexion = null; try{ conexion = Conexion.getConexion(); PreparedStatement stm = conexion.prepareStatement(sql.toString()); ResultSet rst = stm.executeQuery(); List<CEPArchivo> lista = new ArrayList<CEPArchivo>(); while(rst.next()){ CEPArchivo cep = mapCEP(rst); lista.add(cep); } return lista; } catch (SQLException e){ throw new GeDocDAOException(e.getMessage(), e); } finally { if (conexion!=null){ try{ conexion.close(); } catch(SQLException e){ } } } } private CEPArchivo mapCEP(ResultSet rst) throws SQLException { CEPArchivo archivo = new CEPArchivo(); archivo.setIdentificador(rst.getString(1)); archivo.setPersona(rst.getString(2)); archivo.setTitulo(rst.getString(3)); archivo.setObservaciones(rst.getString(4)); archivo.setArchivos(rst.getString(5)); archivo.setEstatus(rst.getString(6)); if (rst.getTimestamp(7)!=null){ archivo.setFecha(rst.getTimestamp(7)); } if (rst.getString(8)!=null){ archivo.setUuid(rst.getString(8)); } if (rst.getString(9)!=null){ archivo.setUsuario(rst.getString(9)); } archivo.setNombre(rst.getString(10)); return archivo; } @Override public CEPArchivo obtieneCEP(String identificador) throws GeDocDAOException { StringBuilder sql = new StringBuilder("SELECT A.idceparc, cdperson, dstitulo, dsobserv, dsrefarc, dsstatus, B.fecha, B.uuid, cdusuari FROM cep_cabecera A"); sql.append(" INNER JOIN cep B "); sql.append(" ON B.idceparc = A.idceparc "); sql.append(" WHERE A.idceparc = ").append(UtilDAO.coverSimpleTildes(identificador)).append(";"); System.out.println("-----> GET CEP ".concat(sql.toString())); Connection conexion = null; try{ conexion = Conexion.getConexion(); PreparedStatement stm = conexion.prepareStatement(sql.toString()); ResultSet rst = stm.executeQuery(); if (rst.next()){ return mapCEP(rst); } else { return null; } } catch (SQLException e){ throw new GeDocDAOException(e.getMessage(), e); } finally { if (conexion!=null){ try{ conexion.close(); } catch(SQLException e){ } } } } @Override public List<CEPArchivo> listar(String proveedor, String fechaInicial, String fechaFinal) throws GeDocDAOException { StringBuilder sql = new StringBuilder("SELECT "); sql.append("A.idceparc, A.cdperson, dstitulo, dsobserv, dsrefarc, dsstatus, B.fecha, B.uuid, A.cdusuari, dsrazsoc "); sql.append(" FROM cep_cabecera A "); sql.append(" INNER JOIN cep B "); sql.append(" ON B.idceparc = A.idceparc "); sql.append(" INNER JOIN jpem00t C "); sql.append(" ON A.cdperson = C.cdperson "); StringBuilder where = new StringBuilder(); if (!proveedor.equals("*")){ where = where.append(" WHERE A.cdperson = ").append(UtilDAO.coverSimpleTildes(proveedor)); } if (fechaInicial!=null && fechaInicial.length()>0){ if (where.length()>0){ where.append(" AND "); } else { where.append(" WHERE "); } where.append(" (B.fecha BETWEEN ").append(UtilDAO.coverSimpleTildes(fechaInicial)); where.append(" AND "); where.append(UtilDAO.coverSimpleTildes(fechaFinal)).append(")"); } sql.append(where); sql.append(";"); System.out.println("-----> GET CEP BETWEEN DATES: ".concat(sql.toString())); Connection conexion = null; try{ conexion = Conexion.getConexion(); PreparedStatement stm = conexion.prepareStatement(sql.toString()); ResultSet rst = stm.executeQuery(); List<CEPArchivo> listado = new ArrayList<CEPArchivo>(); while (rst.next()){ CEPArchivo cep = mapCEP(rst); listado.add(cep); } return listado; } catch (SQLException e){ throw new GeDocDAOException(e.getMessage(), e); } finally { if (conexion!=null){ try{ conexion.close(); } catch(SQLException e){ } } } } } <file_sep>/db/ctrldoce-seguridad.sql -- -------------------------------------------------------------------------- -- -- ctrldoce-version 1.2 -- Adecuacion manejo de grupos -- Registro de cambios: -- Modificacion de la tabla ctrldoce.jgrm01t -- . cambiar tamanio del campo cdidegrp de 1 a 16 -- Modificacion de la tabla ctrldoce.jusm01t -- . cambiar tamanio del campo cdidegrp de 1 a 16 -- Incluir tabla de operaciones kaqcopt -- . -- Manejo actual: -- Seguridad en grupos: -- Registro Registro Viáticos a vendedores Usuarios -- Contacto Contacto Caja chica Cambiar passwd -- Ver documento Ver documento Aduanales Suspender/eliminar usuarios -- Subir documento Subir documento Activar usuarios -- Notificacion Notificacion Gestor de grupos -- Estado Estado -- Eliminar documento Eliminar documento -- Descargar documentos Descargar documentos -- GRUPOS POR DEFECTO: [RCVSNEXD.RCVSNEXD.THAS.UCSAG] 0 -- P Proveedores: [01110011.00000000.0000.00000] 28672 -> 7405568 : 15073280 -- Gestor proveedores [11110011.00000000.0000.00000] 64512 -> 2064384 : 31850496 -- C Clientes: [00000000.01100001.0000.00000] 128 -> 24832 : 49664 -- Gestor clientes: [00000000.11110011.0000.00000] 1008 -> 8064 : 124416 -- A Administrador: [00000000.00000000.0000.11111] 7 -> 31 : 31 -- Gestor no administrador: [11111111.11111111.1111.00000] 65520 -> 2072448 : 33554400 -- * Control total: [11110011.11110011.1111.11111] 62415 -> 15790335: 31975423 -- G Gestor de gastos [00000000.00000000.1111.00000] -> 224 : 480 -- 11111100000000000 -- UPDATE jgrm01t SET ingrpmod = 31975423 WHERE cdidegrp = '*'; -- UPDATE jgrm01t SET ingrpmod = 31 WHERE cdidegrp = 'A'; -- UPDATE jgrm01t SET ingrpmod = 49664 WHERE cdidegrp = 'C'; -- UPDATE jgrm01t SET ingrpmod = 480 WHERE cdidegrp = 'G'; -- UPDATE jgrm01t SET ingrpmod = 15073280 WHERE cdidegrp = 'P'; -- -------------------------------------------------------------------------- -- INSERT INTO jgrm01t (cdidegrp,dsidegrp,ingrpmod,cdusuari,programa,tmstmp) VALUES ('G', 'Gestor Gastos','224','sysadmindoce','manual',CURRENT_TIMESTAMP); ALTER TABLE jgrm01t MODIFY COLUMN cdidegrp VARCHAR(32) NOT NULL; ALTER TABLE jusm01t MODIFY COLUMN cdidegrp VARCHAR(32) NOT NULL; CREATE TABLE jgrm01t ( cdidegrp CHAR(1) NOT NULL, dsidegrp VARCHAR(35) NOT NULL, ingrpmod INT NOT NULL DEFAULT 0, cdusuari VARCHAR(16) NOT NULL, programa VARCHAR(45) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(cdidegrp) -- )ENGINE=INNODB; -- CREATE TABLE kaqcopt ( -- cdideope CHAR(1) NOT NULL, -- inmodulo ENUM('CTE','PVR','ADM',' ') NOT NULL, -- dsideope VARCHAR(120) NOT NULL, -- cdusuari VARCHAR(32) NOT NULL, -- tmstmp DATETIME NOT NULL, -- PRIMARY KEY(cdideope) -- ); DROP PROCEDURE IF EXISTS registraGrupo; CREATE PROCEDURE registraGrupo(IN grupo VARCHAR(32), OUT referencia VARCHAR(32), OUT error VARCHAR(200)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdidegrp) INTO existe FROM jgrm01t WHERE dsidegrp = grupo; SET referencia = ''; SET error = ''; IF(existe>0)THEN SET error = 'El registro ya existe, no se puede ingresar.'; ELSE SELECT MD5(grupo) INTO referencia; INSERT INTO jgrm01t VALUES (referencia,grupo,0,'sysadmindoce','sp_',CURRENT_TIMESTAMP); SET error = ''; END IF; END; // DROP PROCEDURE IF EXISTS actualizaGrupo; CREATE PROCEDURE actualizaGrupo(IN identificador VARCHAR(32), IN grupo VARCHAR(35), IN modo INT, OUT error VARCHAR(200)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdidegrp) INTO existe FROM jgrm01t WHERE dsidegrp = grupo; SET error = ''; IF(existe<=0)THEN SET error = 'El registro no existe, no se puede actualizar.'; ELSE UPDATE jgrm01t SET dsidegrp = grupo, ingrpmod = modo WHERE cdidegrp = identificador; SET error = ''; END IF; END; // DROP PROCEDURE IF EXISTS eliminaGrupo; CREATE PROCEDURE eliminaGrupo(IN identificador VARCHAR(32), OUT error VARCHAR(200)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdidegrp) INTO existe FROM jgrm01t WHERE cdidegrp = identificador; SET error = ''; IF(existe<=0)THEN SET error = 'El registro no existe, no se puede eliminar.'; ELSE DELETE FROM jgrm01t WHERE cdidegrp = identificador; SET error = ''; END IF; END; // DROP PROCEDURE IF EXISTS actualizaGrupoContacto; CREATE PROCEDURE actualizaGrupoContacto(IN contacto CHAR(16), IN grupo VARCHAR(32), IN sesion VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE existe INT DEFAULT 0; SELECT COUNT(cdusulog) INTO existe FROM jusm01t WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); IF(existe>0)THEN UPDATE jusm01t SET cdidegrp = grupo, programa = CONCAT('NWGRP',grupo,obtieneIp(sesion),obtieneUsuario(sesion)) WHERE cdusulog = (SELECT dsmail FROM jpem10t WHERE cdcontac = contacto); SET error = ''; ELSE SET error = 'Error 50033: No existe el registro especificado.'; END IF; END; // DROP PROCEDURE IF EXISTS preRegistroContacto; CREATE PROCEDURE preRegistroContacto(IN persona CHAR(16), IN primero VARCHAR(35), IN segundo VARCHAR(60), IN correo VARCHAR(100), in telefono VARCHAR(60), IN movil VARCHAR(14), IN grupo VARCHAR(32), IN valenc VARCHAR(16),IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE referencia VARCHAR(32); DECLARE error VARCHAR(250); CALL registraContacto(persona,primero,segundo,correo,telefono,movil,grupo,valenc,'preregistro',@referencia,@error); IF(referencia<>'')THEN SET referencia = @referencia; UPDATE jpem01t SET instatus = 'P' WHERE cdperson = persona AND cdcontac = @referencia; ELSE SET error = @error; END IF; END; // DROP PROCEDURE IF EXISTS registraContacto; CREATE PROCEDURE registraContacto(IN persona CHAR(16), IN primero VARCHAR(35), IN segundo VARCHAR(60), IN correo VARCHAR(100), in telefono VARCHAR(60), IN movil VARCHAR(14), IN grupo VARCHAR(32), IN valenc VARCHAR(16),IN sesion VARCHAR(32), OUT referencia CHAR(16), OUT error VARCHAR(250)) BEGIN DECLARE isdupkey INT DEFAULT 0; BEGIN DECLARE existe INT DEFAULT 0; DECLARE cts DATETIME; DECLARE EXIT HANDLER FOR 1062 SET isdupkey = 1; SELECT COUNT(dsmail) INTO existe FROM jpem10t WHERE dsmail = correo; IF (existe>0)THEN SET error = 'Error 50031: Este correo electrónico ya se encuentra registrado, no se puede volver a registrar.'; SET referencia = ''; ELSE SELECT getCodigoApp('CT') INTO referencia; SELECT CURRENT_TIMESTAMP INTO cts; INSERT INTO jpem10t (cdperson,cdcontac,dsfirst,dslast,dsmail,dstelloc,dstelmov,cdusuari,programa,tmstmp,dsipfrom) VALUES (persona,referencia,primero,segundo,correo,telefono,movil,obtieneUsuario(sesion),'registraContacto',cts,obtieneIp(sesion)); INSERT INTO jusm01t (cdperson,cdcontac,cdidegrp,cdusulog,dsvalenc,instatus,inusumod,dsipfrom,cdusuari,programa,tmstmp) VALUES (persona,referencia,grupo,correo,CONCAT(MD5(CONCAT(referencia,cts)),MD5(valenc)),'A',0,obtieneIp(sesion),obtieneUsuario(sesion),'registraContacto',cts); SET error = ''; END IF; END; IF (isdupkey>0)THEN SET referencia = ''; SET error = 'Error 50030: Existe una llave dúplicada en personas. Notifique a sistemas.'; END IF; END; // -- ------------------------------------------------------------ -- -- Nueva funcionalidad para manejo de gastos -- -- Administrador de periodos -- -- ------------------------------------------------------------ -- -- Periodo DROP TABLE IF EXISTS jctm11t; DROP TABLE IF EXISTS jctm10t; DROP TABLE IF EXISTS jctm02t; DROP TABLE IF EXISTS jctm01t; CREATE TABLE jctm01t ( idnumper VARCHAR(32) NOT NULL, inanyper INT NOT NULL, innumper INT NOT NULL, dtfecape DATE NOT NULL, dtfeccie DATE NULL, instatus CHAR(1) NOT NULL, dscoment VARCHAR(2000) NOT NULL, cdusuari VARCHAR(32) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idnumper) )ENGINE=InnoDB; -- Cifra control de periodo cerrado CREATE TABLE jctm02t ( idnumper VARCHAR(32) NOT NULL, innumreg INT NOT NULL, dbmonto DOUBLE NOT NULL, cdusuari VARCHAR(32) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idnumper), FOREIGN KEY(idnumper) REFERENCES jctm01t(idnumper) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=INNODB; DROP TABLE IF EXISTS jctm11t; DROP TABLE IF EXISTS jctm10t; DROP TABLE IF EXISTS jctm09t; CREATE TABLE jctm09t ( idreggas VARCHAR(32) NOT NULL, dsasocia VARCHAR(32) NOT NULL, idnumper VARCHAR(32) NOT NULL, intipgas CHAR(2) NOT NULL, cdusuari VARCHAR(32) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idreggas), FOREIGN KEY(idnumper) REFERENCES jctm01t(idnumper) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=INNODB; ALTER TABLE jctm09t ADD COLUMN instatus ENUM('A','C',' ') NOT NULL DEFAULT 'A' AFTER intipgas; ALTER TABLE jctm09t ADD COLUMN fefecreg DATE NOT NULL AFTER intipgas; ALTER TABLE jctm09t ADD COLUMN dsdocto VARCHAR(30) NOT NULL DEFAULT '' AFTER fefecreg; ALTER TABLE jctm09t ADD COLUMN dsrefdoc VARCHAR(60) NOT NULL DEFAULT '' AFTER dsdocto; DROP PROCEDURE IF EXISTS registraCabecera; CREATE PROCEDURE registraCabecera(IN periodo VARCHAR(32), IN asociadoA VARCHAR(250), IN tipo VARCHAR(2), IN fecha DATE, IN documento VARCHAR(30), IN referenciaDocumento VARCHAR(60), OUT referencia CHAR(32), OUT error VARCHAR(250)) BEGIN SELECT MD5(getCodigoApp('PC')) INTO referencia; INSERT INTO jctm09t (idreggas,dsasocia,idnumper,intipgas,fefecreg,dsdocto,dsrefdoc,instatus,cdusuari,tmstmp) VALUES (referencia,asociadoA,periodo,tipo,fecha,documento,referenciaDocumento,'A',getUser(),CURRENT_TIMESTAMP); SET error = ''; END; // DROP PROCEDURE IF EXISTS cierraRegistroCabecera; CREATE PROCEDURE cierraRegistroCabecera(IN cabecera VARCHAR(32), OUT estatus CHAR(1), OUT error CHAR(1)) BEGIN DECLARE existe INT; SELECT COUNT(idreggas) INTO existe FROM jctm09t WHERE idreggas = cabecera AND instatus = 'A'; IF(existe>0) THEN -- --> cafaray 221217: AUTORIZACION MASIVA DE DETALLE DE REGISTROS QUE NO FUERON AUTORIZADOS PREVIAMENTE: UPDATE jctm10t SET instatus = 'A', dsautori = 'ok - auto.cierre' WHERE instatus <> 'A' AND IFNULL(dsautori,'') = ''; UPDATE jctm09t SET instatus = 'C' WHERE idreggas = cabecera; SET estatus = 'C'; SET error = ''; ELSE SET estatus = ' '; SET error = 'El registro no existe. Verifique.'; END IF; END // 9BF65976421855BADFB427434A6A41C4 CE01CAF80D66CC93769B1D51C95246D3 -- Registro del periodo CREATE TABLE jctm10t ( idreggas VARCHAR(32) NOT NULL, idregper VARCHAR(32) NOT NULL, dsregper VARCHAR(250) NOT NULL, intipreg CHAR(2) NOT NULL, dtfecreg DATE NOT NULL, dbimpreg DOUBLE NOT NULL DEFAULT 0, dbimpues DOUBLE NOT NULL DEFAULT 0, dsautori VARCHAR(120) NOT NULL DEFAULT '', instatus CHAR(1) NOT NULL, dsnotreg VARCHAR(2000) NOT NULL, cdusuari VARCHAR(32) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idreggas,idregper), FOREIGN KEY(idreggas) REFERENCES jctm09t(idreggas) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=INNODB; -- Evidencia de registro de periodo CREATE TABLE jctm11t ( idreggas VARCHAR(32) NOT NULL, idregper VARCHAR(32) NOT NULL, iddocele VARCHAR(32) NOT NULL, cdusuari VARCHAR(32) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idreggas,idregper), FOREIGN KEY(idreggas,idregper) REFERENCES jctm10t(idreggas,idregper) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=INNODB; DROP VIEW IF EXISTS jctc10v; CREATE VIEW jctc10v AS SELECT A.idreggas, A.idregper, A.intipreg, A.dtfecreg, A.dbimpreg, A.dbimpues, A.dsautori, A.instatus, A.dsnotreg, B.iddocele, A.dsregper FROM jctm10t A LEFT JOIN jctm11t B ON A.idreggas = B.idreggas AND A.idregper = B.idregper ORDER BY A.dtfecreg; INSERT INTO kaqcidt VALUES ('PD',CURRENT_TIMESTAMP,'Controlador de periodos','0000000000000009','n',getUser(),'START-BATCH',CURRENT_TIMESTAMP); INSERT INTO kaqcidt VALUES ('PC',CURRENT_TIMESTAMP,'Controlador de periodos-cabecera','0000000000000013','n',getUser(),'START-BATCH',CURRENT_TIMESTAMP); INSERT INTO kaqcidt VALUES ('RC',CURRENT_TIMESTAMP,'Controlador de periodos-registro','0000000000000013','n',getUser(),'START-BATCH',CURRENT_TIMESTAMP); INSERT INTO jctm01t VALUES ((SELECT getCodigoApp('PD')), 2014, 10, CURRENT_DATE, NULL, 'A', '', getUser(), CURRENT_TIMESTAMP); -- SELECT B.idnumper, B.idregper, B.intipreg, B.dtfecreg, B.dbimpreg, B.dbimpues, B.instatus, B.dsnotreg, D.dsfiles -- FROM jctm10t B INNER JOIN jctm11t C ON B.idnumper = C.idnumper AND B.idregper = C.idregper -- LEFT JOIN jdem10t D ON C.iddocele = D.cddocele -- ORDER BY B.dtfecreg; DROP PROCEDURE IF EXISTS insertaRegistro; CREATE PROCEDURE insertaRegistro(IN cabecera VARCHAR(32), IN descripcion VARCHAR(250), IN tipo VARCHAR(2), IN fecha DATE, IN importe DOUBLE, IN impuesto DOUBLE, IN nota VARCHAR(30), IN autoriza VARCHAR(120), OUT referencia VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE cts DATETIME; SELECT CURRENT_TIMESTAMP INTO cts; SELECT MD5(CONCAT(getCodigoApp('RC'),cts)) INTO referencia; INSERT INTO jctm10t (idreggas,idregper,dsregper,intipreg,dtfecreg,dbimpreg,dbimpues,instatus,dsautori,dsnotreg,cdusuari,tmstmp) VALUES (cabecera,referencia,descripcion,tipo,fecha,importe,impuesto,'P',autoriza,nota,getUser(),CURRENT_TIMESTAMP); SET error = ''; END; // DROP PROCEDURE IF EXISTS insertaEvidencia; CREATE PROCEDURE insertaEvidencia(IN cabecera VARCHAR(32), IN registro VARCHAR(32), IN evidencia VARCHAR(32), OUT referencia VARCHAR(32), OUT error VARCHAR(250)) BEGIN INSERT INTO jctm11t (idreggas,idregper,iddocele,cdusuari,tmstmp) VALUES (cabecera,registro,evidencia,getUser(),CURRENT_TIMESTAMP); SET error = ''; END; // -- CALL insertaRegistro('0000000000000028','Nota de gasto','-1','2014-11-03',125.0,61.72,'Nota','Najar',@referencia,@error); -- SELECT @referencia, @error; DROP PROCEDURE IF EXISTS eliminaRegistro; CREATE PROCEDURE eliminaRegistro(IN registro VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE hayEvidencia INT; SELECT COUNT(iddocele) INTO hayEvidencia FROM jctm11t WHERE idregper = registro; IF (hayEvidencia>0) THEN DELETE FROM jdem20t WHERE cddocele IN (SELECT iddocele FROM jctm11t WHERE idregper = registro); DELETE FROM jctm11t WHERE idregper = registro; END IF; DELETE FROM jctm10t WHERE idregper = registro; SET error = ''; END; // DROP PROCEDURE IF EXISTS registraCifraControl; CREATE PROCEDURE registraCifraControl(IN periodo VARCHAR(16), IN registros INT, IN monto DOUBLE, OUT error VARCHAR(200)) BEGIN DECLARE existePeriodoActivo INT; SELECT COUNT(idnumper) INTO existePeriodoActivo FROM jctm01t WHERE idnumper = periodo AND instatus = 'A'; IF(existePeriodoActivo > 0)THEN SELECT COUNT(idnumper) INTO existePeriodoActivo FROM jctm02t WHERE idnumper = periodo; IF(existePeriodoActivo) THEN UPDATE jctm02t SET innumreg = registros, dbmonto = monto WHERE idnumper = periodo; SET error = ''; ELSE INSERT INTO jctm02t VALUES (periodo, registros,monto,getUser(), CURRENT_TIMESTAMP); SET error = ''; END IF; ELSE SET error = 'El periodo no existe o no esta activo'; END IF; END; // DROP PROCEDURE IF EXISTS cerrarPeriodo; CREATE PROCEDURE cerrarPeriodo(IN periodo VARCHAR(16), IN comentario VARCHAR(2000), OUT fecha DATE, OUT estatus CHAR(1), OUT error VARCHAR(200)) BEGIN DECLARE existePeriodoActivo INT; SELECT COUNT(idnumper) INTO existePeriodoActivo FROM jctm01t WHERE idnumper = periodo AND instatus = 'A'; IF(existePeriodoActivo > 0)THEN SELECT CURRENT_DATE INTO fecha; SELECT 'C' INTO estatus; UPDATE jctm01t SET dscoment = comentario, dtfeccie = fecha, instatus = estatus WHERE idnumper = periodo; SET error = ''; ELSE SET error = 'El periodo no existe o no esta activo'; END IF; END; // -- INSERT INTO jctm01t VALUES ((SELECT getCodigoApp('PD')), 2014, 10, CURRENT_DATE, NULL, 'A', '', getUser(), CURRENT_TIMESTAMP); DROP PROCEDURE IF EXISTS abrirPeriodo; CREATE PROCEDURE abrirPeriodo(IN anio INT, IN periodo INT, OUT identificador VARCHAR(32), OUT fecha DATE, OUT estatus CHAR(1), OUT comentarios VARCHAR(2000), OUT error VARCHAR(200)) BEGIN DECLARE existePeriodoActivo INT; DECLARE fechaHora DATETIME; SELECT COUNT(idnumper) INTO existePeriodoActivo FROM jctm01t WHERE instatus = 'A'; IF(existePeriodoActivo > 0)THEN SET error = 'Existe un periodo activo, no se puede abrir otro, primero cierre el anterior.'; ELSE SELECT MD5(getCodigoApp('PD')) INTO identificador; SELECT CURRENT_DATE INTO fecha; SELECT 'A' INTO estatus; SELECT CURRENT_TIMESTAMP INTO fechaHora; SELECT CONCAT('Se abre el periodo con exito a las ', fechaHora) INTO comentarios; INSERT INTO jctm01t VALUES (identificador, anio, periodo, fecha, NULL, estatus, comentarios, getUser(), CURRENT_TIMESTAMP); SET error = ''; END IF; END; // <file_sep>/src/java/com/ftc/gedoc/dao/impl/CEPCabeceraDAOImpl.java package com.ftc.gedoc.dao.impl; import com.ftc.aq.Comunes; import com.ftc.aq.Conexion; import com.ftc.gedoc.dao.CEPCabeceraDAO; import com.ftc.gedoc.exceptions.GeDocDAOException; import com.ftc.gedoc.utiles.UtilDAO; import com.ftc.services.invoice.modelo.CEPCabecera; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.Date; import java.util.List; public class CEPCabeceraDAOImpl implements CEPCabeceraDAO { private static final String SELECT_CEP = "SELECT uuid,version,serie,folio,fecha,subTotal,moneda ,total,tipoDeComprobante,lugarExpedicion,xmlnsPago10,rfcEmisor,\n" + " nombreEmisor,regimenFiscalEmisor,rfcReceptor,nombreReceptor,usoCFDIReceptor,rfcProvCertif,versionTibreFiscal,\n" + " fechaTimbrado,noCertificadoSAT,versionPagos,cep.idceparc \n" + " FROM cep \n"; public CEPCabeceraDAOImpl() { } @Override public CEPCabecera registraCEP(CEPCabecera cep) throws GeDocDAOException { Connection conexion = null; try{ conexion = Conexion.getConexion(); //registra la cabecera del registro StringBuilder sql = new StringBuilder("INSERT INTO cep VALUES ("); sql.append(UtilDAO.coverSimpleTildes(cep.getUuid())).append(", "); sql.append(cep.getVersion()).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getSerie())).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getFolio())).append(", "); sql.append(UtilDAO.coverSimpleTildes(Comunes.formatoFecha(cep.getFecha(),-3))).append(", "); sql.append(cep.getSubTotal()).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getMoneda())).append(", "); sql.append(cep.getTotal()).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getTipoDeComprobante())).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getLugarExpedicion())).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getXmlnsPago10())).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getRfcEmisor())).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getNombreEmisor())).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getRegimenFiscalEmisor())).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getRfcReceptor())).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getNombreReceptor())).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getUsoCFDIReceptor())).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getRfcProvCertif())).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getVersionTimbreFiscal())).append(", "); sql.append(UtilDAO.coverSimpleTildes(Comunes.formatoFecha(cep.getFechaTimbrado(),-3))).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getNoCertificadoSAT())).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getVersionPagos())).append(", "); sql.append(UtilDAO.coverSimpleTildes(cep.getIdentificador())); sql.append(");"); System.out.println("SQL for Insert CEP:" + sql.toString()); PreparedStatement statement = conexion.prepareStatement(sql.toString()); statement.executeUpdate(); return cep; }catch(SQLException e){ throw new GeDocDAOException(e.getMessage(), e); } finally { if (conexion!=null){ try{ conexion.close(); } catch(SQLException e){ } } } } @Override public CEPCabecera actualizaCEP(CEPCabecera cep) throws GeDocDAOException{ eliminaCEP(cep.getUuid()); registraCEP(cep); return cep; } @Override public CEPCabecera obtieneCEP(String uuid) throws GeDocDAOException{ StringBuilder sql = new StringBuilder(SELECT_CEP); sql.append(" WHERE uuid = ").append(UtilDAO.coverSimpleTildes(uuid)).append(";"); System.out.println("-----> GET CEP ".concat(sql.toString())); Connection conexion = null; try{ conexion = Conexion.getConexion(); PreparedStatement stm = conexion.prepareStatement(sql.toString()); ResultSet rst = stm.executeQuery(); if (rst.next()){ return mapCep(rst); } else { return null; } } catch (SQLException e){ throw new GeDocDAOException(e.getMessage(), e); } finally { if (conexion!=null){ try{ conexion.close(); } catch(SQLException e){ } } } } @Override public void eliminaCEP(String uuid) throws GeDocDAOException{ StringBuilder sql = new StringBuilder("DELETE FROM cep WHERE "); sql.append("uuid = ").append(UtilDAO.coverSimpleTildes(uuid)); Connection conexion = null; try{ conexion = Conexion.getConexion(); PreparedStatement stm = conexion.prepareStatement(sql.toString()); stm.execute(); } catch (SQLException e){ throw new GeDocDAOException(e.getMessage(), e); } finally { if (conexion!=null){ try{ conexion.close(); } catch(SQLException e){ } } } } /*** * * @param proveedor RFC del emisor del documento que representa al proveedor, y que tiene que ser el asociado a la persona * en caso de que se haya logado un usuario del proveedor * @return Listado de contenidos del documento CEP.XML * @throws GeDocDAOException */ @Override public List<CEPCabecera> listaCEP(String proveedor) throws GeDocDAOException{ StringBuilder sql = new StringBuilder(SELECT_CEP); sql.append(" WHERE rfcEmisor= ").append(UtilDAO.coverSimpleTildes(proveedor)).append(";"); System.out.println("-----> GET CEP ".concat(sql.toString())); Connection conexion = null; try{ conexion = Conexion.getConexion(); PreparedStatement stm = conexion.prepareStatement(sql.toString()); ResultSet rst = stm.executeQuery(); List<CEPCabecera> lista = new ArrayList<CEPCabecera>(); while(rst.next()){ CEPCabecera cep = mapCep(rst); lista.add(cep); } return lista; } catch (SQLException e){ throw new GeDocDAOException(e.getMessage(), e); } finally { if (conexion!=null){ try{ conexion.close(); } catch(SQLException e){ } } } } /*** * * @param proveedor RFC del emisor del documento que representa al proveedor, y que tiene que ser el asociado a la persona * @param fechaInicial fecha de inicio en el rango de busqueda * @param fechaFinal fecha límite para el rango de busqueda * en caso de que se haya logado un usuario del proveedor * @return Listado de contenidos del documento CEP.XML * @throws GeDocDAOException */ @Override public List<CEPCabecera> listaCEP(String proveedor, Date fechaInicial, Date fechaFinal) throws GeDocDAOException{ StringBuilder sql = new StringBuilder(SELECT_CEP); sql.append(" WHERE rfcEmisor= ").append(UtilDAO.coverSimpleTildes(proveedor)); sql.append(" AND fecha >= ").append(UtilDAO.coverSimpleTildes(Comunes.date2String(fechaInicial, -3))); sql.append(" AND fecha <= ").append(UtilDAO.coverSimpleTildes(Comunes.date2String(fechaFinal, -3))).append(";"); System.out.println("-----> GET CEP ".concat(sql.toString())); Connection conexion = null; try{ conexion = Conexion.getConexion(); PreparedStatement stm = conexion.prepareStatement(sql.toString()); ResultSet rst = stm.executeQuery(); List<CEPCabecera> lista = new ArrayList<CEPCabecera>(); while(rst.next()){ CEPCabecera cep = mapCep(rst); lista.add(cep); } return lista; } catch (SQLException e){ throw new GeDocDAOException(e.getMessage(), e); } finally { if (conexion!=null){ try{ conexion.close(); } catch(SQLException e){ } } } } private CEPCabecera mapCep(ResultSet rst) throws SQLException{ CEPCabecera cep = new CEPCabecera(); cep.setUuid(rst.getString(1)); cep.setVersion(rst.getString(2)); cep.setSerie(rst.getString(3)); cep.setFolio(rst.getString(4)); cep.setFecha(rst.getDate(5)); cep.setSubTotal(rst.getDouble(6)); cep.setMoneda(rst.getString(7)); cep.setTotal(rst.getDouble(8)); cep.setTipoDeComprobante(rst.getString(9)); cep.setLugarExpedicion(rst.getString(10)); cep.setXmlnsPago10(rst.getString(11)); cep.setRfcEmisor(rst.getString(12)); cep.setNombreEmisor(rst.getString(13)); cep.setRegimenFiscalEmisor(rst.getString(14)); cep.setRfcReceptor(rst.getString(15)); cep.setNombreReceptor(rst.getString(16)); cep.setUsoCFDIReceptor(rst.getString(17)); cep.setRfcProvCertif(rst.getString(18)); cep.setVersionTimbreFiscal(rst.getString(19)); cep.setFechaTimbrado(rst.getDate(20)); cep.setNoCertificadoSAT(rst.getString(21)); cep.setVersionPagos(rst.getString(22)); cep.setIdentificador(rst.getString(23)); return cep; } @Override public boolean existeUUID(String uuid) throws GeDocDAOException { String sql = "SELECT COUNT(uuid) FROM cep WHERE uuid = "; sql = sql.concat(UtilDAO.coverSimpleTildes(uuid)); Connection conexion = null; try{ conexion = Conexion.getConexion(); PreparedStatement stm = conexion.prepareStatement(sql); ResultSet rst = stm.executeQuery(); if (rst.next()){ int count = rst.getInt(1); return count>0; } else { return false; } }catch(SQLException e){ throw new GeDocDAOException(e.getMessage(), e); } finally { try{ if (conexion!=null){ conexion.close(); } }catch(SQLException e){} } } } <file_sep>/src/java/com/ftc/gedoc/dao/PeriodoDAO.java package com.ftc.gedoc.dao; import com.ftc.gedoc.exceptions.GeDocDAOException; import com.ftc.gedoc.utiles.Periodo; import com.ftc.gedoc.utiles.PeriodoCabecera; import com.ftc.gedoc.utiles.PeriodoCifraControl; import com.ftc.gedoc.utiles.PeriodoRegistro; import com.ftc.gedoc.utiles.CifraControl; import com.ftc.gedoc.utiles.CifraControlAjuste; import java.util.Date; import java.util.List; import java.util.Map; public interface PeriodoDAO { List<Periodo> listado() throws GeDocDAOException; Periodo abrir(Periodo periodo) throws GeDocDAOException; Periodo cerrar(Periodo periodo) throws GeDocDAOException; Periodo encuentraPorId(String id) throws GeDocDAOException; Periodo activo()throws GeDocDAOException; List<PeriodoRegistro> listaRegistrosPeriodo(Periodo periodo)throws GeDocDAOException; PeriodoCifraControl obtieneCifraControl(String id) throws GeDocDAOException; PeriodoCifraControl insertaCifraControl(String id, PeriodoCifraControl cifraControl) throws GeDocDAOException; PeriodoCifraControl actualizaCifraControl(String id, PeriodoCifraControl cifraControl) throws GeDocDAOException; List<PeriodoCabecera> listaCabeceras(String id) throws GeDocDAOException; List<PeriodoCabecera> listaCabeceras(String id, String tipoGasto) throws GeDocDAOException; //List<PeriodoCabecera> listaCabecerasConImporte(String id, String... params) throws GeDocDAOException; List<PeriodoCabecera> listaCabecerasConImporte(String id, String tipoGasto, String... params) throws GeDocDAOException; PeriodoCabecera registraCabecera(String id, PeriodoCabecera periodoCabecera) throws GeDocDAOException; void eliminaCabecera(PeriodoCabecera periodoCabecera) throws GeDocDAOException; void eliminaCabecera(String idCabecera) throws GeDocDAOException; void eliminaCabeceras(String id) throws GeDocDAOException; PeriodoCabecera encuentraCabeceraPorId(String idCabecera) throws GeDocDAOException; PeriodoCabecera cierraCabecera(String idCabecera) throws GeDocDAOException; PeriodoCabecera encuentraPorRegisro(String idRegistro) throws GeDocDAOException; List<PeriodoRegistro> listaRegistros(String id) throws GeDocDAOException; List<PeriodoRegistro> insertaRegistros(String id, List<PeriodoRegistro> periodoRegistros) throws GeDocDAOException; PeriodoRegistro insertaRegistro(String id, PeriodoRegistro periodoRegistro) throws GeDocDAOException; void eliminaRegistro(PeriodoRegistro periodoRegistro) throws GeDocDAOException; void eliminaRegistros(String id) throws GeDocDAOException; PeriodoRegistro encuentraRegistroPorId(String idRegistro) throws GeDocDAOException; List<PeriodoRegistro> pendientesAprobacion(String idCabecera) throws GeDocDAOException; List<String> listaAsignados() throws GeDocDAOException; Map<String, String> listadoTipoComprobante(String tipoGasto) throws GeDocDAOException; PeriodoRegistro actualizaTipoComprobanteRegistro(PeriodoRegistro periodoRegistro, String tipoComprobante) throws GeDocDAOException; PeriodoRegistro actualizaAutorizaRegistro(PeriodoRegistro periodoRegistro, String autoriza) throws GeDocDAOException; PeriodoRegistro actualizaEstadoRegistro(PeriodoRegistro periodoRegistro, String estado) throws GeDocDAOException; CifraControl getCifraControl(String idPeriodo) throws GeDocDAOException; Periodo encuentraPorFecha(String valor) throws GeDocDAOException; CifraControl cierreCifraControl(Periodo periodo) throws GeDocDAOException; String cierreCifraControlAjuste(Periodo periodo) throws GeDocDAOException; List<CifraControlAjuste> getCifraControlAjuste(String idPeriodo) throws GeDocDAOException; PeriodoCabecera cierraCabeceraAjuste(String idCabecera) throws GeDocDAOException; Periodo getPeriodo(int any, int mes) throws GeDocDAOException; List<PeriodoCabecera> getCabecerasAgrupadasPorAsociado(String idPeriodo) throws GeDocDAOException; List<PeriodoCabecera> getCabecerasPorAsociado(String idPeriodo, String asociado) throws GeDocDAOException; List<PeriodoCabecera> getCabeceras(String...params) throws GeDocDAOException; } <file_sep>/web/js/uploadFiles.js $(function() { // Variable to store your files var files = new Array(); var x = 0; // Add events //$('input[type=file]').on('change', prepareUpload); //$('form').on('submit', uploadFiles); $("#enviar").button().click(function(event){ event.preventDefault(); uploadFiles(event); }); var cont = 1; $("#adRow").button().click(function(event) { event.preventDefault(); var newTxt = $('<li><label for="gasto_xml_'+cont+'">XML </label><input type="file" name="gasto_xml_'+cont+'" class="inputfilechange" id="gasto_xml_'+cont+'" multiple><br /><label for="gasto_pdf_'+cont+'">PDF </label><input type="file" name="gasto_pdf_'+cont+'" class="inputfilechange" id="gasto_pdf_'+cont+'" multiple></li>'); $("#a1").append(newTxt); cont++; //$(".inputfilechange").on('change', function(e){prepareUpload(e)} ); $(".inputfilechange").on('change', prepareUpload); }); // Grab the files and set them to our variable $(".inputfilechange").on('change', prepareUpload); function prepareUpload(event) { files[x++] = event.target.files; } // Catch the form submit and upload the files function uploadFiles(event) { //console.info(event); event.stopPropagation(); // Stop stuff happening event.preventDefault(); // Totally stop stuff happening // START A LOADING SPINNER HERE // Create a formdata object and add the files //var data = new FormData(); var data = new FormData($("#FORM_LOAD_FILES")[0]); //$.each(files, function(key, value) { // data.append(key, value); //}); console.info(data); $.ajax({ url: '../ws/files/upload.do?files', type: 'POST', data: data, cache: false, processData: false, // Don't process the files contentType: false, // Set content type to false as jQuery will tell the server its a query string request success: function(data, textStatus, jqXHR) { if (typeof data.error === 'undefined') { // Success so call function to process the form console.log("DATA: " + data); submitForm(event, data); } else { // Handle errors here console.log('ERRORS: ' + data.error); } }, error: function(jqXHR, textStatus, errorThrown) { // Handle errors here console.log('ERRORS: ' + textStatus); // STOP LOADING SPINNER } }); } function submitForm(event, data) { // Create a jQuery object from the form $form = $(event.target); // Serialize the form data var formData = $form.serialize(); // You should sterilise the file names $.each(data.files, function(key, value) { formData = formData + '&filenames[]=' + value; }); $.ajax({ url: 'submit.php', type: 'POST', data: formData, cache: false, dataType: 'json', success: function(data, textStatus, jqXHR) { if (typeof data.error === 'undefined') { // Success so call function to process the form console.log('SUCCESS: ' + data.success); } else { // Handle errors here console.log('ERRORS: ' + data.error); } }, error: function(jqXHR, textStatus, errorThrown) { // Handle errors here console.log('ERRORS: ' + textStatus); }, complete: function() { // STOP LOADING SPINNER } }); } }); <file_sep>/db/periodos.sql -- GENERACION DE PERIODOS -- PASO 0: INSERT INTO jctm01t SELECT getCodigoApp('PD'), YEAR(fefecreg) anyper, DATE_FORMAT(fefecreg, '%m') numper, CURRENT_DATE, NULL, 'A', 'Procesamiento de periodos Paso 1', 'admin', CURRENT_TIMESTAMP FROM jctm09t -- WHERE anyper <> '2014' AND numper <> '10' GROUP BY anyper, numper ORDER BY anyper, numper; -- SELECT a.idnumper, fefecreg, b.idnumper, b.inanyper, b.innumper -- FROM jctm09t a INNER JOIN jctm01t b -- ON YEAR(a.fefecreg) = b.inanyper AND MONTH(a.fefecreg) = b.innumper; UPDATE jctm09t SET idnumper = ( SELECT b.idnumper FROM jctm01t b WHERE YEAR(fefecreg) = b.inanyper AND MONTH(fefecreg) = b.innumper ) -- PASO 1 UPDATE jctm10t SET dsautori = 'AUT:PASO 1', instatus = 'A' WHERE idreggas IN (SELECT idreggas FROM jctm09t WHERE instatus = 'C') SELECT a.idreggas, dsautori, a.instatus, b.instatus, fefecreg FROM jctm10t b INNER JOIN jctm09t a ON a.idreggas = b.idreggas WHERE a.instatus ='C' ORDER BY b.tmstmp DESC; UPDATE jctm10t SET dsautori = 'AUT:PASO 2', instatus = 'A' WHERE dsautori = '' AND idreggas IN (SELECT idreggas FROM jctm09t WHERE fefecreg < '2017-12-01'); SELECT * FROM jctm10t WHERE dsautori = 'ok'; UPDATE jctm09t SET instatus = 'C' WHERE idreggas IN ( SELECT idreggas FROM jctm10t WHERE dsautori LIKE '%PASO 2' ); -- SELECT a.fefecreg, b.dtfecreg, b.dsregper FROM jctm09t a INNER JOIN jctm10t b ON a.idreggas = b.idreggas AND b.dsautori LIKE '%PASO 2'; -- ACTUALIZACION y CIERRE de PERIODO UPDATE jctm01t SET instatus = 'C', dtfeccie = CURRENT_DATE, cdusuari = getUser(), dscoment = 'CIERRE DE PERIODO' WHERE idnumper <> '0000000000000234'; -- ALTERAR EL PROCEDIMIENTO PARA DEJAR PENDIENTES DE AUTORIZAR LOS REGISTROS DE GASTOS DROP PROCEDURE IF EXISTS insertaRegistro; CREATE PROCEDURE insertaRegistro(IN cabecera VARCHAR(32), IN descripcion VARCHAR(250), IN tipo VARCHAR(2), IN fecha DATE, IN importe DOUBLE, IN impuesto DOUBLE, IN nota VARCHAR(30), IN autoriza VARCHAR(120), OUT referencia VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE cts DATETIME; SELECT CURRENT_TIMESTAMP INTO cts; SELECT MD5(CONCAT(getCodigoApp('RC'),cts)) INTO referencia; INSERT INTO jctm10t (idreggas,idregper,dsregper,intipreg,dtfecreg,dbimpreg,dbimpues,instatus,dsautori,dsnotreg,cdusuari,tmstmp) VALUES (cabecera,referencia,descripcion,tipo,fecha,importe,impuesto,'P',autoriza,nota,getUser(),CURRENT_TIMESTAMP); SET error = ''; END; // DROP TABLE IF EXISTS jctm02t; CREATE TABLE jctm02t ( idnumper VARCHAR(32) NOT NULL, dbmonto DOUBLE NOT NULL, inregaso INT NOT NULL, inregtip INT NOT NULL, dbmaxaso DOUBLE NOT NULL, dbminaso DOUBLE NOT NULL, dbmaxtip DOUBLE NOT NULL, dbmintip DOUBLE NOT NULL, cdusuari VARCHAR(32) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idnumper), FOREIGN KEY(idnumper) REFERENCES jctm01t(idnumper) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=INNODB; DROP TABLE IF EXISTS jctm2at; CREATE TABLE jctm2at ( idnumper VARCHAR(32) NOT NULL, dsasocia VARCHAR(32) NOT NULL, innumreg INT NOT NULL DEFAULT 0, dbmonto DOUBLE NOT NULL, cdusuari VARCHAR(32) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idnumper, dsasocia), FOREIGN KEY(idnumper) REFERENCES jctm01t(idnumper) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=INNODB; DROP TABLE IF EXISTS jctm2tt; CREATE TABLE jctm2tt ( idnumper VARCHAR(32) NOT NULL, intipgas VARCHAR(1) NOT NULL, innumreg INT NOT NULL DEFAULT 0, dbmonto DOUBLE NOT NULL, cdusuari VARCHAR(32) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idnumper, intipgas), FOREIGN KEY(idnumper) REFERENCES jctm01t(idnumper) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=INNODB; -- tablas de ajustes a periodo CREATE TABLE jctm03t ( idajuste VARCHAR(32) NOT NULL, fefecaju DATE NOT NULL, idnumper VARCHAR(32) NOT NULL REFERENCES jctm01t(idnumper), cdusuari VARCHAR(32) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idajuste) )ENGINE=INNODB; -- tablas de ajustes a periodo - registros CREATE TABLE jctm3rt ( idajuste VARCHAR(32) NOT NULL, idreggas VARCHAR(32) NOT NULL REFERENCES jctm09t(idreggas), PRIMARY KEY(idajuste, idreggas) )ENGINE=INNODB; -- tablas de ajustes a periodo - cc CREATE TABLE jctm3ct ( idajuste VARCHAR(32) NOT NULL, fefecaju DATE NOT NULL, dbmonto DOUBLE NOT NULL, inregaso INT NOT NULL, inregtip INT NOT NULL, dbmaxaso DOUBLE NOT NULL, dbminaso DOUBLE NOT NULL, dbmaxtip DOUBLE NOT NULL, dbmintip DOUBLE NOT NULL, cdusuari VARCHAR(32) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idajuste), FOREIGN KEY(idajuste) REFERENCES jctm03t(idajuste) ON DELETE NO ACTION ON UPDATE NO ACTION )ENGINE=INNODB; DROP TABLE IF EXISTS jctm3at; CREATE TABLE jctm3at ( idajuste VARCHAR(32) NOT NULL, dsasocia VARCHAR(32) NOT NULL, innumreg INT NOT NULL DEFAULT 0, dbmonto DOUBLE NOT NULL, cdusuari VARCHAR(32) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idajuste, dsasocia), FOREIGN KEY(idajuste) REFERENCES jctm03t(idajuste) ON DELETE NO ACTION ON UPDATE NO ACTION )ENGINE=INNODB; DROP TABLE IF EXISTS jctm3tt; CREATE TABLE jctm3tt ( idajuste VARCHAR(32) NOT NULL, intipgas VARCHAR(1) NOT NULL, innumreg INT NOT NULL DEFAULT 0, dbmonto DOUBLE NOT NULL, cdusuari VARCHAR(32) NOT NULL, tmstmp DATETIME NOT NULL, PRIMARY KEY(idajuste, intipgas), FOREIGN KEY(idajuste) REFERENCES jctm03t(idajuste) ON DELETE CASCADE ON UPDATE CASCADE )ENGINE=INNODB; CALL cifrasControlAjuste('0000000000000229', @ajuste, @error); SELECT @error, @ajuste; DELETE FROM jctm3ct; DELETE FROM jctm3tt; DELETE FROM jctm3at; DELETE FROM jctm3rt; DELETE FROM jctm03t; -- GENERACION DE CIFRAS CONTROL POR USUARIO -- CALCULA LA VERSION: DROP PROCEDURE IF EXISTS cifrasControlAjuste; CREATE PROCEDURE cifrasControlAjuste(IN periodo VARCHAR(32), OUT ajuste VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE errno INT; DECLARE EXIT HANDLER FOR SQLEXCEPTION BEGIN -- GET CURRENT DIAGNOSTICS CONDITION 1 errno = MYSQL_ERRNO; -- SET error = (SELECT CONCAT(errno, ': 50012 - Un error ha ocurrido al generar las cifras control del Ajuste, la operación se suspendió')); SET error = (SELECT CONCAT(': 50012 - Un error ha ocurrido al generar las cifras control del Ajuste, la operación se suspendió')); SELECT error; ROLLBACK; END; START TRANSACTION; -- PASO 1. Crear el ajuste SET @tmstmp = (SELECT CURRENT_TIMESTAMP); SET @identificador = (SELECT md5(CONCAT(periodo, @tmstmp))); INSERT INTO jctm03t VALUES (@identificador, CURRENT_DATE, periodo, getUser(), @tmstmp); -- PASO 2. Inserta los registros considerados para el ajuste INSERT INTO jctm3rt SELECT @identificador, idreggas FROM jctm09t WHERE idnumper = periodo AND instatus = 'Q'; -- Paso 3. Inserta las cifras por asociado INSERT INTO jctm3at SELECT @identificador, dsasocia, COUNT(total) cuenta, ROUND(SUM(total), 2) total, getUser(), CURRENT_TIMESTAMP FROM jctm09t A INNER JOIN ( SELECT idreggas, SUM(dbimpreg) AS total FROM jctc10v GROUP BY idreggas ) C ON A.idreggas = C.idreggas WHERE instatus = 'Q' AND idnumper = periodo GROUP BY dsasocia, idnumper; -- Paso 4. Inserta las cifras por tipo de gasto INSERT INTO jctm3tt SELECT @identificador, intipgas, COUNT(total) cuenta, ROUND(SUM(total), 2) total, getUser(), CURRENT_TIMESTAMP FROM jctm09t A INNER JOIN ( SELECT idreggas, SUM(dbimpreg) AS total FROM jctc10v GROUP BY idreggas ) C ON A.idreggas = C.idreggas WHERE instatus = 'Q' AND idnumper = periodo GROUP BY intipgas, idnumper; -- Paso 5. Generamos las cifras control totales INSERT INTO jctm3ct SELECT @identificador, CURRENT_DATE, SUM(a.dbmonto) dbmonto, SUM(a.innumreg) inregaso, SUM(b.innumreg) inregtip, MAX(a.dbmonto) dbmaxaso, MIN(a.dbmonto) dbminaso, MAX(b.dbmonto) dbmaxtip, MIN(b.dbmonto) dbmintip, getUser(), CURRENT_TIMESTAMP FROM jctm3at a INNER JOIN jctm3tt b ON a.idajuste = b.idajuste WHERE a.idajuste = @identificador GROUP BY a.idajuste; SELECT @identificador INTO ajuste; -- PASO 6. Cierra los registros y cabeceras: UPDATE jctm10t SET instatus = 'C' WHERE idreggas IN ( SELECT idreggas FROM jctm09t WHERE instatus = 'Q' AND idnumper = periodo); UPDATE jctm09t SET instatus = 'C' WHERE instatus = 'Q' AND idnumper = periodo; COMMIT; END; // SELECT A.idajuste, A.fefecaju, A.idnumper, B.dbmonto, B.inregaso, B.inregtip, B.dbmaxaso, B.dbminaso, B.dbmaxtip, B.dbmintip FROM jctm03t A INNER JOIN jctm3ct B ON A.idajuste = B.idajuste WHERE idnumper = '0000000000000018'; -- validacion de cuentas: SELECT round(sum(dbmonto),2) monto, count(innumreg) cuenta FROM jctm3at UNION SELECT round(sum(dbmonto),2) monto, count(innumreg) cuenta FROM jctm3tt; -- AJUSTE PARA EL PRIMER PERIODO REGISTRADO: -- INSERT INTO jctm02t VALUES ('0000000000000013', 0,0,0,0,0,0,0,'admin',CURRENT_TIMESTAMP); -- CIfras para ajustes a periodos: -- >>>>>> -- GENERACION DE CIFRAS CONTROL POR USUARIO DROP PROCEDURE IF EXISTS cifrasControl; CREATE PROCEDURE cifrasControl(IN periodo VARCHAR(32), OUT error VARCHAR(250)) BEGIN DECLARE errno CHAR(5); DECLARE mensaje TEXT; DECLARE pendiente DOUBLE(13,2); DECLARE EXIT HANDLER FOR SQLEXCEPTION BEGIN -- GET DIAGNOSTICS CONDITION 1 -- errno = RETURNED_SQLSTATE, mensaje = MESSAGE_TEXT; SET error = (SELECT CONCAT('50012: Un error ha ocurrido al generar las cifras control.')); -- SELECT error; ROLLBACK; END; SELECT ROUND(IFNULL(SUM(dbimpreg),0),2) INTO pendiente FROM jctm10t A INNER JOIN jctm09t B ON A.idreggas = B.idreggas WHERE B.idnumper = periodo AND B.instatus='A'; IF (pendiente>0) THEN SET error = (SELECT CONCAT('50013 - Existen gastos pendientes de autorizar. No se puede cerrar el periodo hasta que no se Autoricen los gastos registrados: ', pendiente)); ELSE START TRANSACTION; -- PASO 1. Genera cifras por asociado: -- ELIMINAR RESULTADOS PREVIOS DELETE FROM jctm2at WHERE idnumper = periodo; DELETE FROM jctm2tt WHERE idnumper = periodo; DELETE FROM jctm02t WHERE idnumper = periodo; -- GENERACION DE CIFRAS CONTROL POR ASOCIADO INSERT INTO jctm2at SELECT idnumper, dsasocia, COUNT(total) cuenta, ROUND(SUM(total), 2) total, getUser(), CURRENT_TIMESTAMP FROM jctm09t A INNER JOIN ( SELECT idreggas, SUM(dbimpreg) AS total FROM jctc10v GROUP BY idreggas ) C ON A.idreggas = C.idreggas WHERE instatus = 'C' AND idnumper = periodo GROUP BY dsasocia, idnumper; -- GENERACION DE CIFRAS CONTROL POR TIPO INSERT INTO jctm2tt SELECT idnumper, intipgas, COUNT(total) cuenta, ROUND(SUM(total), 2) total, getUser(), CURRENT_TIMESTAMP FROM jctm09t A INNER JOIN ( SELECT idreggas, SUM(dbimpreg) AS total FROM jctc10v GROUP BY idreggas ) C ON A.idreggas = C.idreggas WHERE instatus = 'C' AND idnumper = periodo GROUP BY intipgas, idnumper; -- GENERACION DE CIFRAS CONTROL INSERT INTO jctm02t SELECT a.idnumper, SUM(a.dbmonto) dbmonto, SUM(a.innumreg) inregaso, SUM(b.innumreg) inregtip, MAX(a.dbmonto) dbmaxaso, MIN(a.dbmonto) dbminaso, MAX(b.dbmonto) dbmaxtip, MIN(b.dbmonto) dbmintip, getUser(), CURRENT_TIMESTAMP FROM jctm2at a INNER JOIN jctm2tt b ON a.idnumper = b.idnumper WHERE a.idnumper = periodo GROUP BY a.idnumper; -- Actualización para cierre: se cierra el periodo y se agregan comentarios -> UPDATE jctm01t SET instatus = 'C', dscoment = CONCAT(getUser(), '- CIERRE DE PERIODO ', CURRENT_TIMESTAMP), dtfeccie = CURRENT_DATE WHERE idnumper = periodo; COMMIT; END IF; END; // SELECT a.idnumper, inanyper, innumper, dtfecape, dtfeccie, instatus, dscoment, dbmonto, incuenta FROM jctm01t a LEFT JOIN jctm02t b ON a.idnumper = b.idnumper LEFT JOIN ( SELECT COUNT(A.idreggas) incuenta, idnumper FROM jctm10t A INNER JOIN jctm09t B ON A.idreggas = B.idreggas WHERE B.instatus = 'A' GROUP BY idnumper ) c ON a.idnumper = c.idnumper ORDER BY inanyper DESC, innumper DESC DROP PROCEDURE IF EXISTS cierraRegistroCabecera; CREATE PROCEDURE cierraRegistroCabecera(IN cabecera VARCHAR(32), OUT estatus CHAR(1), OUT error VARCHAR(250)) BEGIN DECLARE existe INT; SELECT COUNT(idreggas) INTO existe FROM jctm09t WHERE idreggas = cabecera AND instatus = 'A'; IF(existe>0) THEN -- --> cafaray 221217: AUTORIZACION MASIVA DE DETALLE DE REGISTROS QUE NO FUERON AUTORIZADOS PREVIAMENTE: UPDATE jctm10t SET instatus = 'A', dsautori = 'ok - auto.cierre' WHERE instatus = 'P' AND IFNULL(dsautori,'') = '' AND idreggas = cabecera; UPDATE jctm09t SET instatus = 'C' WHERE idreggas = cabecera; SET estatus = 'C'; SET error = ''; ELSE SET estatus = ' '; SET error = 'El registro no existe. Verifique.'; END IF; END // -- --> cafaray 221217: AUTORIZACION MASIVA DE DETALLE DE REGISTROS PARA AJUSTE: DROP PROCEDURE IF EXISTS cierraRegistroCabeceraAjuste; CREATE PROCEDURE cierraRegistroCabeceraAjuste(IN cabecera VARCHAR(32), OUT estatus CHAR(1), OUT error VARCHAR(250)) BEGIN DECLARE existe INT; SELECT COUNT(idreggas) INTO existe FROM jctm09t WHERE idreggas = cabecera AND instatus = 'A'; IF(existe>0) THEN UPDATE jctm10t SET instatus = 'Q', dsautori = 'ok - auto.cierre' WHERE instatus <> 'A' AND IFNULL(dsautori,'') = ''; UPDATE jctm09t SET instatus = 'Q' WHERE idreggas = cabecera; SET estatus = 'Q'; SET error = ''; ELSE SET estatus = ' '; SET error = 'El registro no existe. Verifique.'; END IF; END // ALTER TABLE jctm09t MODIFY COLUMN instatus ENUM('A','C','Q',' ') NOT NULL DEFAULT 'A' AFTER intipgas; DROP FUNCTION IF EXISTS obtieneImporteCabecera; CREATE FUNCTION obtieneImporteCabecera(cabecera VARCHAR(32)) RETURNS double DETERMINISTIC RETURN (SELECT idreggas, IFNULL(SUM(dbimpreg), 0) FROM jctm10t WHERE idreggas = cabecera GROUP BY idreggas); SELECT dsasocia, IFNULL(SUM(importe), 0) FROM jctm09t A LEFT JOIN ( SELECT idreggas, dbimpreg importe FROM jctm10t ) B ON A.idreggas = B.idreggas WHERE idnumper = '0000000000000234' GROUP BY dsasocia; SELECT a.idnumper, inanyper, innumper, dtfecape, dtfeccie, instatus, dscoment, dbmonto, incuenta FROM jctm01t a LEFT JOIN jctm02t b ON a.idnumper = b.idnumper LEFT JOIN ( SELECT COUNT(A.idreggas) incuenta, idnumper FROM jctm10t A INNER JOIN jctm09t B ON A.idreggas = B.idreggas WHERE B.instatus = 'A' OR B.instatus = 'Q' GROUP BY idnumper ) c ON a.idnumper = c.idnumper ORDER BY inanyper DESC, innumper DESC; mysql> SELECT A.idreggas, dsasocia, idnumper, A.intipgas, A.instatus, fefecreg, dsdocto, dsrefdoc FROM jctm09t A WHERE idnumper = '904a0649b396c9181551424146270946'; +----------------------------------+------------------------+----------------------------------+----------+----------+------------+----------------------+------------------------------+ | idreggas | dsasocia | idnumper | intipgas | instatus | fefecreg | dsdocto | dsrefdoc | +----------------------------------+------------------------+----------------------------------+----------+----------+------------+----------------------+------------------------------+ | 1ff31fee729f8abcf10099919369c0c0 | <NAME> | 904a0649b396c9181551424146270946 | | A | 2019-09-03 | FRENOSTRASEROSMAZDA | TRANSFERENCIA | | 2bb57b26659a7ece6af9036dea7cccdf | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-06 | | | | 36150ec95de70b7f80a66771ca1a0d20 | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-06 | | | | 5ac56103702e865216508030bbdc412c | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-06 | | | | 60e5b8ec08240b113a3fd721c1b7dbe2 | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-06 | | | | bbfa3407bb0db74982cb51d6d3a520de | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-06 | | | | bd186d38e7f7e07cb935ad499dd3b173 | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-04 | facturas | viaje a san José del Cabo | | e891b1fd89eb068979a5b97d8dbefdce | <NAME> | 904a0649b396c9181551424146270946 | h | A | 2019-09-06 | <NAME>arques | Del 28-08-2019 al 02-09-2019 | | ee64b4fbd34debda86f3a8ffeb48e182 | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-04 | facturas | Productos Perla | | f458deb1405cbc00c0761389272d0871 | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-03 | AGOSTO419 | TARJETA | +----------------------------------+------------------------+----------------------------------+----------+----------+------------+----------------------+------------------------------+ 10 rows in set (0.24 sec) SELECT A.idreggas, dsasocia, A.idnumper, A.intipgas, A.instatus, fefecreg, dsdocto, dsrefdoc, total, cuenta FROM jctm09t A LEFT JOIN ( SELECT idnumper, idreggas, IFNULL(SUM(dbimpreg), 0) AS total, IFNULL(COUNT(idreggas), 0) cuenta FROM jctc10v GROUP BY idnumper, idreggas ) C ON A.idreggas = C.idreggas AND C.idnumper = A.idnumper WHERE (A.idnumper = '904a0649b396c9181551424146270946' AND A.intipgas = 't' AND A.instatus = 'A') ORDER BY fefecreg; +----------------------------------+----------------------------------+----------------------------------+----------+----------+------------+------------------------+------------------------------------+--------------------+--------+ | idreggas | dsasocia | idnumper | intipgas | instatus | fefecreg | dsdocto | dsrefdoc | total | cuenta | +----------------------------------+----------------------------------+----------------------------------+----------+----------+------------+------------------------+------------------------------------+--------------------+--------+ | 006784e6b0d03d8f97d11357ecaef92f | <NAME> | 0000000000000078 | | A | 2016-04-08 | | | NULL | NULL | | 0192bd85012655d241723ebfe5103006 | <NAME> | 0000000000000234 | t | A | 2017-12-01 | factura | zamora28/11/2017 | NULL | NULL | | 04bf98592ec7437bf53c0904bd2e3a8c | <NAME> | 0000000000000219 | t | A | 2017-10-30 | facturas | 20-24/10/2017 | NULL | NULL | | 06cad23b1db3248233b261d8ffeb0909 | NAD GLOBAL LAREDO | 0000000000000078 | a | A | 2016-04-04 | C122691 | LA1-BIO-178263 | NULL | NULL | | 08f8122da0dc08d5ddfd85ee4cb4be22 | <NAME> | 0000000000000274 | t | A | 2018-03-09 | | | NULL | NULL | | 0e9634dc2007a68bb20c6799e3950c9f | PRID<NAME>sorcio Adual SAPI de CV | 87242238cd9fa6cc280d8dd7fcfb08be | a | A | 2019-03-21 | 9000441 | VNPS06999 | 9083.6 | 3 | | 1e63372acb6eba457a5dfd4b5bc35be4 | <NAME> | b6efe2fdccf1448f708bd01b0c877948 | t | A | 2018-07-19 | FACTURA | SERVIC<NAME> | 1199 | 1 | | 1f70d9d90675e7b28d20b2044af60d5d | <NAME> | 0000000000000274 | t | A | 2018-02-16 | colima febrero_2018 | citrojugo | NULL | NULL | | 1ff31fee729f8abcf10099919369c0c0 | <NAME> | 904a0649b396c9181551424146270946 | | A | 2019-09-03 | FRENOSTRASEROSMAZDA | TRANSFERENCIA | 2850 | 1 | | 21b54ae2f98d21ac69ca1419d5bee8b4 | PRIDA Consorcio Adual SAPI de CV | 134b538e7242305827b5f14cf765f703 | a | A | 2019-04-09 | 9000551 | VNPS07132 | 12186.130000000001 | 4 | | 275f34b0f264df48df6ce22fa40a6108 | NAD GLOBAL VERACRUZ BIOTECSA | 0000000000000055 | a | A | 2016-02-19 | H44871 | 52401/15 | NULL | NULL | | 2bb57b26659a7ece6af9036dea7cccdf | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-06 | | | NULL | NULL | | 2e5837efa3a72f946b97704a738fa8c0 | <NAME> | 0000000000000134 | | A | 2016-11-01 | | | NULL | NULL | | 2ec347169697513d4135c6f51374417b | <NAME> | 0000000000000283 | t | A | 2018-03-20 | FACTURA | oaxxaca | NULL | NULL | | 2feb4820a8b982b85bb5d43c15b9dfe7 | <NAME> | 907320b052d370940076118c26717a41 | h | A | 2019-05-13 | CAJA 13 MAYO | CAJA #10 | 545.5 | 2 | | 304e2aaf761ee210d13025ba78495aa3 | <NAME> | 0000000000000199 | t | A | 2017-08-14 | | AMORTIGUADORES Y FRENOS | NULL | NULL | | 339718d46643a0cf3ae2a447477bf202 | <NAME> | 907320b052d370940076118c26717a41 | t | A | 2019-05-21 | <NAME> | | NULL | NULL | | 34617c802d751eae31b907ccde1a4b37 | <NAME> | 9540a456ccb231561b8cada65a01815f | h | A | 2019-07-17 | CAJA 16 | 21 DE MAYO AL 10 JUNIO 2019 | 3879.3399999999997 | 14 | | 36150ec95de70b7f80a66771ca1a0d20 | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-06 | | | NULL | NULL | | 3c28b2ac368526f86318e923f720838e | <NAME> | 0000000000000234 | t | A | 2017-12-13 | Prueba 13 Dic | Preuba 13 Dic | NULL | NULL | | 3d13de8a306d3ae891ed53381c179274 | <NAME> | 134b538e7242305827b5f14cf765f703 | t | A | 2019-04-07 | VERIFICACION AVEO | MYJ2889 | NULL | NULL | | 40b4c9d47493bc604012b85c1fdd8003 | PRIDA Consorcio Adual SAPI de CV | 0000000000000050 | a | A | 2016-01-29 | 5003219 | EACM08990 | NULL | NULL | | 40f164c60a33f7a392f8d4c0600e9f20 | NAD GLOBAL LAREDO | 0000000000000291 | a | A | 2018-04-10 | TC1705 | LA1-BIO-289666G | NULL | NULL | | 4126b78c2d4461526e9b40b9b30109fb | <NAME> | 134b538e7242305827b5f14cf765f703 | h | A | 2019-04-30 | CAJA 10 | 26 DE MARZO AL 15 DE ABRIL DE 2019 | 4509.29 | 18 | | 449b89e30e3086cfc2410ea8d31326f8 | NAD GLOBAL LAREDO | 0000000000000234 | a | A | 2018-02-02 | TC392 | LA1-BIO-273401G | NULL | NULL | | 450deff05b39cd29a5927df1e4e062af | Nad Global | 0f29f7fa65afe974e71364251a9eb267 | a | A | 2018-08-03 | TC4323 | LA1-BIO-305997G | NULL | NULL | | 475f1b85a3539add045f74781e57a422 | <NAME> | d04de1c5034eb38763ca6da0ff7955f1 | h | A | 2018-10-08 | CAJA 30 | 05 AL 11 DE SEPTIEMBRE 2018 | 1176 | 4 | | 47b354f43d888a9d49d5f2a9781a573f | <NAME> | d04de1c5034eb38763ca6da0ff7955f1 | h | A | 2018-10-08 | CAJA 31 | 12 AL 25 DE SEPTIEMBRE 2018 | 2981.52 | 13 | | 48c3326469c9e9f686b333f03e3adefc | <NAME> | 0000000000000199 | t | A | 2017-08-31 | | | NULL | NULL | | 4922cd2de9274875a44d954e62fecdde | <NAME> | 0000000000000154 | | A | 2017-01-13 | | | NULL | NULL | | 4af328a5d894e0fb2917a2de686f634e | PRIDA Consorcio Adual SAPI de CV | 87242238cd9fa6cc280d8dd7fcfb08be | a | A | 2019-03-15 | 9003415 | MXPS20768 | 9267.73 | 3 | | 514103862803204a8e797b60f9749f17 | <NAME> | 0000000000000283 | t | A | 2018-04-03 | apatzingan 2018 marzo | facturas | NULL | NULL | | 5509cda6c020d71b1485b41c069f0856 | PRIDA Consorcio Adual SAPI de CV | 87242238cd9fa6cc280d8dd7fcfb08be | a | A | 2019-03-28 | 9003509 | MXPS21004 | 23862.47 | 4 | | 572ae7388ba8e5fc2920afd8471f3e93 | <NAME> | 0000000000000199 | t | A | 2017-08-01 | F10091 | PRUEBA FARIAS | 557 | 1 | | 5ac56103702e865216508030bbdc412c | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-06 | | | NULL | NULL | | 60e5b8ec08240b113a3fd721c1b7dbe2 | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-06 | | | 2844.1 | 2 | | 619d3eb9d4ebfecd5aace3ab5d1215bb | <NAME> | b6efe2fdccf1448f708bd01b0c877948 | t | A | 2018-07-20 | facturas casetas B | del 10 al 16 jl | NULL | NULL | | 62da00430baa5335bcf8a307fdb09760 | <NAME> | 0a9121a8d67c7ab83e82298273357ea6 | h | A | 2019-02-13 | CAJA 13 FEB | CAJA #04 | 1915 | 3 | | 648799a9bd7485c300cf78ddc8c73f16 | NAD GLOBAL VERACRUZ BIOTECSA | 0000000000000055 | a | A | 2016-02-19 | H45475 | 52727/15 | NULL | NULL | | 6b14ca2ac8f4999c2e09a6068429ad7f | <NAME> | 0000000000000018 | | A | 2015-01-01 | | | NULL | NULL | | 6b5474f721180306aeca4517a3345648 | <NAME> | 0000000000000274 | h | A | 2018-02-14 | CAJA 06 | DEL 07 AL 13 DE FEBRERO 2018 | NULL | NULL | | 6eb559603f417472bb33368d24c2c9bf | <NAME> | 6b14ca2ac8f4999c2e09a6068429ad7f | t | A | 2019-06-03 | Facturas | Puebla | NULL | NULL | | 748d8f489dbafee6055e29f19cb67927 | <NAME> | 0000000000000106 | | A | 2016-08-22 | | | 151 | 1 | | 76192119d94c9527b019f13c4ea1ffdc | <NAME> | 33de046fb3bfc2e4eef90e8c09611a8a | h | A | 2018-09-24 | CAJA 28 | 22 AL 28 DE AGOSTO 2018 | 2339.89 | 10 | | 762fbe29471521347fa7a4a217dac096 | <NAME> | 0000000000000067 | h | A | 2016-03-24 | | | NULL | NULL | | 836acba7d0b251455de8ff36a4de78d9 | <NAME> | 0000000000000234 | t | A | 2017-12-13 | PruebaDic13 | Dic13 | NULL | NULL | | 87242238cd9fa6cc280d8dd7fcfb08be | <NAME> | 0000000000000018 | | A | 2015-01-01 | | | NULL | NULL | | 875cb60f4c9b3fb3adbb3a1146ccfaa8 | PRIDA Consorcio Adual SAPI de CV | 0000000000000050 | a | A | 2016-01-06 | 5002993 | VNPS00752 | NULL | NULL | | 8f5a43376d9d8b5d92036aa40d560306 | <NAME> | 9540a456ccb231561b8cada65a01815f | h | A | 2019-07-18 | CAJA 17 | 11 AL 25 JUNIO 2019 | 2697.59 | 14 | | 91bacfa2c36adb0a01fc122b864a1fca | <NAME> | 0000000000000219 | t | A | 2017-10-10 | Octubre | proveedores | NULL | NULL | | 92695acfc04b42e77ebe1a5052a05011 | <NAME> | 0000000000000234 | t | A | 2017-12-06 | NL8595SR | TEST DE CARGA DE GASTO | NULL | NULL | | 954839f629342dccea3023c7d34e4680 | <NAME> | 134b538e7242305827b5f14cf765f703 | h | A | 2019-04-30 | CAJA 11 | 16 AL 23 DE ABRIL DEL 2019 | 1934.5800000000002 | 7 | | 958663a4b1f6d3e9496477fc39f92be6 | PRIDA Consorcio Adual SAPI de CV | 0a9121a8d67c7ab83e82298273357ea6 | a | A | 2019-02-12 | 9000204 | vnps06838 | 31185.38 | 5 | | 9ea06fae26f2499ce3901b927d8e1bd3 | <NAME> | 0000000000000210 | t | A | 2017-09-04 | facturas 29-31-08-2017 | | NULL | NULL | | a22c9ba284e13f4d2e314ef670f1e6ce | GUSTAVO <NAME> | 6b14ca2ac8f4999c2e09a6068429ad7f | t | A | 2019-06-03 | Facturas | Puebla | 222 | 2 | | a641e65a2d26c693253a20e9746acc8a | <NAME> | 0000000000000210 | t | A | 2017-09-28 | casetaspueblasep | efectivo | NULL | NULL | | aa87b85292dc61576907a316672bfad9 | <NAME> | 0000000000000199 | t | A | 2017-08-22 | <NAME> | <NAME> | NULL | NULL | | b879fc20cd19e5574ac5a106a85c7831 | <NAME> | 87242238cd9fa6cc280d8dd7fcfb08be | t | A | 2019-03-28 | Fact Estacionamiento | | 576 | 1 | | b978a7a5c4e5ef8e1ddd029098faed0b | <NAME> | 33de046fb3bfc2e4eef90e8c09611a8a | h | A | 2018-09-24 | CAJA 27 | 15 AL 21 DE AGOSTO 2018 | 1440 | 4 | | bbfa3407bb0db74982cb51d6d3a520de | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-06 | | | NULL | NULL | | bd186d38e7f7e07cb935ad499dd3b173 | GUSTAVO <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-04 | facturas | viaje a san José del Cabo | NULL | NULL | | be07785985737968a31e9748735e9b84 | <NAME> | b6efe2fdccf1448f708bd01b0c877948 | t | A | 2018-07-24 | factura | servicio 140mil | 1124 | 1 | | c2a7753608b67f11b54b27239bb013f3 | PRIDA Consorcio Adual SAPI de CV | 134b538e7242305827b5f14cf765f703 | a | A | 2019-04-09 | 9003585 | mxps21260 | 6877.95 | 4 | | cc25579246dc48e32f4e3ddbc8765a2b | <NAME> | b6efe2fdccf1448f708bd01b0c877948 | t | A | 2018-07-20 | facturas casetas A | del 4 al 9 Jl | NULL | NULL | | cddeddeb70e4641f822c8de290cb1c68 | <NAME> | 0000000000000199 | t | A | 2017-08-29 | pago en efctivo | agosto | 5078 | 4 | | d432699df2ed8dd379280999aa26911d | <NAME> | 0000000000000195 | t | A | 2017-07-04 | | | 5070.67 | 1 | | da61c32675c54d365660e9bd8079451b | <NAME> | 134b538e7242305827b5f14cf765f703 | t | A | 2019-03-28 | <NAME> | | NULL | NULL | | dd66bc3d77431cf969dc1a1681e2bfbb | <NAME> | 0f29f7fa65afe974e71364251a9eb267 | h | A | 2018-08-03 | CAJA 03 AGOSTO | CAJA #12 | 830 | 4 | | e12499815266f7b8b6b5f7a03e4cbc77 | NAD GLOBAL LAREDO | 0000000000000055 | a | A | 2016-02-18 | C103260 | LA1-BIO-138742 | NULL | NULL | | e891b1fd89eb068979a5b97d8dbefdce | <NAME> | 904a0649b396c9181551424146270946 | h | A | 2019-09-06 | <NAME> | Del 28-08-2019 al 02-09-2019 | 5619.449999999999 | 14 | | ee64b4fbd34debda86f3a8ffeb48e182 | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-04 | facturas | Productos Perla | 378.46 | 1 | | eeb5d16304d758fccb6746b3543f702d | <NAME> | b6efe2fdccf1448f708bd01b0c877948 | t | A | 2018-07-16 | MPE2-59225 | Gas 12jul18 | 2258.8 | 4 | | f2d7583591b1e02e8f1077f94dc8a51f | <NAME> | 0000000000000164 | t | A | 2017-04-27 | | | NULL | NULL | | f458deb1405cbc00c0761389272d0871 | <NAME> | 904a0649b396c9181551424146270946 | t | A | 2019-09-03 | AGOSTO419 | TARJETA | 2280.4700000000003 | 6 | | f721e8bac163461f34722140e624e3b9 | <NAME> | 0000000000000164 | t | A | 2017-04-19 | <NAME> Abr 17 | 190417 | NULL | NULL | | fc1c28764c8f14521d20d22c30e8d5c6 | <NAME> | d6ec7a110a26a4e2d27335c812a40b43 | h | A | 2018-11-13 | CAJA 35 | 17 AL 26 DE OCTUBRE 2018 | 4689.92 | 11 | | fc556fda4874f936789bff3278ebb4b8 | <NAME> | 0000000000000274 | t | A | 2018-02-20 | facturas | comida toluca G | NULL | NULL | +----------------------------------+----------------------------------+----------------------------------+----------+----------+------------+------------------------+------------------------------------+--------------------+--------+ 77 rows in set (0.26 sec) DROP VIEW IF EXISTS jctc10v; CREATE VIEW jctc10v AS SELECT C.idnumper, A.idreggas, A.idregper, A.intipreg, A.dtfecreg, A.dbimpreg, A.dbimpues, A.dsautori, A.instatus, A.dsnotreg, B.iddocele, A.dsregper FROM jctm10t A LEFT JOIN jctm11t B ON A.idreggas = B.idreggas AND A.idregper = B.idregper -- WHERE A.idreggas IN ('60e5b8ec08240b113a3fd721c1b7dbe2', '5ac56103702e865216508030bbdc412c','36150ec95de70b7f80a66771ca1a0d20', '2bb57b26659a7ece6af9036dea7cccdf','1ff31fee729f8abcf10099919369c0c0','bb<KEY>','bd186d38e7f7e07cb935ad499dd3b173','e891b1fd89eb068979a5b97d8dbefdce','ee64b4fbd34debda86f3a8ffeb48e182','f458deb1405cbc00c0761389272d0871') ORDER BY A.dtfecreg; DROP VIEW IF EXISTS jctc10v; CREATE VIEW jctc10v AS SELECT C.idnumper, A.idreggas, A.idregper, A.intipreg, A.dtfecreg, fefecreg, A.dbimpreg, A.dbimpues, A.dsautori, A.instatus, A.dsnotreg, B.iddocele, A.dsregper FROM jctm09t C INNER JOIN jctm10t A ON C.idreggas = A.idreggas LEFT JOIN jctm11t B ON A.idreggas = B.idreggas AND A.idregper = B.idregper ORDER BY A.dtfecreg; -- WHERE A.idreggas IN ('60e5b8ec08240b113a3fd721c1b7dbe2', '5ac56103702e865216508030bbdc412c','36150ec95de70b7f80a66771ca1a0d20', '2bb57b26659a7ece6af9036dea7cccdf','1ff31fee729f8abcf10099919369c0c0','bbfa3407bb0db74982cb51d6d3a520de','bd186d38e7f7e07cb935ad499dd3b173','e891b1fd89eb068979a5b97d8dbefdce','ee64b4fbd34debda86f3a8ffeb48e182','f458deb1405cbc00c0761389272d0871') SELECT A.idreggas, dsasocia, idnumper, A.intipgas, A.instatus, fefecreg, dsdocto, dsrefdoc, total, cuenta FROM jctm09t A LEFT JOIN ( SELECT idreggas, IFNULL(SUM(dbimpreg), 0) AS total, IFNULL(COUNT(idreggas), 0) cuenta FROM jctc10v GROUP BY idreggas ) C ON A.idreggas = C.idreggas;<file_sep>/nbproject/private/private.properties <<<<<<< HEAD deploy.ant.properties.file=/Users/Carlos/Library/Application Support/NetBeans/8.0.2/tomcat80.properties ======= deploy.ant.properties.file=/Users/omash/Library/Application Support/NetBeans/8.0.2/tomcat70.properties >>>>>>> 4d87126210f923b2862e70a54d5a54f5a41d2238 j2ee.platform.is.jsr109=true j2ee.server.domain=/Users/omash/Library/Application Support/NetBeans/8.0.2/apache-tomcat-8.0.15.0_base j2ee.server.home=/Applications/NetBeans/apache-tomcat-8.0.15 j2ee.server.instance=tomcat80:home=/Applications/NetBeans/apache-tomcat-8.0.15:base=apache-tomcat-8.0.15.0_base j2ee.server.middleware=/usr/local/glassfish3 javac.debug=true javadoc.preview=true <<<<<<< HEAD selected.browser=SL[/Browsers/SafariBrowser user.properties.file=/Users/cofa/Library/Application Support/NetBeans/7.4/build.properties ======= selected.browser=SL[/Browsers/FirefoxBrowser user.properties.file=/Users/omash/Library/Application Support/NetBeans/8.0.2/build.properties >>>>>>> 360b97329eafe5bee2702e1aec00d2c72111f027
c889bdad0fa8f6dffa110c1143d49e5776fd9162
[ "SQL", "JavaScript", "Markdown", "INI", "Java", "Shell" ]
46
SQL
cafaray/ftcgedoc
5bb668f2564267fd842db64fca04c7f8a12af0c0
5003a24e6e9a0a01bb612415ad7f193c37507102
refs/heads/master
<repo_name>davidcostadev/ufersavdbAPI<file_sep>/db/transactions.js var connector = require('./connector'); var response = require('../response'); var db = connector.db; var queryResult = connector.queryResult; // add query functions function getCountAllLogs(req, res, next) { return db.func('getCountAllLogs',undefined,queryResult.one) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou o total de transações')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getAllLogs(req, res, next) { return db.func('getvdbLogs',undefined,queryResult.any) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou todas as transações')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getLogBk(req, res, next) { var bkID = parseInt(req.params.id); return db.func('getvdbLogsBk', bkID, queryResult.any) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou as transações de uma bike')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getLogSt(req, res, next) { var stID = parseInt(req.params.id); return db.func('getvdbLogsSt', stID, queryResult.any) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou as transações de uma estação')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getLogCli(req, res, next) { var userID = parseInt(req.params.id); return db.func('getvdbLogsCli', userID, queryResult.any) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou as transações de um usuário')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getBikeOfCli(req, res, next) { var bkId = parseInt(req.params.id); return db.func('getBikeOfCli', bkId, queryResult.one) .then(function (data) { res.status(200) .json(response.success(data, 'Retorna a bike de um usuário')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getCountSt(req, res, next) { return db.func('getCountAllLogsSt',parseInt(req.params.id),queryResult.one) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou o total de transações de uma estação')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getCountBk(req, res, next) { return db.func('getCountAllLogsBk',parseInt(req.params.id),queryResult.one) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou o total de transações de uma bicicleta')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getCountCli(req, res, next) { return db.func('getCountAllLogsCli',parseInt(req.params.id),queryResult.one) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou o total de transações de um cliente')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function doLoan(req, res, next) { return db.func('open_vdb_log',[parseInt(req.params.cli),parseInt(req.params.bk),parseInt(req.params.st), parseInt(req.params.sl)],queryResult.one) .then(function () { res.status(200) .json(response.success({}, 'Uma bike foi entregue')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function doReturn(req, res, next) { return db.func('close_vdb_log',[parseInt(req.params.cli),parseInt(req.params.bk),parseInt(req.params.st), parseInt(req.params.sl)],queryResult.one) .then(function () { res.status(200).json(response.success({}, 'Uma bike foi devolvida')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } module.exports = { getCountAllLogs: getCountAllLogs, // feito getAllLogs: getAllLogs, // feito getLogBk: getLogBk, // feito getLogSt: getLogSt, // feito getLogCli: getLogCli, // feito getBikeOfCli: getBikeOfCli, getCountBk: getCountBk, getCountSt: getCountSt, getCountCli: getCountCli, doLoan: doLoan, // feito doReturn: doReturn // feito }; <file_sep>/db/bikes.js var connector = require('./connector'); var response = require('../response'); var db = connector.db; var queryResult = connector.queryResult; // add query functions function getBikes(req, res, next) { return db.func('getBikes',undefined,queryResult.any) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou todas as bikes')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getOnRideBikes(req, res, next) { return db.func('getOnRideBikes',undefined,queryResult.any) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou todas as bikes cedidas')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getBikesName(req, res, next) { return db.func('getBksName',undefined,queryResult.any) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou o nome de todas as bikes')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getValBks(req, res, next) { return db.func('getOpBikes',undefined,queryResult.any) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou o ID e o nome de bicicletas válidas')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getBike(req, res, next) { var bkID = parseInt(req.params.id); return db.func('getBike', bkID, queryResult.one) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou uma bike')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } /*function getUserByUserName(req, res, next) { db.func('getClientByUserName', req.params.name, queryResult.one) .then(function (data) { res.status(200) .json({ status: 'success', data: data, message: 'Retornou um usuário' }); }) .catch(function (err) { res.status(500) .json({ status: 'internal server error', data: err, message: 'Erro no servidor' }) }); }*/ function getBikesSt(req, res, next) { return db.func('getBikesSt',parseInt(req.params.id),queryResult.any) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou todas as bikes da estação')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getBikesOnSt(req, res, next) { return db.func('getBikesOnSt',parseInt(req.params.id),queryResult.any) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou todas as bikes que estão na estação')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getBikesOffSt(req, res, next) { return db.func('getBikesOffSt',parseInt(req.params.id),queryResult.any) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou todas as bikes que sairam da estação')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getLogs(req, res, next) { return db.func('getHistsBike', null, queryResult.any) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou o histórico de todas as bikes')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getLog(req, res, next) { var bkID = parseInt(req.params.id); return db.func('getHistBike', [bkID,null], queryResult.any) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou o histórico de uma bike')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function createBike(req, res, next) { return db.func('createBike',req.body.name,queryResult.one) .then(function () { res.status(200).json(response.success({}, 'Uma bike inserida')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function updateBike(req, res, next) { return db.func('upd_bike',[parseInt(req.body.idbike),req.body.name,req.body.state],queryResult.any) .then(function () { res.status(200).json(response.success({}, 'Atualizou uma bike')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function updateStation(req, res, next) { return db.func('upd_bikeSt',[parseInt(req.params.bk),parseInt(req.params.st),parseInt(req.params.sl)],queryResult.one) .then(function () { res.status(200).json(response.success({}, 'Atualizou a estação de uma bike')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function removeBike(req, res, next) { var bkID = parseInt(req.params.id); return db.func('delBike',bkID, queryResult.one) .then(function (result) { res.status(200).json(response.success({}, 'Removeu uma bike')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function removeBikes(req, res, next) { return db.func('delBikes',undefined,queryResult.one) .then(function (result) { res.status(200).json(response.success({}, 'Removeu '+result.rowCount+' bikes')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function changeState(req, res, next) { var bkID = parseInt(req.params.id); return db.func('changeBikeState',bkID, queryResult.one) .then(function (result) { res.status(200).json(response.success({}, 'Mudou o estado de 1 bike')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } module.exports = { getBikes: getBikes, // feito getOnRideBikes: getOnRideBikes, // feito getBikesName: getBikesName,// feito getValBks: getValBks, getBike: getBike, // feito getBikesSt: getBikesSt, // feito getBikesOnSt: getBikesOnSt, // feito getBikesOffSt: getBikesOffSt, // feito getBikeLogs: getLogs, // feito getBikeLog: getLog, // feito changeState: changeState, // feito updateStation: updateStation, // feito createBike: createBike, // feito updateBike: updateBike, // feito removeBikes: removeBikes, // feito removeBike: removeBike // feito }; <file_sep>/db/users.js var connector = require('./connector'); var response = require('../response'); var db = connector.db; var queryResult = connector.queryResult; // add query functions function login(req, res, next) { return db.func('getClientLogin',[req.body.username,req.body.password], queryResult.one) .then(function (data) { res.status(200) .json(response.success(data, 'Login de usuario ' + req.body.username + ' efetuado')); }) .catch(function (err) { res.status(401) .json(response.failure(err, 'Usuário ou password inválidos')); }); } function signup(req, res, next) { return db.func('signUpClient', [req.body.username,req.body.password,req.body.fullname,req.body.email,req.body.phone, req.body.profession,req.body.sex,req.body.birthdate],queryResult.any) .then(function () { res.status(200) .json(response.success({}, 'Um usuário cadastrado')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getUsers(req, res, next) { return db.func('getClients',undefined,queryResult.many) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou todos os usuários')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getValUsers(req, res, next) { return db.func('getValCli',undefined,queryResult.many) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou o id e nome de usuários válidos')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getUserNames(req, res, next) { return db.func('getClientsUserName', undefined, queryResult.many) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou o nome de todos os usuários')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getUser(req, res, next) { var userID = parseInt(req.params.id); return db.func('getClient', userID, queryResult.one) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou um usuário')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getInfos(req, res, next) { return db.func('getInfosCli',undefined, queryResult.many) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou as informações de todos os usuários')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getInfo(req, res, next) { var userID = parseInt(req.params.id); return db.func('getInfoCli', userID, queryResult.one) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou as informações de um usuário')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getLogs(req, res, next) { return db.func('getHistsCli',null, queryResult.many) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou o histórico de todos os usuários')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function getLog(req, res, next) { var userID = parseInt(req.params.id); return db.func('getHistCli',[userID,null], queryResult.many) .then(function (data) { res.status(200) .json(response.success(data, 'Retornou o histórico de um usuário')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function createUser(req, res, next) { return db.func('createClient', [parseInt(req.body.role),req.body.username,req.body.password],queryResult.one) .then(function () { res.status(200) .json(response.success({}, 'Um usuário inserido')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function updateUser(req, res, next) { // PADRONIZAR ID PARA IDCLI PARA DAR MATCH COM O BANCO return db.func('upd_cli',[parseInt(req.body.idcli),parseInt(req.body.role),req.body.username,req.body.password,req.body.state],queryResult.one) .then(function () { res.status(200) .json(response.success({}, 'Atualizou um usuário')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function updateUserInfo(req, res, next) { return db.func('upd_info_cli',[parseInt(req.body.id), req.body.fullname, req.body.email,req.body.phone,req.body.profession,req.body.sex, req.body.birthdate],queryResult.any) .then(function () { res.status(200) .json(response.success({}, 'Atualizou as informações de um usuário')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function removeUser(req, res, next) { var userID = parseInt(req.params.id); return db.func('delClient',userID, queryResult.one) .then(function (result) { res.status(200) .json(response.success({}, 'Removeu um usuário')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function removeUsers(req, res, next) { return db.func('delClients',undefined,queryResult.one) .then(function (result) { res.status(200) .json(response.success({}, 'Removeu '+result.rowCount+' usuários')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } function changeSit(req, res, next) { var userID = parseInt(req.params.id); return db.func('changeSit',userID, queryResult.one) .then(function (result) { res.status(200) .json(response.success({}, 'Mudou a situação de um usuário')); }) .catch(function (err) { res.status(500).json(response.failure(err)); }); } module.exports = { login: login, // feito signup: signup, // feito getUsers: getUsers, // feito getValUsers: getValUsers, // feito getUser: getUser, // feito getUserNames: getUserNames, // feito changeSit: changeSit, // feito getInfos: getInfos, // feito getInfo: getInfo, // feito getUsersLog: getLogs, // feito getUserLog: getLog, // feito createUser: createUser, // feito updateUser: updateUser, // feito updateUserInfo: updateUserInfo, // feito removeUser: removeUser, // feito removeUsers: removeUsers // feito }; <file_sep>/routes/users.js var express = require('express'); var router = express.Router(); var db = require('../db/users'); /* A ORDEM DA DECLARAÇÃO IMPORTA * * NÃO FAÇA: * get('/:id,.....); * get('/log/,.....); * * FAÇA: * get('/log/,.....); * get('/:id,.....); * */ router.get('/sit/:id', db.changeSit); router.get('/log', db.getUsersLog); router.get('/log/:id', db.getUserLog); router.get('/info', db.getInfos); router.get('/info/:id', db.getInfo); router.post('/info', db.updateUserInfo); router.get('/', db.getUsers); router.get('/val', db.getValUsers); router.get('/n', db.getUserNames); router.get('/:id', db.getUser); router.post('/', db.createUser); router.post('/login',db.login); router.post('/signup',db.signup); router.put('/', db.updateUser); router.delete('/', db.removeUsers); router.delete('/:id', db.removeUser); module.exports = router;
a2fb47aa5cbd6ed67c81d230ec9a7d158d800eb4
[ "JavaScript" ]
4
JavaScript
davidcostadev/ufersavdbAPI
e5f404e5beae1cfb56eb989a8fa8b04ed69c4921
0f3a7b0a44df37dca69441755af53e239f6cf3f1
refs/heads/master
<repo_name>tridhachaudhuri/AirPool<file_sep>/src/job.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Jun 18 01:04:37 2020 @author: tridhachaudhuri """ import schedule import time import depatureairportjob from datetime import date import csv import arrivalairportjob import os import flighttoandfrom import historyofflightsjob import threading def directorycreation(): parentpath=<parent-path-to-create-folder> directory=str(date.today()) path = os.path.join(parentpath, directory) os.mkdir(path) parentpath=parentpath+str(date.today())+"/" directory="arrivals" path = os.path.join(parentpath, directory) os.mkdir(path) directory="departure" path = os.path.join(parentpath, directory) os.mkdir(path) directory="flighthistory" path = os.path.join(parentpath, directory) os.mkdir(path) directory="flightstoandfrom" path = os.path.join(parentpath, directory) os.mkdir(path) def airport_iata(airports): with open('US_airlines.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: line_count += 1 else: airports.append(row[0]) def airline_id(airlines): with open('/Users/tridhachaudhuri/Desktop/Insight/Code/pyflightdata/all_live_labels.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: line_count += 1 else: if row!=[]: airlines.append(row[0]) def departurejob(depairports): filename='airport_departure_US_'+str(date.today() )+'.csv' depatureairportjob.writeheader(filename) depatureairportjob.apidepartedfile(depairports,filename) def arrivaljob(arriveairports): filename='airport_arrival_US_'+str(date.today() )+'.csv' arrivalairportjob.writeheader(filename) arrivalairportjob.apiarrivalfile(arriveairports,filename) def flighttoandfromjob(fromtoairports): filename='fromtoairports_US_'+str(date.today() )+'.csv' flighttoandfrom.writeheader(filename) flighttoandfrom.apiarrivalfile(filename) def flighthistoryjob(airlinesid): for airid in airlines: print(airid) historyofflightsjob.flights(airid) def run_threaded(job_func): job_thread = threading.Thread(target=job_func) job_thread.start() def job1(): departurejob(airports) def job2(): arrivaljob(airports) def job3(): flighthistoryjob(airlines) def job4(): flighttoandfromjob(airports) airports=[] airlines=[] airport_iata(airports) airline_id(airlines) directorycreation() schedule.every().day.at('00:01').do(run_threaded, job1) schedule.every().day.at('00:01').do(run_threaded, job2) schedule.every().day.at('01:44').do(run_threaded, job3) schedule.every().day.at('00:01').do(run_threaded, job4) while True: schedule.run_pending() time.sleep(1) <file_sep>/src/country_airport.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Jun 11 01:40:30 2020 @author: tridhachaudhuri """ from pyflightdata import FlightData import csv api=FlightData() #import pandas as pd airport_iata=[] with open('country_list.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: line_count += 1 else: airport_iata.append(row[0]) header = ['country','name','iata', 'lat','lon'] file = open('country_airport.csv', 'a+', newline ='') with file: writer = csv.writer(file) writer.writerow(header) file.close() for country in airport_iata: final=[] details=api.get_airports(country) for d in details: name=d['name'] iata=d['iata'] lat=d['lat'] lon=d['lon'] row_data=[] row_data.append(country) row_data.append(name) row_data.append(iata) row_data.append(lat) row_data.append(lon) final.append(row_data) file = open('country_airport.csv', 'a+', newline ='') with file: writer = csv.writer(file) #writer.writerow(header) writer.writerows(final) <file_sep>/src/depatureairportjob.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Jun 18 01:33:01 2020 @author: tridhachaudhuri """ from pyflightdata import FlightData import csv from datetime import date import os api=FlightData() airport_iata=[] filename='' def airport_iata(): with open('US_airlines.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: line_count += 1 else: airport_iata.append(row[0]) def writeheader(filename): header = ['destination_airport_iata','number','callsign', 'live', 'text','type_Arrival','color','diverted', 'utc_millis','utc_date','utc_time','utc', 'local_date','local_time','model_code','model_name','registration', 'country_name','country_alpha2','country_alpha3','restricted','owner_name', 'owner_iata','owner_icao','airline_name','airline_iata','airline_icao','airline_short', 'origin_aiporttimezone_name','origin_aiporttimezone_offset','origin_aiporttimezone_abbr','origin_aiporttimezone_abbrname', 'origin_airport_terminal','origin_airport_baggage','origin_airport_gate', 'dest_aiportcode_iata','dest_aiportcode_icao','dest_airport_timezone_name','dest_airport_timezone_offset','dest_airport_timezone_abbr','dest_airport_timezone_abbrname', 'dest_airport_terminal','dest_airport_baggage','dest_airport_gate','dest_airport_latitude', 'dest_airport_longitude','dest_airport_countryname','dest_airport_countrycode','dest_airport_city', 'scheduled_departuredate','scheduled_departuretime','scheduled_arrivaldate','scheduled_arrivaltime', 'real_departuredate','real_departuretime','estimated_departuredate','estimated_departuretime'] #filename='airport_departure_US_'+str(date.today() )+'.csv' file = open(filename, 'a+', newline ='') with file: writer = csv.writer(file) writer.writerow(header) file.close() def writefile(final,filename): file = open(filename, 'a+', newline ='') with file: writer = csv.writer(file) writer.writerows(final) def writeraw(flightdata,airport): parentpath=<path-to-save>+str(date.today())+"/departure/" file=str(airport)+".txt" f1=open(parentpath+file,'w') if flightdata!=[]: f1.write(str(flightdata)) def apidepartedfile(airport_iata,filename): for airport in airport_iata: LAXflights=api.get_airport_departures(airport, limit=100) print(airport) final=[] writeraw(LAXflights,airport) for i in range(len(LAXflights)): number=LAXflights[i]['flight']['identification']['number']['default'] callsign=LAXflights[i]['flight']['identification']['callsign'] live=LAXflights[i]['flight']['status']['live'] text=LAXflights[i]['flight']['status']['text'] type_Arrival=LAXflights[i]['flight']['status']['generic']['status']['type'] color=LAXflights[i]['flight']['status']['generic']['status']['color'] diverted=LAXflights[i]['flight']['status']['generic']['status']['diverted'] try: utc_millis=LAXflights[i]['flight']['status']['generic']['eventTime']['utc_millis'] except: utc_millis="" try: utc_date=LAXflights[i]['flight']['status']['generic']['eventTime']['utc_date'] except: utc_date="" try: utc_time=LAXflights[i]['flight']['status']['generic']['eventTime']['utc_time'] except: utc_time="" utc=LAXflights[i]['flight']['status']['generic']['eventTime']['utc'] try: local_date=LAXflights[i]['flight']['status']['generic']['eventTime']['local_date'] except: local_date="" try: local_time=LAXflights[i]['flight']['status']['generic']['eventTime']['local_time'] except: local_time="" try: model_code=LAXflights[i]['flight']['aircraft']['model']['code'] except: model_code="" try: model_name=LAXflights[i]['flight']['aircraft']['model']['text'] except: model_name="" try: registration=LAXflights[i]['flight']['aircraft']['registration'] except: registration="" try: country_name=LAXflights[i]['flight']['aircraft']['country']['name'] except: country_name="" try: country_alpha2=LAXflights[i]['flight']['aircraft']['country']['alpha2'] except: country_alpha2="" try: country_alpha3=LAXflights[i]['flight']['aircraft']['country']['alpha3'] except: country_alpha3="" try: restricted=LAXflights[i]['flight']['aircraft']['restricted'] except: restricted="" try: owner_name= LAXflights[i]['flight']['owner']['name'] except: owner_name="" try: owner_iata=LAXflights[i]['flight']['owner']['code']['iata'] except: owner_iata="" try: owner_icao=LAXflights[i]['flight']['owner']['code']['icao'] except: owner_icao="" try: airline_name=LAXflights[i]['flight']['airline']['name'] except: airline_name="" try: airline_iata=LAXflights[i]['flight']['airline']['code']['iata'] except: airline_iata="" try: airline_icao=LAXflights[i]['flight']['airline']['code']['icao'] except: airline_icao="" try: airline_short=LAXflights[i]['flight']['airline']['short'] except: airline_short="" origin_aiporttimezone_name=LAXflights[i]['flight']['airport']['origin']['timezone']['name'] origin_aiporttimezone_offset=LAXflights[i]['flight']['airport']['origin']['timezone']['offset'] origin_aiporttimezone_abbr=LAXflights[i]['flight']['airport']['origin']['timezone']['abbr'] origin_aiporttimezone_abbrname=LAXflights[i]['flight']['airport']['origin']['timezone']['abbrName'] origin_airport_terminal=LAXflights[i]['flight']['airport']['origin']['info']['terminal'] origin_airport_baggage=LAXflights[i]['flight']['airport']['origin']['info']['baggage'] origin_airport_gate=LAXflights[i]['flight']['airport']['origin']['info']['gate'] dest_airport_terminal=LAXflights[i]['flight']['airport']['destination']['info']['terminal'] dest_airport_baggage=LAXflights[i]['flight']['airport']['destination']['info']['baggage'] dest_airport_gate=LAXflights[i]['flight']['airport']['destination']['info']['gate'] #dest_airport_name=LAXflights[i]['flight']['airport']['destination']['name'] dest_aiportcode_iata=LAXflights[i]['flight']['airport']['destination']['code']['iata'] dest_aiportcode_icao=LAXflights[i]['flight']['airport']['destination']['code']['icao'] dest_airport_latitude=LAXflights[i]['flight']['airport']['destination']['position']['latitude'] dest_airport_longitude=LAXflights[i]['flight']['airport']['destination']['position']['longitude'] dest_airport_countryname=LAXflights[i]['flight']['airport']['destination']['position']['country']['name'] dest_airport_countrycode=LAXflights[i]['flight']['airport']['destination']['position']['country']['code'] dest_airport_city=LAXflights[i]['flight']['airport']['destination']['position']['region']['city'] dest_airport_timezone_name=LAXflights[i]['flight']['airport']['destination']['timezone']['name'] dest_airport_timezone_offset=LAXflights[i]['flight']['airport']['destination']['timezone']['offset'] dest_airport_timezone_abbr=LAXflights[i]['flight']['airport']['destination']['timezone']['abbr'] dest_airport_timezone_abbrname=LAXflights[i]['flight']['airport']['destination']['timezone']['abbrName'] scheduled_departuredate=LAXflights[i]['flight']['time']['scheduled']['departure_date'] scheduled_departuretime=LAXflights[i]['flight']['time']['scheduled']['departure_time'] scheduled_arrivaldate=LAXflights[i]['flight']['time']['scheduled']['arrival_date'] scheduled_arrivaltime=LAXflights[i]['flight']['time']['scheduled']['arrival_time'] try: real_departuredate=LAXflights[i]['flight']['time']['real']['departure_date'] except: real_departuredate="" try: real_departuretime=LAXflights[i]['flight']['time']['real']['departure_time'] except: real_departuretime="" try: estimated_arrivaldate=LAXflights[i]['flight']['time']['estimated']['departure_date'] except: estimated_arrivaldate="" try: estimated_arrivaltime=LAXflights[i]['flight']['time']['estimated']['departure_time'] except: estimated_arrivaltime="" row_list=[] row_list.append(airport) row_list.append(number) row_list.append(callsign) row_list.append(live) row_list.append(text) row_list.append(type_Arrival) row_list.append(color) row_list.append(diverted) row_list.append(utc_millis) row_list.append(utc_date) row_list.append(utc_time) row_list.append(utc) row_list.append(local_date) row_list.append(local_time) row_list.append(model_code) row_list.append(model_name) row_list.append(registration) row_list.append(country_name) row_list.append(country_alpha2) row_list.append(country_alpha3) row_list.append(restricted) row_list.append(owner_name) row_list.append(owner_iata) row_list.append(owner_icao) row_list.append(airline_name) row_list.append(airline_iata) row_list.append(airline_icao) row_list.append(airline_short) row_list.append(origin_aiporttimezone_name) row_list.append(origin_aiporttimezone_offset) row_list.append(origin_aiporttimezone_abbr) row_list.append(origin_aiporttimezone_abbrname) row_list.append(origin_airport_terminal) row_list.append(origin_airport_baggage) row_list.append(origin_airport_gate) row_list.append(dest_aiportcode_iata) row_list.append(dest_aiportcode_icao) row_list.append(dest_airport_timezone_name) row_list.append(dest_airport_timezone_offset) row_list.append(dest_airport_timezone_abbr) row_list.append(dest_airport_timezone_abbrname) row_list.append(dest_airport_terminal) row_list.append(dest_airport_baggage) row_list.append(dest_airport_gate) row_list.append(dest_airport_latitude) row_list.append(dest_airport_longitude) row_list.append(dest_airport_countryname) row_list.append(dest_airport_countrycode) row_list.append(dest_airport_city) row_list.append(scheduled_departuredate) row_list.append(scheduled_departuretime) row_list.append(scheduled_arrivaldate) row_list.append(scheduled_arrivaltime) row_list.append(real_departuredate) row_list.append(real_departuretime) row_list.append(estimated_arrivaldate) row_list.append(estimated_arrivaltime) final.append(row_list) writefile(final,filename) <file_sep>/src/imagessave.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Jun 12 04:10:47 2020 @author: tridhachaudhuri """ #from pyflightdata import FlightData import csv #api=FlightData() #import pandas as pd import requests """ images=[] with open('airport_info.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: line_count += 1 else: #images.append(row[19]) if row[19]!='': name="airportimages/png/"+row[0]+".png" uri=row[19] with open(name, 'wb') as f: f.write(requests.get(uri).content) """ import wikipedia with open('airport_info.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: lengthofimages=0 if line_count == 0: line_count += 1 else: #images.append(row[19]) if row[1]!='': try: wikipage = wikipedia.page(row[1]) except: continue lengthofimages=len(wikipage.images) for i in range(lengthofimages): name="airportimages/wiki/"+row[0]+str(i)+".png" uri=wikipage.images[i] with open(name, 'wb') as f: f.write(requests.get(uri).content) <file_sep>/src/README.md ## Files in Repo ### job.py This is the main function that needs to run. Everything is initiallized and then eventually run within this function. It calls all the other python scripts during their scheduled time after importing them. To schedule when the scripts needs to be called, the schedule package is used. Currently, the script is run once a day at around midnight. The script also performs multi-threading and all the other python scripts that are called through it are performed parallely. Each of the scripts that are to be called from the job.py file are put into its individual functions. ### arrivalairports.py The script creates an object of the pyflight api and uses it to call the get_airport_arrivals(airport_name) function. The list of airport names are passed on to it from job.py. The raw JSON file for each airport is stored into the "raw data" S3 bucket. The code processes the data by parsing through the JSON and writing the needed information into individual CSV files for each airport. This CSV is then moved to the "processed data" S3 bucket. ### departureairports.py The script creates an object of the pyflight api and uses it to call the get_airport_departures(airport_name) function. The list of airport names are passed on to it from job.py. The raw JSON file for each airport is stored into the "raw data" S3 bucket. The code processes the data by parsing through the JSON and writing the needed information into individual CSV files for each airport. This CSV is then moved to the "processed data" S3 bucket. ### flighttoandfrom.py ### historyofflightsjob.py The script creates an object of the pyflight api and uses it to call the api.get_history_by_flight_number(airline_id) function. The list of airline_id are passed on to it from job.py. The raw JSON file for each airport is stored into the "raw data" S3 bucket. ### awsupload.py ### country_info.py This script is run individually to get a list of all countries pyflight api holds information of. It creates an object of the pyflight api and uses it to call the get_countries() function which then returns a JSON file with all the needed countries and images of their flags. This data is then processed to a readable CSV format and the processed data is moved to the S3 bucket. ### country_airport.py This script is run individually to keep a list of the airports that belong to a particular country. The country names can be found from the CSV extracted by the coutry_info.py script. Through this script, we are able to gain all the airports that belong to the U.S. and Canada. It creates an object of the pyflight api and uses it to call the get_airports(country_name) function which then returns a JSON file with all the airports belonging to a particular country. This data is then processed to a readable CSV format and the processed data is moved to the S3 bucket. ### airport_review.py This script is run individually to keep a list of the reviews that belong to a individual aiports. The airport names can be found from the CSV extracted by the country_airport.py script. It creates an object of the pyflight api and uses it to call the get_airport_reviews(airport) function which then returns a JSON file with all the reviews and ratings belonging to a particular aiport. This data is then processed to a readable CSV format and the processed data is moved to the S3 bucket. ### infoairport.py To get delay indexes, elevation, timezone etc of an aiport, we use the get_airport_details(airport) function to return a JSON file with all these information for a particular airport. The airport names can be found from the CSV extracted by the country_airport.py script. This data is then processed to a readable CSV format and the processed data is moved to the S3 bucket. ### imagessave.py For each image URL link which consists in our database, the imagessave.py helps in extracting and storing image and gif using requests library in python. The code also extracts all pictures on Wikipedia for a search (airport names are searched) and save in S3 buckets. This is done by using the wikipedia library available in python. <file_sep>/src/historyofflightsjob.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Jun 18 04:41:49 2020 @author: tridhachaudhuri """ from pyflightdata import FlightData from datetime import date api=FlightData() def file_creator(details,airline): fname="<path-to-save>"+str(date.today())+"/flighthistory/"+str(airline)+'.txt' file = open(fname, 'w') if details!=[]: file.write(str(details)) def flights(airlinesid): try: details=api.get_history_by_flight_number(str(airlinesid)) except TypeError: details=[] file_creator(details,airlinesid) <file_sep>/src/arrivalairports.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Jun 18 02:59:25 2020 @author: tridhachaudhuri """ from pyflightdata import FlightData import csv from datetime import date api=FlightData() filename='' def writeheader(filename): header = ['arrival_airport_iata','number','callsign', 'live', 'text','type_Arrival','color','diverted', 'utc_millis','utc_date','utc_time','utc', 'local_date','local_time','model_code','model_name','registration', 'country_name','country_alpha2','country_alpha3','restricted','owner_name', 'owner_iata','owner_icao','airline_name','airline_iata','airline_icao','airline_short', 'origin_aiportcode_iata','origin_aiportcode_icao','origin_aiporttimezone_name','origin_aiporttimezone_offset','origin_aiporttimezone_abbr','origin_aiporttimezone_abbrname','origin_airport_terminal','origin_airport_baggage','origin_airport_gate','origin_airport_name','origin_airport_latitude','origin_airport_longitude','origin_airport_countryname','origin_airport_countrycode','origin_airport_city','dest_airport_timezone_name','dest_airport_timezone_offset','dest_airport_timezone_abbr','dest_airport_timezone_abbrname','dest_airport_terminal','dest_airport_baggage','dest_airport_gate','scheduled_departuredate','scheduled_departuretime','scheduled_arrivaldate','scheduled_arrivaltime', 'real_departuredate','real_departuretime','estimated_arrivaldate','estimated_arrivaltime'] file = open(filename, 'a+', newline ='') with file: writer = csv.writer(file) writer.writerow(header) file.close() def writefile(final,filename): file = open(filename, 'a+', newline ='') with file: writer = csv.writer(file) writer.writerows(final) def writeraw(flightdata,airport): parentpath=<path-to-save>+str(date.today())+"/arrivals/" file=str(airport)+".txt" f1=open(parentpath+file,'w') if flightdata!=[]: f1.write(str(flightdata)) def apiarrivalfile(airport_iata,filename): for airport in airport_iata: LAXflights=api.get_airport_arrivals(airport, limit=100) print(airport) writeraw(LAXflights,airport) final=[] for i in range(len(LAXflights)): number=LAXflights[i]['flight']['identification']['number']['default'] callsign=LAXflights[i]['flight']['identification']['callsign'] live=LAXflights[i]['flight']['status']['live'] text=LAXflights[i]['flight']['status']['text'] type_Arrival=LAXflights[i]['flight']['status']['generic']['status']['type'] color=LAXflights[i]['flight']['status']['generic']['status']['color'] diverted=LAXflights[i]['flight']['status']['generic']['status']['diverted'] try: utc_millis=LAXflights[i]['flight']['status']['generic']['eventTime']['utc_millis'] except: utc_millis="" try: utc_date=LAXflights[i]['flight']['status']['generic']['eventTime']['utc_date'] except: utc_date="" try: utc_time=LAXflights[i]['flight']['status']['generic']['eventTime']['utc_time'] except: utc_time="" utc=LAXflights[i]['flight']['status']['generic']['eventTime']['utc'] try: local_date=LAXflights[i]['flight']['status']['generic']['eventTime']['local_date'] except: local_date="" try: local_time=LAXflights[i]['flight']['status']['generic']['eventTime']['local_time'] except: local_time="" try: model_code=LAXflights[i]['flight']['aircraft']['model']['code'] except: model_code="" try: model_name=LAXflights[i]['flight']['aircraft']['model']['text'] except: model_name="" try: registration=LAXflights[i]['flight']['aircraft']['registration'] except: registration="" try: country_name=LAXflights[i]['flight']['aircraft']['country']['name'] except: country_name="" try: country_alpha2=LAXflights[i]['flight']['aircraft']['country']['alpha2'] except: country_alpha2="" try: country_alpha3=LAXflights[i]['flight']['aircraft']['country']['alpha3'] except: country_alpha3="" try: restricted=LAXflights[i]['flight']['aircraft']['restricted'] except: restricted="" try: owner_name= LAXflights[i]['flight']['owner']['name'] except: owner_name="" try: owner_iata=LAXflights[i]['flight']['owner']['code']['iata'] except: owner_iata="" try: owner_icao=LAXflights[i]['flight']['owner']['code']['icao'] except: owner_icao="" try: airline_name=LAXflights[i]['flight']['airline']['name'] except: airline_name="" try: airline_iata=LAXflights[i]['flight']['airline']['code']['iata'] except: airline_iata="" try: airline_icao=LAXflights[i]['flight']['airline']['code']['icao'] except: airline_icao="" try: airline_short=LAXflights[i]['flight']['airline']['short'] except: airline_short="" origin_aiportcode_iata=LAXflights[i]['flight']['airport']['origin']['code']['iata'] origin_aiportcode_icao=LAXflights[i]['flight']['airport']['origin']['code']['icao'] origin_aiporttimezone_name=LAXflights[i]['flight']['airport']['origin']['timezone']['name'] origin_aiporttimezone_offset=LAXflights[i]['flight']['airport']['origin']['timezone']['offset'] origin_aiporttimezone_abbr=LAXflights[i]['flight']['airport']['origin']['timezone']['abbr'] origin_aiporttimezone_abbrname=LAXflights[i]['flight']['airport']['origin']['timezone']['abbrName'] origin_airport_terminal=LAXflights[i]['flight']['airport']['origin']['info']['terminal'] origin_airport_baggage=LAXflights[i]['flight']['airport']['origin']['info']['baggage'] origin_airport_gate=LAXflights[i]['flight']['airport']['origin']['info']['gate'] origin_airport_name=LAXflights[i]['flight']['airport']['origin']['name'] origin_airport_latitude=LAXflights[i]['flight']['airport']['origin']['position']['latitude'] origin_airport_longitude=LAXflights[i]['flight']['airport']['origin']['position']['longitude'] origin_airport_countryname=LAXflights[i]['flight']['airport']['origin']['position']['country']['name'] origin_airport_countrycode=LAXflights[i]['flight']['airport']['origin']['position']['country']['code'] origin_airport_city=LAXflights[i]['flight']['airport']['origin']['position']['region']['city'] dest_airport_timezone_name=LAXflights[i]['flight']['airport']['destination']['timezone']['name'] dest_airport_timezone_offset=LAXflights[i]['flight']['airport']['destination']['timezone']['offset'] dest_airport_timezone_abbr=LAXflights[i]['flight']['airport']['destination']['timezone']['abbr'] dest_airport_timezone_abbrname=LAXflights[i]['flight']['airport']['destination']['timezone']['abbrName'] dest_airport_terminal=LAXflights[i]['flight']['airport']['destination']['info']['terminal'] dest_airport_baggage=LAXflights[i]['flight']['airport']['destination']['info']['baggage'] dest_airport_gate=LAXflights[i]['flight']['airport']['destination']['info']['gate'] scheduled_departuredate=LAXflights[i]['flight']['time']['scheduled']['departure_date'] scheduled_departuretime=LAXflights[i]['flight']['time']['scheduled']['departure_time'] scheduled_arrivaldate=LAXflights[i]['flight']['time']['scheduled']['arrival_date'] scheduled_arrivaltime=LAXflights[i]['flight']['time']['scheduled']['arrival_time'] try: real_departuredate=LAXflights[i]['flight']['time']['real']['departure_date'] except: real_departuredate="" try: real_departuretime=LAXflights[i]['flight']['time']['real']['departure_time'] except: real_departuretime="" try: estimated_arrivaldate=LAXflights[i]['flight']['time']['estimated']['arrival_date'] except: estimated_arrivaldate="" try: estimated_arrivaltime=LAXflights[i]['flight']['time']['estimated']['arrival_time'] except: estimated_arrivaltime="" row_list=[] row_list.append(airport) row_list.append(number) row_list.append(callsign) row_list.append(live) row_list.append(text) row_list.append(type_Arrival) row_list.append(color) row_list.append(diverted) row_list.append(utc_millis) row_list.append(utc_date) row_list.append(utc_time) row_list.append(utc) row_list.append(local_date) row_list.append(local_time) row_list.append(model_code) row_list.append(model_name) row_list.append(registration) row_list.append(country_name) row_list.append(country_alpha2) row_list.append(country_alpha3) row_list.append(restricted) row_list.append(owner_name) row_list.append(owner_iata) row_list.append(owner_icao) row_list.append(airline_name) row_list.append(airline_iata) row_list.append(airline_icao) row_list.append(airline_short) row_list.append(origin_aiportcode_iata) row_list.append(origin_aiportcode_icao) row_list.append(origin_aiporttimezone_name) row_list.append(origin_aiporttimezone_offset) row_list.append(origin_aiporttimezone_abbr) row_list.append(origin_aiporttimezone_abbrname) row_list.append(origin_airport_terminal) row_list.append(origin_airport_baggage) row_list.append(origin_airport_gate) row_list.append(origin_airport_name) row_list.append(origin_airport_latitude) row_list.append(origin_airport_longitude) row_list.append(origin_airport_countryname) row_list.append(origin_airport_countrycode) row_list.append(origin_airport_city) row_list.append(dest_airport_timezone_name) row_list.append(dest_airport_timezone_offset) row_list.append(dest_airport_timezone_abbr) row_list.append(dest_airport_timezone_abbrname) row_list.append(dest_airport_terminal) row_list.append(dest_airport_baggage) row_list.append(dest_airport_gate) row_list.append(scheduled_departuredate) row_list.append(scheduled_departuretime) row_list.append(scheduled_arrivaldate) row_list.append(scheduled_arrivaltime) row_list.append(real_departuredate) row_list.append(real_departuretime) row_list.append(estimated_arrivaldate) row_list.append(estimated_arrivaltime) final.append(row_list) writefile(final,filename) <file_sep>/README.md # AirPool by <NAME> ## Table of Contents 1. [Problem](README.md#problem) 2. [Project Idea](README.md#project-idea) 3. [Data](README.md#data) 4. [Use Case](README.md#use-case) 5. [Framework](README.md#framework) 6. [Demo](README.md#demo) 7. [Engineering Challenges](README.md#engineering-challenges) 8. [Future Work](README.md#future-work) 9. [Contact Information](README.md#contact-information) ## Problem The growth of an organization steadily leads to more data or rather big data which is stored across a number of locations and formats including relational databases, NoSQL databases, and logs. This data can be used to gain big insights and drive relevant actions and operations to achieve whatever outcome: big data analytics with a purpose; smart data for smart applications – and inevitably artificial intelligence to make sense of all that data. As more data is collected by an organization, the data landscape becomes increasingly fragmented, complex, and siloed and using a data warehouse alone become insufficient. <b> Solution: </b></br> Unlike relational engines and databases, which require highly structured data, Data Lakes enables organizations to store all kinds of data in an unstructured or semi-structured format. The cost of that storage is a tiny fraction of the cost of keeping it in a relational database. This has enabled organizations to hold onto much larger quantities of data than they could before. The data stored in the Data Lake are in its rawest form without needing to be converted and analyzed first. Some [business benefits](https://www.smartdatacollective.com/business-and-technological-benefits-of-data-lakes/) of Data Lakes include: * <b>Scalable: </b></br> Unlike traditional data warehouse, Data Leaks offers scalability and is inexpensive as well * <b>Versatile: </b></br> A data lake can store both structured and unstructured data from diverse sources. In other words, it can store XML, logs, multimedia, sensor data, chat, social data, binary, and people data * <b>Schema Flexibility: </b></br> For traditional schema, you need to have your data in a structured format. Traditional data warehouse products are schema based. But for analytics, this could prove to be a glitch as the data needs to be analyzed in its raw form. Data Lake enables you to be schema free, or you could come up with multiple schemas for the same data. In short, it allows you to separate schema from data, which is good for analytics. ## Project Idea To build a centralized data storage that can interoperate across multiple [unstructured and semi-structured data sets](README.md#data) as one “single” data lake, I used <b>historical and real time flight data</b>. Necessary raw data is then processed and is passed through a [data pipeline](README.md#framework) built on [Amazon Web Services](https://aws.amazon.com/big-data/datalakes-and-analytics/what-is-a-data-lake/) so that it is possible to discover and manage the data for operational analytics and visualization. ## Data ### Data Sources #### 1. **pyflightdata API**</br> [pyflightdata API](https://pyflightdata.readthedocs.io/en/latest/about.html) is a python script that can be installed and imported through Python to automatically get data from flightradar24, a popular site to get data related to an airline or particular flight and analyze it. For this project, pyflightdata API was used to get the following data: * list and details on countries, airlines and airport data * all aircrafts arriving at a particular airport * all aircrafts departing from a particular airport * all aircrafts onground on a particular airport * airport review data * airport performance statistics data * airport weather data * fleet for a particular airline * particular flight arriving and departing on a particular date * all flights from a particular origin to a particular destination * history of a flight by its flight number * history of a particular aircraft by its tail number * images of a particular aircraft by its tail number The API allows to extract 100 datapoints for each query and outputs each of its query in a unstructured JSON format. The code to get data from the API is executed **once** a day and the necessary raw JSON data is processed to queryable CSV files immediately (also once a day). The complete raw data and the processed CSV data is then uploaded and stored in S3 buckets in the AWS Cloud. #### 2. **Bureau of Transportation Statistics**</br> [The Bureau of Transportation Statistics (BTS)](https://www.bts.gov/), part of the Department of Transportation (DOT) is the preeminent source of statistics on commercial aviation, multimodal freight activity, and transportation economics, and provides context to decision makers and the public for understanding statistics on transportation. For this project, Bureau of Transportation Statistics was used to get the following data: * Quaterly U.S. Airline Financial Data * Historical total, cancelled and delayed flights by days (stored monthly) * Weight of Cargo in U.S. and Foreign Airlines * Quarterly total airfair data * Monthly U.S. Passenger Airline Employment Data * Quaterly average Domestic Airline Itinerary Fares By Origin City * Annual Airlines and Airport Rankings * Major U.S. Carriers Form 41 statistical and financial databases The information and tables available from BTS is structured or semi-structured in nature and require little to no processing to be read and gain insight from. #### 3. **data.world**</br> [data.world](https://data.world/) is home to the world’s largest collaborative data community, which is free and open to the public. It’s where people discover data, share analysis, and team up on everything. For my project, I have used this data source to download specific airlines data to particular cities and countries namely airlines data from the SJO aiport. For this project, data.world was used to get the following data: * Historical aviation Safety dataset * Flight routes from different countries All the datasets from data.world are downloadable and come in queryable CSV format. #### 4. **openflights.org**</br> As of January 2017, the [OpenFlights Airports Database](https://openflights.org/data.html) contains over 10,000 airports, train stations and ferry terminals spanning the globe. For this project, openflights.org was used to get the following data: * 67663 Routing Data between 3321 airports on 548 airlines spanning the globe * Airport and airline search The database in openflights.org are all unstructed .dat files and need to be processed for querying ### Quantifying the data #### Unstructured / raw data * **~10 GB data** of scheduling and statistics information and **~2GB worth images** * Raw scheduling information of live aircrafts to be **updated once a day** * **~5000 airport** and **~50,000 live airline information** from **250 countries** * **20 years** of international and domestic (US) statistics data * **Variety in data:** JSON files, .dat files, .txt files, images, gifs #### Structured / processed data * **~5 GB data** of scheduling and statistics information * Processing pipeline to be **executed once a day** * **Data format:** queryable CSVs with well-defined schema ## Use Case As the data which is being stored in the data lake are varied in terms of structure, schema as well as use case, I will be taking up one use case to explain the framework and the ETL pipeline completely. This way, there would be clearer understandability on how the pipeline is running and what each service is being used for. While there are multiple use cases which can be formed from the [data](README.md#data) collected, the use case I will be using is as follows - Pipeline to process information regarding the schedule of major U.S. and Canadian Airports. This would include any flight departing and arriving at major U.S. airports and their current flight status. ## Framework ![Pipeline](docs/pipeline.png) ### 1. Data The structured as well as unstructed data is stored in S3 buckets. The raw unstructured API data is gotten through running a python code in batches (once or twice a day) by using a scheduler. For a scheduler, I am using the schedule python package. An improvement to this would be to use Airflow. This data is then processed by using python and Spark and the processed data is then stored in the S3 bucket. There are two S3 buckets, one for the raw data and the other for the processed data. The AWS ETL pipeline crawls data from the processed data S3 bucket. For my use case, the raw data would be the unstructured JSON file of airlines currently arriving and departing from every major U.S. and Canadian Airports and all flights from a particular origin to a particular destination in U.S and Canada. This data is then processed using python and Spark and stored in the processed data S3 bucket. ### 2. AWS S3 bucket Amazon Simple Storage Service (Amazon S3) is an object storage service that offers industry-leading scalability, data availability, security, and performance. For this project, I have used S3 buckets to store raw and processed flight data. ### 3. Lake Formation AWS Lake Formation is a service that makes it easy to set up a secure data lake in days. It helps you collect and catalog data from databases and object storage, move the data into your new Amazon S3 data lake, clean and classify your data, and secure access to your sensitive data. Users can access a centralized data catalog which describes available data sets and their appropriate usage. AWS Lake Formation is based off AWS Glues and is used to create databases and catalog any tables crawled off AWS Glue. An example of such a table would be a queryable table formed after AWS Glue crawls through all flights departing from LAX. We can then give access to users to query particular columns or rows in the tables, or the table itself. ### 4. AWS Glue AWS Glue is a fully managed extract, transform, and load (ETL) service that makes it easy for customers to prepare and load their data for analytics. Glue crawls through mutliple CSVs with similar schema, such as all flights departing from multiple U.S. and Canadian airports would be stored in different CSVs and have the same schema, and build a queryable table from the data. This table can then be queryed using AWS Athena (necessary permissions and roles need to be created) ### 5. Amazon Athena Amazon Athena is an interactive query service that makes it easy to analyze data in Lake Formation using standard SQL. Athena is serverless, so there is no infrastructure to manage, and you pay only for the queries that you run. Through Athena, we can query the table of departing flights and find out which "American Airlines" flight left LAX on that particular day and where it's destination was. ### 6. Amazon QuickSight Amazon QuickSight is a fast, cloud-powered business intelligence service that makes it easy to deliver insights to everyone in your organization. QuickSight is used in project as a front-end and helps in visualizing any data that can be queried by AWS Athena. We could also directly visualize structured data that is stored in the S3 bucket. ## Demo ![Demo1](docs/demo1.png) ![Demo2](docs/demo2.png) ![Demo3](docs/demo3.png) ![Demo4](docs/demo4.png) ## Engineering Challenges ### Lack of rigid Schema while crawling tables While crawling through multiple CSVs with similar (but not the same) schema, AWS Glue tends to change the schema structure by shifting certain columns to another while creating its queryable tables. A way to solve this problem would be explicitly define the schema manually before crawling the processed files. This way AWS Glue would know which columns to crawl and if the columns are not available in certain files, it would skip them or leave them empty depending on what was described. ## Future Work * Query images and add them into the pipeline * Create AWS Glue workflows for different data structures * Use Airflow for scheduling ## Contact Information * [<NAME>](https://www.linkedin.com/in/tridha-chaudhuri/) * <EMAIL> <file_sep>/src/country_info.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Jun 11 01:30:51 2020 @author: tridhachaudhuri """ from pyflightdata import FlightData import csv api=FlightData() header = ['country','img'] file = open('country_list.csv', 'a+', newline ='') with file: writer = csv.writer(file) writer.writerow(header) file.close() details=api.get_countries() final=[] for d in details: country=d['country'] img=d['img'] row_list=[] row_list.append(country) row_list.append(img) final.append(row_list) file = open('country_list.csv', 'a+', newline ='') with file: writer = csv.writer(file) #writer.writerow(header) writer.writerows(final) <file_sep>/src/infoairport.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Jun 10 22:23:15 2020 @author: tridhachaudhuri """ from pyflightdata import FlightData import csv api=FlightData() #import pandas as pd airport_iata=[] with open('US_airlines.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: line_count += 1 else: airport_iata.append(row[0]) #airport_iata=["LAX","JFK"] header = ['airport_iata','name','iatacode','icaocode','delayindex_arrivals','delayindex_departures','stats', 'latitude','longitude','elevation_m','elevation_ft','timezone_name','timezone_offset', 'timezone_abbr','timezone_abbrname','url_homepage','url_webcam','url_wiki','img_src','img_link', 'img_source','visible'] file = open('airport_info.csv', 'a+', newline ='') with file: writer = csv.writer(file) writer.writerow(header) file.close() final=[] for airport in airport_iata: try: details=api.get_airport_details(airport) #weather=api.get_airport_weather((airport)) #add this - imp name=details['name'] iatacode=details['code']['iata'] icaocode=details['code']['icao'] delayindex_arrivals=details['delayIndex']['arrivals'] delayindex_departures=details['delayIndex']['departures'] stats=details['stats'] latitude=details['position']['latitude'] longitude=details['position']['longitude'] try: elevation_m=details['position']['elevation']['m'] except: elevation_m="" try: elevation_ft=details['position']['elevation']['ft'] except: elevation_ft="" timezone_name=details['timezone']['name'] timezone_offset=details['timezone']['offset'] timezone_abbr=details['timezone']['abbr'] timezone_abbrname=details['timezone']['abbrName'] url_homepage=details['url']['homepage'] url_webcam=details['url']['webcam'] url_wiki=details['url']['wikipedia'] try: img_src=details['airportImages']['large'][0]['src'] except: img_src="" try: img_link=details['airportImages']['large'][0]['link'] except: img_link="" try: img_source=details['airportImages']['large'][0]['source'] except: img_source="" visible=details['visible'] row_list=[] row_list.append(airport) row_list.append(name) row_list.append(iatacode) row_list.append(icaocode) row_list.append(delayindex_arrivals) row_list.append(delayindex_departures) row_list.append(stats) row_list.append(latitude) row_list.append(longitude) row_list.append(elevation_m) row_list.append(elevation_ft) row_list.append(timezone_name) row_list.append(timezone_offset) row_list.append(timezone_abbr) row_list.append(timezone_abbrname) row_list.append(url_homepage) row_list.append(url_webcam) row_list.append(url_wiki) row_list.append(img_src) row_list.append(img_link) row_list.append(img_source) row_list.append(visible) final.append(row_list) except: print(airport) file = open('airport_info.csv', 'a+', newline ='') with file: writer = csv.writer(file) #writer.writerow(header) writer.writerows(final) <file_sep>/src/airport_review.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Jun 10 22:23:15 2020 @author: tridhachaudhuri """ from pyflightdata import FlightData import csv api=FlightData() #import pandas as pd airport_iata=[] with open('US_airlines.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: line_count += 1 else: airport_iata.append(row[0]) #airport_iata=["LAX","JFK"] header = ['airport_iata','url','avg_rating','total_rating','comment','author_facebookid','author_name', 'timestamp_date','timestamp_time','number_of_reviews','number_of_evaluation'] file = open('airport_review.csv', 'a+', newline ='') with file: writer = csv.writer(file) writer.writerow(header) file.close() final=[] for airport in airport_iata: #print(airport) details=api.get_airport_reviews(airport) #weather=api.get_airport_weather((airport)) #add this - imp try: url=details['url'] except: url="" try: avg_rating=details['ratings']['avg'] total_rating=details['ratings']['total'] except: avg_rating="" total_rating="" try: comment=details['comment'][0]['content'] except: comment="" try: author_facebookid=details['comment'][0]['author']['facebookId'] author_name=details['comment'][0]['author']['name'] except: author_facebookid="" author_name="" try: timestamp_date=details['comment'][0]['timestamp_date'] timestamp_time=details['comment'][0]['timestamp_time'] except: timestamp_time="" timestamp_date="" number_of_reviews=details['reviews'] number_of_evaluation=details['evaluation'] row_list=[] row_list.append(airport) row_list.append(url) row_list.append(avg_rating) row_list.append(total_rating) row_list.append(comment) row_list.append(author_facebookid) row_list.append(author_name) row_list.append(timestamp_date) row_list.append(timestamp_time) row_list.append(number_of_reviews) row_list.append(number_of_evaluation) final.append(row_list) file = open('airport_review.csv', 'a+', newline ='') with file: writer = csv.writer(file) #writer.writerow(header) writer.writerows(final) <file_sep>/docs/README.md CSVs and PNG used to run the code
19bf540a2ca648c38e24f7c8070c86700b560722
[ "Markdown", "Python" ]
12
Python
tridhachaudhuri/AirPool
2e7e24cb957b077de8c7552bdd099f7c1e7bd816
84db8992a3c8515bbe039f2dea087f1110bbecdb
refs/heads/master
<file_sep>var winData=[ { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC191_kB5-F48", "buy_num": 27, "unit_price": 120, "unit_num": 9, "stock": 30, "discount_price": 293, "package_price": 1080 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC50_kB18-F12", "buy_num": 21, "unit_price": 101, "unit_num": 9, "stock": 9, "discount_price": 24, "package_price": 909 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC131_kB11-F22", "buy_num": 9, "unit_price": 49, "unit_num": 35, "stock": 19, "discount_price": 114, "package_price": 1715 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC90_kB12-F13", "buy_num": 16, "unit_price": 2, "unit_num": 36, "stock": 20, "discount_price": 58, "package_price": 72 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "Женские босоножки", "brand": "CAMIDY", "sku": "RC131_kB14-F25", "buy_num": 11, "unit_price": 147, "unit_num": 36, "stock": 26, "discount_price": 105, "package_price": 5292 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "Детские сандалии", "brand": "24PFM", "sku": "RC105_kB19-F20", "buy_num": 1, "unit_price": 179, "unit_num": 32, "stock": 20, "discount_price": 104, "package_price": 5728 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC12_kB16-F34", "buy_num": 6, "unit_price": 78, "unit_num": 12, "stock": 6, "discount_price": 138, "package_price": 936 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC9_kB15-F22", "buy_num": 14, "unit_price": 133, "unit_num": 15, "stock": 15, "discount_price": 109, "package_price": 1995 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "Детские ботинки", "brand": "BAOLUOMILAN", "sku": "RC141_kB8-F27", "buy_num": 25, "unit_price": 48, "unit_num": 7, "stock": 12, "discount_price": 213, "package_price": 336 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC18_kB6-F12", "buy_num": 1, "unit_price": 154, "unit_num": 17, "stock": 5, "discount_price": 83, "package_price": 2618 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC36_kB13-F42", "buy_num": 6, "unit_price": 165, "unit_num": 11, "stock": 36, "discount_price": 180, "package_price": 1815 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC105_kB5-F30", "buy_num": 24, "unit_price": 95, "unit_num": 31, "stock": 13, "discount_price": 183, "package_price": 2945 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC111_kB9-F44", "buy_num": 18, "unit_price": 181, "unit_num": 27, "stock": 19, "discount_price": 32, "package_price": 4887 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC108_kB19-F33", "buy_num": 22, "unit_price": 127, "unit_num": 17, "stock": 20, "discount_price": 80, "package_price": 2159 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC134_kB8-F27", "buy_num": 22, "unit_price": 75, "unit_num": 5, "stock": 9, "discount_price": 11, "package_price": 375 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC164_kB17-F24", "buy_num": 6, "unit_price": 181, "unit_num": 17, "stock": 25, "discount_price": 196, "package_price": 3077 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC146_kB12-F16", "buy_num": 30, "unit_price": 89, "unit_num": 26, "stock": 15, "discount_price": 53, "package_price": 2314 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC118_kB12-F23", "buy_num": 26, "unit_price": 40, "unit_num": 5, "stock": 11, "discount_price": 42, "package_price": 200 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC42_kB15-F26", "buy_num": 19, "unit_price": 21, "unit_num": 26, "stock": 12, "discount_price": 104, "package_price": 546 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC86_kB18-F37", "buy_num": 24, "unit_price": 25, "unit_num": 16, "stock": 39, "discount_price": 222, "package_price": 400 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC38_kB20-F15", "buy_num": 6, "unit_price": 162, "unit_num": 33, "stock": 36, "discount_price": 16, "package_price": 5346 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC187_kB6-F14", "buy_num": 22, "unit_price": 136, "unit_num": 27, "stock": 14, "discount_price": 146, "package_price": 3672 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC28_kB8-F12", "buy_num": 30, "unit_price": 192, "unit_num": 14, "stock": 20, "discount_price": 211, "package_price": 2688 }, { "imgUrl": "/images/qiproduct1.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC66_kB13-F12", "buy_num": 22, "unit_price": 6, "unit_num": 17, "stock": 3, "discount_price": 43, "package_price": 102 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC35_kB15-F32", "buy_num": 3, "unit_price": 43, "unit_num": 15, "stock": 7, "discount_price": 277, "package_price": 645 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC156_kB7-F45", "buy_num": 12, "unit_price": 121, "unit_num": 22, "stock": 3, "discount_price": 233, "package_price": 2662 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC74_kB19-F25", "buy_num": 2, "unit_price": 25, "unit_num": 5, "stock": 13, "discount_price": 219, "package_price": 125 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC119_kB4-F43", "buy_num": 16, "unit_price": 114, "unit_num": 22, "stock": 27, "discount_price": 50, "package_price": 2508 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC117_kB6-F14", "buy_num": 13, "unit_price": 38, "unit_num": 14, "stock": 32, "discount_price": 194, "package_price": 532 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC140_kB11-F18", "buy_num": 27, "unit_price": 1, "unit_num": 23, "stock": 9, "discount_price": 149, "package_price": 23 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC145_kB8-F40", "buy_num": 13, "unit_price": 148, "unit_num": 7, "stock": 30, "discount_price": 82, "package_price": 1036 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC23_kB14-F22", "buy_num": 24, "unit_price": 160, "unit_num": 8, "stock": 36, "discount_price": 157, "package_price": 1280 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC80_kB19-F41", "buy_num": 25, "unit_price": 91, "unit_num": 14, "stock": 24, "discount_price": 220, "package_price": 1274 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC14_kB17-F10", "buy_num": 16, "unit_price": 85, "unit_num": 35, "stock": 5, "discount_price": 117, "package_price": 2975 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC3_kB5-F43", "buy_num": 4, "unit_price": 146, "unit_num": 25, "stock": 2, "discount_price": 183, "package_price": 3650 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC190_kB4-F50", "buy_num": 15, "unit_price": 81, "unit_num": 30, "stock": 37, "discount_price": 299, "package_price": 2430 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC82_kB10-F41", "buy_num": 26, "unit_price": 25, "unit_num": 5, "stock": 6, "discount_price": 297, "package_price": 125 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC26_kB19-F40", "buy_num": 3, "unit_price": 8, "unit_num": 36, "stock": 27, "discount_price": 16, "package_price": 288 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC129_kB19-F26", "buy_num": 12, "unit_price": 92, "unit_num": 8, "stock": 27, "discount_price": 87, "package_price": 736 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC25_kB6-F38", "buy_num": 30, "unit_price": 167, "unit_num": 12, "stock": 36, "discount_price": 144, "package_price": 2004 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC135_kB6-F38", "buy_num": 9, "unit_price": 12, "unit_num": 11, "stock": 7, "discount_price": 235, "package_price": 132 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC180_kB16-F42", "buy_num": 22, "unit_price": 77, "unit_num": 21, "stock": 18, "discount_price": 248, "package_price": 1617 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC154_kB4-F41", "buy_num": 6, "unit_price": 67, "unit_num": 21, "stock": 29, "discount_price": 189, "package_price": 1407 }, { "imgUrl": "/images/qiproduct1.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC196_kB12-F33", "buy_num": 24, "unit_price": 2, "unit_num": 5, "stock": 24, "discount_price": 229, "package_price": 10 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC147_kB10-F35", "buy_num": 19, "unit_price": 180, "unit_num": 4, "stock": 27, "discount_price": 142, "package_price": 720 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC152_kB12-F14", "buy_num": 10, "unit_price": 187, "unit_num": 22, "stock": 23, "discount_price": 255, "package_price": 4114 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC35_kB14-F23", "buy_num": 23, "unit_price": 52, "unit_num": 22, "stock": 0, "discount_price": 280, "package_price": 1144 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC176_kB17-F25", "buy_num": 7, "unit_price": 85, "unit_num": 28, "stock": 38, "discount_price": 74, "package_price": 2380 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC45_kB11-F22", "buy_num": 3, "unit_price": 26, "unit_num": 25, "stock": 38, "discount_price": 174, "package_price": 650 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC183_kB13-F49", "buy_num": 9, "unit_price": 181, "unit_num": 23, "stock": 17, "discount_price": 71, "package_price": 4163 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC189_kB10-F32", "buy_num": 27, "unit_price": 108, "unit_num": 14, "stock": 24, "discount_price": 10, "package_price": 1512 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC86_kB5-F21", "buy_num": 30, "unit_price": 70, "unit_num": 30, "stock": 13, "discount_price": 210, "package_price": 2100 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC50_kB13-F47", "buy_num": 23, "unit_price": 22, "unit_num": 7, "stock": 35, "discount_price": 293, "package_price": 154 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC60_kB6-F32", "buy_num": 23, "unit_price": 17, "unit_num": 24, "stock": 40, "discount_price": 22, "package_price": 408 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC7_kB8-F36", "buy_num": 21, "unit_price": 118, "unit_num": 17, "stock": 5, "discount_price": 75, "package_price": 2006 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC95_kB13-F23", "buy_num": 11, "unit_price": 110, "unit_num": 15, "stock": 36, "discount_price": 109, "package_price": 1650 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC6_kB18-F40", "buy_num": 13, "unit_price": 192, "unit_num": 32, "stock": 30, "discount_price": 199, "package_price": 6144 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC165_kB7-F21", "buy_num": 18, "unit_price": 102, "unit_num": 10, "stock": 39, "discount_price": 272, "package_price": 1020 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC189_kB5-F15", "buy_num": 4, "unit_price": 24, "unit_num": 12, "stock": 18, "discount_price": 119, "package_price": 288 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC112_kB11-F14", "buy_num": 10, "unit_price": 105, "unit_num": 23, "stock": 25, "discount_price": 114, "package_price": 2415 }, { "imgUrl": "/images/qiproduct1.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC158_kB9-F33", "buy_num": 5, "unit_price": 72, "unit_num": 8, "stock": 4, "discount_price": 183, "package_price": 576 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC2_kB14-F35", "buy_num": 14, "unit_price": 197, "unit_num": 26, "stock": 36, "discount_price": 104, "package_price": 5122 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC86_kB11-F11", "buy_num": 25, "unit_price": 91, "unit_num": 11, "stock": 38, "discount_price": 107, "package_price": 1001 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC183_kB18-F12", "buy_num": 12, "unit_price": 190, "unit_num": 8, "stock": 20, "discount_price": 193, "package_price": 1520 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC53_kB4-F38", "buy_num": 26, "unit_price": 76, "unit_num": 28, "stock": 18, "discount_price": 227, "package_price": 2128 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC26_kB16-F26", "buy_num": 24, "unit_price": 22, "unit_num": 17, "stock": 21, "discount_price": 212, "package_price": 374 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC199_kB17-F13", "buy_num": 11, "unit_price": 180, "unit_num": 18, "stock": 11, "discount_price": 197, "package_price": 3240 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC61_kB11-F15", "buy_num": 28, "unit_price": 113, "unit_num": 9, "stock": 21, "discount_price": 42, "package_price": 1017 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC189_kB5-F47", "buy_num": 14, "unit_price": 173, "unit_num": 21, "stock": 29, "discount_price": 142, "package_price": 3633 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC48_kB6-F18", "buy_num": 29, "unit_price": 36, "unit_num": 22, "stock": 4, "discount_price": 46, "package_price": 792 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC99_kB15-F27", "buy_num": 18, "unit_price": 3, "unit_num": 6, "stock": 35, "discount_price": 204, "package_price": 18 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC151_kB19-F23", "buy_num": 8, "unit_price": 5, "unit_num": 33, "stock": 2, "discount_price": 171, "package_price": 165 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC47_kB4-F31", "buy_num": 17, "unit_price": 104, "unit_num": 14, "stock": 23, "discount_price": 59, "package_price": 1456 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC161_kB10-F30", "buy_num": 26, "unit_price": 182, "unit_num": 11, "stock": 13, "discount_price": 273, "package_price": 2002 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC134_kB9-F20", "buy_num": 30, "unit_price": 199, "unit_num": 29, "stock": 36, "discount_price": 54, "package_price": 5771 }, { "imgUrl": "/images/qiproduct1.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC79_kB11-F40", "buy_num": 17, "unit_price": 198, "unit_num": 32, "stock": 39, "discount_price": 218, "package_price": 6336 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC3_kB15-F48", "buy_num": 19, "unit_price": 28, "unit_num": 16, "stock": 36, "discount_price": 294, "package_price": 448 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC53_kB6-F24", "buy_num": 29, "unit_price": 83, "unit_num": 33, "stock": 38, "discount_price": 251, "package_price": 2739 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC85_kB18-F50", "buy_num": 6, "unit_price": 87, "unit_num": 18, "stock": 15, "discount_price": 264, "package_price": 1566 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC28_kB20-F40", "buy_num": 17, "unit_price": 61, "unit_num": 22, "stock": 15, "discount_price": 154, "package_price": 1342 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC89_kB18-F14", "buy_num": 5, "unit_price": 97, "unit_num": 17, "stock": 38, "discount_price": 283, "package_price": 1649 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC89_kB19-F28", "buy_num": 5, "unit_price": 199, "unit_num": 27, "stock": 27, "discount_price": 117, "package_price": 5373 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "Детские сандалии", "brand": "AFORE", "sku": "RC100_kB12-F20", "buy_num": 10, "unit_price": 73, "unit_num": 22, "stock": 39, "discount_price": 44, "package_price": 1606 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC166_kB20-F44", "buy_num": 3, "unit_price": 185, "unit_num": 4, "stock": 18, "discount_price": 165, "package_price": 740 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC160_kB9-F40", "buy_num": 30, "unit_price": 61, "unit_num": 13, "stock": 37, "discount_price": 59, "package_price": 793 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "Женские туфли", "brand": "24PFM", "sku": "RC121_kB11-F24", "buy_num": 4, "unit_price": 45, "unit_num": 27, "stock": 18, "discount_price": 132, "package_price": 1215 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC114_kB7-F24", "buy_num": 12, "unit_price": 8, "unit_num": 16, "stock": 23, "discount_price": 200, "package_price": 128 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC25_kB13-F35", "buy_num": 3, "unit_price": 126, "unit_num": 7, "stock": 25, "discount_price": 295, "package_price": 882 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC197_kB4-F8", "buy_num": 16, "unit_price": 148, "unit_num": 33, "stock": 40, "discount_price": 213, "package_price": 4884 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC41_kB8-F22", "buy_num": 21, "unit_price": 91, "unit_num": 7, "stock": 20, "discount_price": 162, "package_price": 637 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC136_kB11-F32", "buy_num": 12, "unit_price": 64, "unit_num": 6, "stock": 28, "discount_price": 5, "package_price": 384 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC92_kB13-F38", "buy_num": 17, "unit_price": 141, "unit_num": 34, "stock": 9, "discount_price": 238, "package_price": 4794 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC145_kB7-F41", "buy_num": 29, "unit_price": 154, "unit_num": 25, "stock": 38, "discount_price": 178, "package_price": 3850 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC36_kB17-F37", "buy_num": 27, "unit_price": 81, "unit_num": 9, "stock": 23, "discount_price": 282, "package_price": 729 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC112_kB18-F31", "buy_num": 12, "unit_price": 60, "unit_num": 35, "stock": 28, "discount_price": 50, "package_price": 2100 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC143_kB15-F28", "buy_num": 14, "unit_price": 131, "unit_num": 30, "stock": 5, "discount_price": 179, "package_price": 3930 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC180_kB17-F12", "buy_num": 2, "unit_price": 140, "unit_num": 6, "stock": 3, "discount_price": 87, "package_price": 840 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC29_kB17-F30", "buy_num": 23, "unit_price": 86, "unit_num": 31, "stock": 16, "discount_price": 177, "package_price": 2666 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC147_kB17-F25", "buy_num": 24, "unit_price": 100, "unit_num": 8, "stock": 23, "discount_price": 28, "package_price": 800 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC119_kB18-F40", "buy_num": 3, "unit_price": 149, "unit_num": 9, "stock": 4, "discount_price": 233, "package_price": 1341 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC133_kB8-F48", "buy_num": 2, "unit_price": 191, "unit_num": 28, "stock": 4, "discount_price": 154, "package_price": 5348 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC39_kB17-F23", "buy_num": 23, "unit_price": 181, "unit_num": 32, "stock": 8, "discount_price": 58, "package_price": 5792 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC81_kB11-F34", "buy_num": 16, "unit_price": 13, "unit_num": 18, "stock": 8, "discount_price": 59, "package_price": 234 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC55_kB4-F15", "buy_num": 3, "unit_price": 152, "unit_num": 6, "stock": 11, "discount_price": 156, "package_price": 912 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC89_kB7-F44", "buy_num": 3, "unit_price": 59, "unit_num": 13, "stock": 17, "discount_price": 164, "package_price": 767 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC142_kB17-F12", "buy_num": 29, "unit_price": 38, "unit_num": 19, "stock": 11, "discount_price": 52, "package_price": 722 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC60_kB17-F14", "buy_num": 23, "unit_price": 133, "unit_num": 25, "stock": 35, "discount_price": 111, "package_price": 3325 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC52_kB15-F18", "buy_num": 9, "unit_price": 199, "unit_num": 22, "stock": 26, "discount_price": 130, "package_price": 4378 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "Детские сандалии", "brand": "AFORE", "sku": "RC184_kB6-F31", "buy_num": 24, "unit_price": 54, "unit_num": 27, "stock": 21, "discount_price": 267, "package_price": 1458 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC109_kB13-F17", "buy_num": 20, "unit_price": 124, "unit_num": 20, "stock": 14, "discount_price": 72, "package_price": 2480 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC105_kB17-F42", "buy_num": 12, "unit_price": 197, "unit_num": 19, "stock": 36, "discount_price": 265, "package_price": 3743 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "Женские туфли", "brand": "24PFM", "sku": "RC152_kB13-F30", "buy_num": 29, "unit_price": 38, "unit_num": 27, "stock": 32, "discount_price": 216, "package_price": 1026 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC159_kB11-F18", "buy_num": 22, "unit_price": 5, "unit_num": 31, "stock": 20, "discount_price": 238, "package_price": 155 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC146_kB12-F13", "buy_num": 14, "unit_price": 75, "unit_num": 5, "stock": 24, "discount_price": 291, "package_price": 375 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC108_kB12-F27", "buy_num": 18, "unit_price": 164, "unit_num": 24, "stock": 9, "discount_price": 26, "package_price": 3936 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "Женские босоножки", "brand": "24PFM", "sku": "RC188_kB14-F27", "buy_num": 26, "unit_price": 40, "unit_num": 7, "stock": 20, "discount_price": 46, "package_price": 280 }, { "imgUrl": "/images/qiproduct1.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC190_kB16-F10", "buy_num": 6, "unit_price": 167, "unit_num": 28, "stock": 3, "discount_price": 122, "package_price": 4676 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC53_kB12-F17", "buy_num": 29, "unit_price": 76, "unit_num": 15, "stock": 10, "discount_price": 62, "package_price": 1140 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC60_kB18-F17", "buy_num": 8, "unit_price": 35, "unit_num": 35, "stock": 23, "discount_price": 116, "package_price": 1225 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC154_kB15-F46", "buy_num": 22, "unit_price": 82, "unit_num": 8, "stock": 14, "discount_price": 135, "package_price": 656 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC122_kB19-F15", "buy_num": 6, "unit_price": 61, "unit_num": 34, "stock": 31, "discount_price": 178, "package_price": 2074 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC142_kB11-F24", "buy_num": 4, "unit_price": 58, "unit_num": 20, "stock": 10, "discount_price": 152, "package_price": 1160 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC169_kB19-F34", "buy_num": 13, "unit_price": 21, "unit_num": 7, "stock": 16, "discount_price": 240, "package_price": 147 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC128_kB8-F11", "buy_num": 9, "unit_price": 45, "unit_num": 35, "stock": 18, "discount_price": 56, "package_price": 1575 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC163_kB10-F34", "buy_num": 7, "unit_price": 130, "unit_num": 24, "stock": 19, "discount_price": 54, "package_price": 3120 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC10_kB18-F11", "buy_num": 8, "unit_price": 113, "unit_num": 15, "stock": 17, "discount_price": 117, "package_price": 1695 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC192_kB20-F35", "buy_num": 3, "unit_price": 98, "unit_num": 22, "stock": 33, "discount_price": 59, "package_price": 2156 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC142_kB18-F49", "buy_num": 22, "unit_price": 106, "unit_num": 18, "stock": 12, "discount_price": 89, "package_price": 1908 }, { "imgUrl": "/images/qiproduct1.jpg", "name": "Женские босоножки", "brand": "BAOLUOMILAN", "sku": "RC19_kB16-F28", "buy_num": 6, "unit_price": 113, "unit_num": 18, "stock": 6, "discount_price": 123, "package_price": 2034 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC185_kB17-F50", "buy_num": 21, "unit_price": 4, "unit_num": 9, "stock": 38, "discount_price": 187, "package_price": 36 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC94_kB6-F34", "buy_num": 13, "unit_price": 123, "unit_num": 26, "stock": 11, "discount_price": 270, "package_price": 3198 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC181_kB7-F37", "buy_num": 16, "unit_price": 81, "unit_num": 35, "stock": 33, "discount_price": 47, "package_price": 2835 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC169_kB13-F13", "buy_num": 22, "unit_price": 62, "unit_num": 6, "stock": 7, "discount_price": 231, "package_price": 372 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC129_kB9-F22", "buy_num": 29, "unit_price": 71, "unit_num": 31, "stock": 20, "discount_price": 206, "package_price": 2201 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC77_kB5-F25", "buy_num": 17, "unit_price": 80, "unit_num": 12, "stock": 5, "discount_price": 5, "package_price": 960 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC70_kB15-F14", "buy_num": 24, "unit_price": 99, "unit_num": 32, "stock": 39, "discount_price": 294, "package_price": 3168 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC191_kB9-F29", "buy_num": 21, "unit_price": 157, "unit_num": 18, "stock": 27, "discount_price": 228, "package_price": 2826 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC33_kB5-F47", "buy_num": 10, "unit_price": 119, "unit_num": 18, "stock": 33, "discount_price": 284, "package_price": 2142 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC63_kB20-F19", "buy_num": 19, "unit_price": 138, "unit_num": 9, "stock": 33, "discount_price": 132, "package_price": 1242 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC128_kB9-F48", "buy_num": 12, "unit_price": 193, "unit_num": 20, "stock": 14, "discount_price": 235, "package_price": 3860 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC162_kB8-F38", "buy_num": 5, "unit_price": 114, "unit_num": 24, "stock": 28, "discount_price": 254, "package_price": 2736 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC34_kB19-F12", "buy_num": 4, "unit_price": 136, "unit_num": 11, "stock": 23, "discount_price": 138, "package_price": 1496 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC75_kB20-F48", "buy_num": 22, "unit_price": 7, "unit_num": 30, "stock": 30, "discount_price": 211, "package_price": 210 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC84_kB11-F17", "buy_num": 6, "unit_price": 77, "unit_num": 17, "stock": 18, "discount_price": 118, "package_price": 1309 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC93_kB6-F17", "buy_num": 13, "unit_price": 137, "unit_num": 24, "stock": 2, "discount_price": 160, "package_price": 3288 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "Женские босоножки", "brand": "CAMIDY", "sku": "RC8_kB12-F46", "buy_num": 20, "unit_price": 87, "unit_num": 7, "stock": 34, "discount_price": 283, "package_price": 609 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC38_kB16-F10", "buy_num": 5, "unit_price": 14, "unit_num": 29, "stock": 11, "discount_price": 74, "package_price": 406 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC96_kB9-F22", "buy_num": 20, "unit_price": 169, "unit_num": 31, "stock": 22, "discount_price": 143, "package_price": 5239 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC84_kB20-F46", "buy_num": 29, "unit_price": 136, "unit_num": 31, "stock": 8, "discount_price": 83, "package_price": 4216 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC186_kB19-F48", "buy_num": 20, "unit_price": 59, "unit_num": 26, "stock": 31, "discount_price": 24, "package_price": 1534 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC71_kB10-F12", "buy_num": 21, "unit_price": 109, "unit_num": 6, "stock": 13, "discount_price": 238, "package_price": 654 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC178_kB5-F38", "buy_num": 11, "unit_price": 183, "unit_num": 6, "stock": 2, "discount_price": 235, "package_price": 1098 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC81_kB14-F39", "buy_num": 24, "unit_price": 13, "unit_num": 10, "stock": 4, "discount_price": 16, "package_price": 130 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC158_kB9-F15", "buy_num": 15, "unit_price": 166, "unit_num": 30, "stock": 23, "discount_price": 214, "package_price": 4980 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC199_kB17-F42", "buy_num": 15, "unit_price": 99, "unit_num": 10, "stock": 25, "discount_price": 7, "package_price": 990 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC41_kB9-F39", "buy_num": 9, "unit_price": 131, "unit_num": 12, "stock": 27, "discount_price": 143, "package_price": 1572 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC160_kB19-F49", "buy_num": 24, "unit_price": 128, "unit_num": 24, "stock": 27, "discount_price": 199, "package_price": 3072 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC96_kB12-F11", "buy_num": 8, "unit_price": 14, "unit_num": 11, "stock": 33, "discount_price": 144, "package_price": 154 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC121_kB7-F46", "buy_num": 18, "unit_price": 82, "unit_num": 35, "stock": 31, "discount_price": 167, "package_price": 2870 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC194_kB12-F16", "buy_num": 13, "unit_price": 180, "unit_num": 14, "stock": 24, "discount_price": 174, "package_price": 2520 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC86_kB6-F47", "buy_num": 12, "unit_price": 20, "unit_num": 11, "stock": 33, "discount_price": 38, "package_price": 220 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC77_kB9-F13", "buy_num": 10, "unit_price": 197, "unit_num": 6, "stock": 26, "discount_price": 276, "package_price": 1182 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "Женские босоножки", "brand": "AILAIFA", "sku": "RC52_kB14-F46", "buy_num": 20, "unit_price": 144, "unit_num": 33, "stock": 31, "discount_price": 241, "package_price": 4752 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC92_kB6-F13", "buy_num": 8, "unit_price": 138, "unit_num": 30, "stock": 9, "discount_price": 114, "package_price": 4140 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC71_kB20-F44", "buy_num": 30, "unit_price": 99, "unit_num": 28, "stock": 39, "discount_price": 40, "package_price": 2772 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC86_kB5-F43", "buy_num": 26, "unit_price": 116, "unit_num": 24, "stock": 22, "discount_price": 280, "package_price": 2784 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC99_kB20-F42", "buy_num": 25, "unit_price": 33, "unit_num": 10, "stock": 24, "discount_price": 122, "package_price": 330 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC148_kB10-F17", "buy_num": 14, "unit_price": 185, "unit_num": 9, "stock": 34, "discount_price": 253, "package_price": 1665 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC36_kB13-F19", "buy_num": 10, "unit_price": 45, "unit_num": 12, "stock": 5, "discount_price": 180, "package_price": 540 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC187_kB9-F38", "buy_num": 12, "unit_price": 23, "unit_num": 21, "stock": 34, "discount_price": 162, "package_price": 483 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC127_kB20-F43", "buy_num": 20, "unit_price": 17, "unit_num": 5, "stock": 17, "discount_price": 70, "package_price": 85 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC124_kB11-F45", "buy_num": 22, "unit_price": 132, "unit_num": 14, "stock": 34, "discount_price": 25, "package_price": 1848 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC130_kB20-F38", "buy_num": 28, "unit_price": 3, "unit_num": 23, "stock": 25, "discount_price": 72, "package_price": 69 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC142_kB11-F38", "buy_num": 26, "unit_price": 104, "unit_num": 14, "stock": 29, "discount_price": 54, "package_price": 1456 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC94_kB13-F47", "buy_num": 29, "unit_price": 183, "unit_num": 20, "stock": 35, "discount_price": 228, "package_price": 3660 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC190_kB13-F18", "buy_num": 10, "unit_price": 94, "unit_num": 10, "stock": 29, "discount_price": 62, "package_price": 940 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC132_kB6-F8", "buy_num": 3, "unit_price": 185, "unit_num": 20, "stock": 1, "discount_price": 233, "package_price": 3700 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC94_kB12-F25", "buy_num": 10, "unit_price": 16, "unit_num": 12, "stock": 22, "discount_price": 174, "package_price": 192 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC59_kB20-F50", "buy_num": 9, "unit_price": 65, "unit_num": 29, "stock": 5, "discount_price": 184, "package_price": 1885 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC165_kB10-F22", "buy_num": 6, "unit_price": 156, "unit_num": 30, "stock": 29, "discount_price": 264, "package_price": 4680 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC164_kB4-F37", "buy_num": 26, "unit_price": 88, "unit_num": 9, "stock": 37, "discount_price": 32, "package_price": 792 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC153_kB20-F47", "buy_num": 25, "unit_price": 91, "unit_num": 31, "stock": 39, "discount_price": 141, "package_price": 2821 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC41_kB19-F34", "buy_num": 4, "unit_price": 143, "unit_num": 30, "stock": 5, "discount_price": 225, "package_price": 4290 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC95_kB5-F18", "buy_num": 1, "unit_price": 64, "unit_num": 27, "stock": 14, "discount_price": 235, "package_price": 1728 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC130_kB8-F27", "buy_num": 14, "unit_price": 187, "unit_num": 9, "stock": 38, "discount_price": 209, "package_price": 1683 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC129_kB14-F38", "buy_num": 30, "unit_price": 135, "unit_num": 19, "stock": 33, "discount_price": 153, "package_price": 2565 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC160_kB15-F29", "buy_num": 18, "unit_price": 83, "unit_num": 33, "stock": 31, "discount_price": 251, "package_price": 2739 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC196_kB16-F18", "buy_num": 6, "unit_price": 172, "unit_num": 19, "stock": 11, "discount_price": 258, "package_price": 3268 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC26_kB11-F12", "buy_num": 17, "unit_price": 25, "unit_num": 6, "stock": 1, "discount_price": 139, "package_price": 150 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC52_kB13-F49", "buy_num": 18, "unit_price": 159, "unit_num": 19, "stock": 3, "discount_price": 14, "package_price": 3021 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC92_kB7-F12", "buy_num": 6, "unit_price": 138, "unit_num": 13, "stock": 15, "discount_price": 293, "package_price": 1794 }, { "imgUrl": "/images/qiproduct1.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC113_kB9-F28", "buy_num": 17, "unit_price": 106, "unit_num": 32, "stock": 10, "discount_price": 264, "package_price": 3392 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC85_kB16-F23", "buy_num": 19, "unit_price": 28, "unit_num": 20, "stock": 19, "discount_price": 264, "package_price": 560 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC105_kB10-F45", "buy_num": 8, "unit_price": 92, "unit_num": 36, "stock": 36, "discount_price": 276, "package_price": 3312 }, { "imgUrl": "/images/qiproduct1.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC72_kB9-F25", "buy_num": 12, "unit_price": 161, "unit_num": 31, "stock": 39, "discount_price": 257, "package_price": 4991 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC166_kB13-F39", "buy_num": 21, "unit_price": 148, "unit_num": 35, "stock": 4, "discount_price": 280, "package_price": 5180 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC153_kB16-F35", "buy_num": 13, "unit_price": 147, "unit_num": 34, "stock": 9, "discount_price": 271, "package_price": 4998 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC182_kB6-F32", "buy_num": 16, "unit_price": 38, "unit_num": 31, "stock": 1, "discount_price": 260, "package_price": 1178 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC193_kB8-F11", "buy_num": 17, "unit_price": 73, "unit_num": 5, "stock": 9, "discount_price": 71, "package_price": 365 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC163_kB6-F43", "buy_num": 18, "unit_price": 184, "unit_num": 8, "stock": 39, "discount_price": 214, "package_price": 1472 }, { "imgUrl": "/images/qiproduct1.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC115_kB6-F19", "buy_num": 13, "unit_price": 38, "unit_num": 19, "stock": 36, "discount_price": 251, "package_price": 722 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC106_kB5-F9", "buy_num": 12, "unit_price": 11, "unit_num": 23, "stock": 16, "discount_price": 232, "package_price": 253 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC187_kB8-F38", "buy_num": 16, "unit_price": 160, "unit_num": 11, "stock": 12, "discount_price": 30, "package_price": 1760 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC195_kB16-F12", "buy_num": 2, "unit_price": 22, "unit_num": 15, "stock": 38, "discount_price": 142, "package_price": 330 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC75_kB15-F26", "buy_num": 30, "unit_price": 23, "unit_num": 14, "stock": 7, "discount_price": 178, "package_price": 322 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC18_kB10-F30", "buy_num": 17, "unit_price": 46, "unit_num": 32, "stock": 6, "discount_price": 49, "package_price": 1472 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC6_kB18-F16", "buy_num": 15, "unit_price": 70, "unit_num": 28, "stock": 11, "discount_price": 121, "package_price": 1960 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC95_kB13-F19", "buy_num": 24, "unit_price": 91, "unit_num": 12, "stock": 15, "discount_price": 113, "package_price": 1092 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC77_kB20-F26", "buy_num": 12, "unit_price": 51, "unit_num": 35, "stock": 13, "discount_price": 39, "package_price": 1785 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC130_kB14-F12", "buy_num": 6, "unit_price": 176, "unit_num": 31, "stock": 3, "discount_price": 79, "package_price": 5456 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC186_kB6-F48", "buy_num": 9, "unit_price": 152, "unit_num": 14, "stock": 2, "discount_price": 38, "package_price": 2128 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC176_kB13-F39", "buy_num": 29, "unit_price": 98, "unit_num": 7, "stock": 20, "discount_price": 158, "package_price": 686 }, { "imgUrl": "/images/qiproduct1.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC195_kB15-F33", "buy_num": 4, "unit_price": 9, "unit_num": 11, "stock": 16, "discount_price": 161, "package_price": 99 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC122_kB4-F13", "buy_num": 25, "unit_price": 138, "unit_num": 4, "stock": 16, "discount_price": 146, "package_price": 552 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC183_kB17-F21", "buy_num": 8, "unit_price": 14, "unit_num": 8, "stock": 32, "discount_price": 224, "package_price": 112 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC65_kB9-F21", "buy_num": 4, "unit_price": 99, "unit_num": 34, "stock": 11, "discount_price": 177, "package_price": 3366 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC27_kB18-F19", "buy_num": 8, "unit_price": 146, "unit_num": 13, "stock": 20, "discount_price": 209, "package_price": 1898 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC136_kB10-F24", "buy_num": 15, "unit_price": 23, "unit_num": 21, "stock": 32, "discount_price": 237, "package_price": 483 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC142_kB4-F10", "buy_num": 12, "unit_price": 17, "unit_num": 9, "stock": 23, "discount_price": 251, "package_price": 153 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC86_kB19-F27", "buy_num": 23, "unit_price": 73, "unit_num": 27, "stock": 13, "discount_price": 214, "package_price": 1971 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC38_kB6-F21", "buy_num": 4, "unit_price": 30, "unit_num": 20, "stock": 24, "discount_price": 212, "package_price": 600 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC14_kB14-F38", "buy_num": 5, "unit_price": 114, "unit_num": 22, "stock": 19, "discount_price": 157, "package_price": 2508 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC99_kB20-F21", "buy_num": 8, "unit_price": 188, "unit_num": 22, "stock": 22, "discount_price": 157, "package_price": 4136 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC67_kB9-F32", "buy_num": 5, "unit_price": 65, "unit_num": 12, "stock": 32, "discount_price": 111, "package_price": 780 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC164_kB6-F20", "buy_num": 2, "unit_price": 153, "unit_num": 4, "stock": 13, "discount_price": 266, "package_price": 612 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC42_kB7-F23", "buy_num": 29, "unit_price": 67, "unit_num": 15, "stock": 32, "discount_price": 9, "package_price": 1005 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC179_kB7-F10", "buy_num": 6, "unit_price": 19, "unit_num": 29, "stock": 3, "discount_price": 259, "package_price": 551 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC56_kB5-F47", "buy_num": 28, "unit_price": 165, "unit_num": 6, "stock": 20, "discount_price": 11, "package_price": 990 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC167_kB10-F24", "buy_num": 25, "unit_price": 3, "unit_num": 32, "stock": 25, "discount_price": 181, "package_price": 96 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC107_kB12-F31", "buy_num": 2, "unit_price": 39, "unit_num": 33, "stock": 18, "discount_price": 47, "package_price": 1287 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC167_kB19-F34", "buy_num": 15, "unit_price": 186, "unit_num": 15, "stock": 23, "discount_price": 195, "package_price": 2790 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC15_kB8-F42", "buy_num": 5, "unit_price": 108, "unit_num": 14, "stock": 32, "discount_price": 227, "package_price": 1512 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC175_kB10-F33", "buy_num": 23, "unit_price": 69, "unit_num": 17, "stock": 2, "discount_price": 108, "package_price": 1173 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC178_kB6-F29", "buy_num": 10, "unit_price": 106, "unit_num": 25, "stock": 27, "discount_price": 207, "package_price": 2650 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC61_kB13-F30", "buy_num": 19, "unit_price": 165, "unit_num": 35, "stock": 11, "discount_price": 96, "package_price": 5775 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC32_kB9-F28", "buy_num": 27, "unit_price": 18, "unit_num": 33, "stock": 8, "discount_price": 61, "package_price": 594 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC39_kB10-F23", "buy_num": 12, "unit_price": 158, "unit_num": 29, "stock": 19, "discount_price": 77, "package_price": 4582 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC14_kB6-F34", "buy_num": 22, "unit_price": 168, "unit_num": 16, "stock": 26, "discount_price": 145, "package_price": 2688 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC21_kB7-F41", "buy_num": 21, "unit_price": 85, "unit_num": 32, "stock": 18, "discount_price": 173, "package_price": 2720 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC134_kB11-F22", "buy_num": 23, "unit_price": 62, "unit_num": 28, "stock": 29, "discount_price": 149, "package_price": 1736 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC171_kB16-F26", "buy_num": 5, "unit_price": 150, "unit_num": 34, "stock": 3, "discount_price": 219, "package_price": 5100 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC93_kB19-F46", "buy_num": 5, "unit_price": 32, "unit_num": 11, "stock": 9, "discount_price": 288, "package_price": 352 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC146_kB11-F42", "buy_num": 22, "unit_price": 100, "unit_num": 8, "stock": 21, "discount_price": 169, "package_price": 800 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "Женские туфли", "brand": "24PFM", "sku": "RC116_kB5-F48", "buy_num": 7, "unit_price": 129, "unit_num": 34, "stock": 1, "discount_price": 14, "package_price": 4386 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC98_kB5-F47", "buy_num": 25, "unit_price": 56, "unit_num": 17, "stock": 18, "discount_price": 228, "package_price": 952 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC21_kB9-F35", "buy_num": 11, "unit_price": 197, "unit_num": 9, "stock": 24, "discount_price": 146, "package_price": 1773 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC48_kB7-F16", "buy_num": 26, "unit_price": 162, "unit_num": 28, "stock": 33, "discount_price": 6, "package_price": 4536 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "Женские босоножки", "brand": "24PFM", "sku": "RC15_kB16-F14", "buy_num": 15, "unit_price": 182, "unit_num": 26, "stock": 1, "discount_price": 91, "package_price": 4732 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC37_kB18-F20", "buy_num": 26, "unit_price": 116, "unit_num": 30, "stock": 36, "discount_price": 21, "package_price": 3480 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC198_kB17-F23", "buy_num": 13, "unit_price": 142, "unit_num": 10, "stock": 24, "discount_price": 287, "package_price": 1420 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC148_kB18-F50", "buy_num": 26, "unit_price": 36, "unit_num": 7, "stock": 34, "discount_price": 101, "package_price": 252 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC171_kB5-F25", "buy_num": 2, "unit_price": 47, "unit_num": 26, "stock": 5, "discount_price": 189, "package_price": 1222 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "Женские босоножки", "brand": "CAMIDY", "sku": "RC135_kB9-F47", "buy_num": 27, "unit_price": 164, "unit_num": 13, "stock": 25, "discount_price": 296, "package_price": 2132 }, { "imgUrl": "/images/qiproduct1.jpg", "name": "Женские босоножки", "brand": "AFORE", "sku": "RC66_kB6-F39", "buy_num": 26, "unit_price": 158, "unit_num": 34, "stock": 16, "discount_price": 4, "package_price": 5372 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "Женские босоножки", "brand": "AFORE", "sku": "RC122_kB17-F37", "buy_num": 11, "unit_price": 5, "unit_num": 20, "stock": 21, "discount_price": 184, "package_price": 100 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "Детские сандалии", "brand": "BOOTSEA", "sku": "RC50_kB10-F47", "buy_num": 12, "unit_price": 188, "unit_num": 5, "stock": 32, "discount_price": 62, "package_price": 940 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC106_kB18-F31", "buy_num": 2, "unit_price": 46, "unit_num": 29, "stock": 35, "discount_price": 229, "package_price": 1334 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC196_kB14-F34", "buy_num": 20, "unit_price": 81, "unit_num": 20, "stock": 34, "discount_price": 124, "package_price": 1620 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC144_kB14-F49", "buy_num": 23, "unit_price": 94, "unit_num": 36, "stock": 7, "discount_price": 225, "package_price": 3384 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC190_kB18-F41", "buy_num": 1, "unit_price": 42, "unit_num": 33, "stock": 10, "discount_price": 55, "package_price": 1386 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC9_kB10-F12", "buy_num": 14, "unit_price": 95, "unit_num": 30, "stock": 39, "discount_price": 41, "package_price": 2850 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC182_kB19-F41", "buy_num": 14, "unit_price": 154, "unit_num": 28, "stock": 16, "discount_price": 225, "package_price": 4312 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC73_kB20-F30", "buy_num": 22, "unit_price": 110, "unit_num": 11, "stock": 19, "discount_price": 175, "package_price": 1210 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC178_kB20-F45", "buy_num": 13, "unit_price": 1, "unit_num": 19, "stock": 31, "discount_price": 295, "package_price": 19 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC4_kB15-F37", "buy_num": 7, "unit_price": 157, "unit_num": 34, "stock": 19, "discount_price": 167, "package_price": 5338 }, { "imgUrl": "/images/qiproduct1.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC87_kB7-F19", "buy_num": 26, "unit_price": 100, "unit_num": 7, "stock": 9, "discount_price": 293, "package_price": 700 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC195_kB8-F38", "buy_num": 3, "unit_price": 56, "unit_num": 12, "stock": 34, "discount_price": 67, "package_price": 672 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC95_kB13-F11", "buy_num": 16, "unit_price": 173, "unit_num": 28, "stock": 5, "discount_price": 151, "package_price": 4844 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC37_kB18-F49", "buy_num": 23, "unit_price": 140, "unit_num": 32, "stock": 1, "discount_price": 204, "package_price": 4480 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC69_kB9-F13", "buy_num": 15, "unit_price": 33, "unit_num": 21, "stock": 22, "discount_price": 27, "package_price": 693 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC7_kB5-F15", "buy_num": 1, "unit_price": 174, "unit_num": 33, "stock": 33, "discount_price": 81, "package_price": 5742 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC26_kB11-F39", "buy_num": 8, "unit_price": 178, "unit_num": 9, "stock": 23, "discount_price": 82, "package_price": 1602 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC151_kB13-F46", "buy_num": 29, "unit_price": 159, "unit_num": 9, "stock": 14, "discount_price": 158, "package_price": 1431 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "Женские босоножки", "brand": "AFORE", "sku": "RC46_kB16-F11", "buy_num": 8, "unit_price": 173, "unit_num": 29, "stock": 4, "discount_price": 14, "package_price": 5017 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "Женские туфли", "brand": "AILAIFA", "sku": "RC175_kB12-F49", "buy_num": 20, "unit_price": 28, "unit_num": 24, "stock": 10, "discount_price": 227, "package_price": 672 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "Детские сандалии", "brand": "AILAIFA", "sku": "RC175_kB19-F46", "buy_num": 4, "unit_price": 144, "unit_num": 22, "stock": 6, "discount_price": 153, "package_price": 3168 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "AILAIFA", "sku": "RC169_kB8-F21", "buy_num": 6, "unit_price": 146, "unit_num": 14, "stock": 25, "discount_price": 145, "package_price": 2044 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "Женские босоножки", "brand": "CAMIDY", "sku": "RC150_kB5-F38", "buy_num": 22, "unit_price": 150, "unit_num": 23, "stock": 8, "discount_price": 115, "package_price": 3450 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC151_kB13-F13", "buy_num": 20, "unit_price": 30, "unit_num": 7, "stock": 29, "discount_price": 3, "package_price": 210 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC15_kB13-F46", "buy_num": 6, "unit_price": 117, "unit_num": 15, "stock": 34, "discount_price": 235, "package_price": 1755 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC90_kB19-F21", "buy_num": 11, "unit_price": 99, "unit_num": 35, "stock": 4, "discount_price": 108, "package_price": 3465 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC188_kB5-F36", "buy_num": 5, "unit_price": 176, "unit_num": 23, "stock": 19, "discount_price": 286, "package_price": 4048 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC177_kB17-F23", "buy_num": 24, "unit_price": 186, "unit_num": 31, "stock": 34, "discount_price": 248, "package_price": 5766 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC121_kB6-F23", "buy_num": 10, "unit_price": 77, "unit_num": 11, "stock": 1, "discount_price": 275, "package_price": 847 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC84_kB15-F37", "buy_num": 13, "unit_price": 194, "unit_num": 10, "stock": 22, "discount_price": 253, "package_price": 1940 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC91_kB5-F45", "buy_num": 17, "unit_price": 72, "unit_num": 8, "stock": 22, "discount_price": 138, "package_price": 576 }, { "imgUrl": "/images/qiproduct5.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC93_kB14-F27", "buy_num": 18, "unit_price": 19, "unit_num": 17, "stock": 19, "discount_price": 219, "package_price": 323 }, { "imgUrl": "/images/qiproduct8.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC81_kB18-F36", "buy_num": 7, "unit_price": 121, "unit_num": 4, "stock": 28, "discount_price": 117, "package_price": 484 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC130_kB6-F24", "buy_num": 12, "unit_price": 92, "unit_num": 32, "stock": 7, "discount_price": 206, "package_price": 2944 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC61_kB16-F34", "buy_num": 12, "unit_price": 6, "unit_num": 12, "stock": 18, "discount_price": 19, "package_price": 72 }, { "imgUrl": "/images/qiproduct10.jpg", "name": "<NAME>", "brand": "BOOTSEA", "sku": "RC175_kB7-F30", "buy_num": 30, "unit_price": 2, "unit_num": 20, "stock": 36, "discount_price": 284, "package_price": 40 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC33_kB13-F20", "buy_num": 28, "unit_price": 141, "unit_num": 33, "stock": 25, "discount_price": 275, "package_price": 4653 }, { "imgUrl": "/images/qiproduct4.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC11_kB9-F33", "buy_num": 23, "unit_price": 124, "unit_num": 23, "stock": 1, "discount_price": 236, "package_price": 2852 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "AFORE", "sku": "RC152_kB7-F41", "buy_num": 25, "unit_price": 5, "unit_num": 5, "stock": 3, "discount_price": 23, "package_price": 25 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC20_kB16-F11", "buy_num": 22, "unit_price": 188, "unit_num": 28, "stock": 23, "discount_price": 132, "package_price": 5264 }, { "imgUrl": "/images/qiproduct3.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC185_kB6-F29", "buy_num": 24, "unit_price": 52, "unit_num": 11, "stock": 39, "discount_price": 277, "package_price": 572 }, { "imgUrl": "/images/qiproduct6.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC176_kB15-F32", "buy_num": 15, "unit_price": 189, "unit_num": 7, "stock": 36, "discount_price": 29, "package_price": 1323 }, { "imgUrl": "/images/qiproduct2.jpg", "name": "<NAME>", "brand": "24PFM", "sku": "RC42_kB9-F45", "buy_num": 26, "unit_price": 110, "unit_num": 8, "stock": 5, "discount_price": 125, "package_price": 880 }, { "imgUrl": "/images/qiproduct9.jpg", "name": "<NAME>", "brand": "CAMIDY", "sku": "RC112_kB12-F28", "buy_num": 16, "unit_price": 144, "unit_num": 35, "stock": 5, "discount_price": 171, "package_price": 5040 }, { "imgUrl": "/images/qiproduct7.jpg", "name": "<NAME>", "brand": "BAOLUOMILAN", "sku": "RC156_kB6-F32", "buy_num": 14, "unit_price": 142, "unit_num": 10, "stock": 18, "discount_price": 9, "package_price": 1420 } ] function dataGeneration(num) { var jsonData = []; var ary = ["Детские ботинки", "Женские босоножки", "Женские туфли", "Детские сандалии"]; var ary2 = ["AILAIFA", "AFORE", "24PFM", "CAMIDY", "BAOLUOMILAN", "BOOTSEA"]; function math(n, m) { return Math.round(Math.random() * (m - n) + n) } var obj={ checkboxHistory:true } for (var i = 0; i < num; i++) { var itemobj = { "imgUrl": "/images/qiproduct" + math(1, 10) + ".jpg", "name": ary[math(0, 3)], "brand": ary2[math(0, 5)], "sku": "RC" + math(1, 200) + "_kB" + math(4, 20) + "-F" + math(8, 50) + "", "buy_num": math(1, 30), //初始化购买数量 "unit_price": math(0, 200), "unit_num": math(4, 36), // "package":itemobj.pair*itemobj.unitnum, "stock": math(0, 40), "discount_price": math(1, 300), } itemobj.package_price = itemobj.unit_price * itemobj.unit_num; jsonData.push(itemobj) } $('body').html(JSON.stringify(jsonData)); // winData=JSON.stringify(jsonData); } // dataGeneration(300)
ef4b87547ee6767ce145911bbf52c739446ee145
[ "JavaScript" ]
1
JavaScript
hewenhai123/virDOM
277ca161f14a592a312afd33ce3423fc8202209e
4ebd06a5dd7bba80f1cbd8a93bf853f9857366ad
refs/heads/master
<repo_name>kangsukyong/bd18f-kangsukyong<file_sep>/train_parallax.py # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ python train_parallax.py --input_file_pattern={processed_data_path}/train-?????-of-00256 --inception_checkpoint_file={inception_v3_file} --train_inception=false --number_of_steps=200 Example command for examining the checkpoint file: python <PARALLAX_HOME>/tensorflow/tensorflow/python/tools/inspect_checkpoint.py --file_name=tf_ckpt/model.ckpt-0 --tensor_name=InceptionV3/Conv2d_a1_3x3_weights """ """Train the model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time import tensorflow as tf import parallax from im2txt import configuration from im2txt import show_and_tell_model FLAGS = tf.app.flags.FLAGS tf.flags.DEFINE_string("input_file_pattern", "", "File pattern of sharded TFRecord input files.") tf.flags.DEFINE_string("train_dir", "", "Directory for saving and loading model checkpoints.") tf.flags.DEFINE_string("inception_checkpoint_file", "", "Path to a pretrained inception_v3 model.") tf.flags.DEFINE_boolean("train_inception", False, "Whether to train inception submodel variables.") tf.flags.DEFINE_integer("number_of_steps", 1000000, "Number of training steps.") tf.flags.DEFINE_integer("log_every_n_steps", 10, "Frequency at which loss and global step are logged.") tf.flags.DEFINE_string("resource_info_file", os.path.abspath(os.path.join(os.path.dirname(__file__), ".", "resource_info")), "Filename containing cluster information") tf.flags.DEFINE_boolean("sync", True, '') tf.logging.set_verbosity(tf.logging.INFO) assert FLAGS.input_file_pattern, "--input_file_pattern is required" assert FLAGS.train_dir, "--train_dir is required" model_config = configuration.ModelConfig() model_config.input_file_pattern = FLAGS.input_file_pattern model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file training_config = configuration.TrainingConfig() g = tf.Graph() with g.as_default(): # build the model model = show_and_tell_model.ShowAndTellModel(model_config, mode='train', train_inception=FLAGS.train_inception) model.build() # set up the learning rate learning_rate_decay_fn = None if FLAGS.train_inception: learning_rate = tf.constant(training_config.train_inception_learning_rate) else: learning_rate = tf.constant(training_config.initial_learning_rate) if training_config.learning_rate_decay_factor > 0: num_batches_per_epoch = training_config.num_examples_per_epoch / model_config.batch_size decay_steps = int(num_batches_per_epoch * training_config.num_epochs_per_decay) def _learning_rate_decay_fn(learning_rate, global_step): return tf.train.exponential_decay(learning_rate, global_step, decay_steps=decay_steps, decay_rate=training_config.learning_rate_decay_factor, staircase=True) learning_rate_decay_fn = _learning_rate_decay_fn # set up the training ops train_op = tf.contrib.layers.optimize_loss(loss=model.total_loss, global_step=model.global_step, learning_rate=learning_rate, optimizer=training_config.optimizer, clip_gradients=training_config.clip_gradients, learning_rate_decay_fn=learning_rate_decay_fn) parallax_config = parallax.Config() parallax_config.ckpt_config = parallax.CheckPointConfig(ckpt_dir='parallax_ckpt', save_ckpt_steps=1) sess, num_workers, worker_id, num_replicas_per_worker = parallax.parallel_run(g, FLAGS.resource_info_file, sync=FLAGS.sync, parallax_config=parallax_config) start = time.time() for i in range(FLAGS.number_of_steps): if not sess.should_stop(): _, loss_ = sess.run([train_op, model.total_loss]) if i % FLAGS.log_every_n_steps == 0: end = time.time() throughput = float(FLAGS.log_every_n_steps) / float(end - start) print("step: %d, throuphput: %f steps/sec" % (i, throughput)) start = time.time() <file_sep>/train_tf.py # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ python train_tf.py --ps_hosts=localhost:12345 --worker_hosts=localhost:12346,localhost:12347 --job_name=ps --task_index=0 --input_file_pattern={processed_data_path}/train-?????-of-00256 --inception_checkpoint_file={inception_v3_file} --train_inception=false --number_of_steps=200 python train_tf.py --ps_hosts=localhost:12345 --worker_hosts=localhost:12346,localhost:12347 --job_name=worker --task_index=0 --max_steps=100 --input_file_pattern={processed_data_path}/train-?????-of-00256 --inception_checkpoint_file={inception_v3_file} --train_inception=false --number_of_steps=200 python train_tf.py --ps_hosts=localhost:12345 --worker_hosts=localhost:12346,localhost:12347 --job_name=worker --task_index=1 --max_steps=100 --input_file_pattern={processed_data_path}/train-?????-of-00256 --inception_checkpoint_file={inception_v3_file} --train_inception=false --number_of_steps=200 Example command for examining the checkpoint file: python <PARALLAX_HOME>/tensorflow/tensorflow/python/tools/inspect_checkpoint.py --file_name=tf_ckpt/model.ckpt-0 --tensor_name=InceptionV3/Conv2d_a1_3x3_weights """ """Train the model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time import tensorflow as tf from im2txt import configuration from im2txt import show_and_tell_model FLAGS = tf.app.flags.FLAGS tf.flags.DEFINE_string('ps_hosts', None, "Comma-separated list of hostname:port pairs") tf.flags.DEFINE_string('worker_hosts', None, "Comma-separated list of hostname:port pairs") tf.flags.DEFINE_string('job_name', None, "One of 'ps', 'worker'") tf.flags.DEFINE_integer('task_index', 0, "Index of task within the job") tf.flags.DEFINE_string("input_file_pattern", "", "File pattern of sharded TFRecord input files.") tf.flags.DEFINE_string("inception_checkpoint_file", "", "Path to a pretrained inception_v3 model.") tf.flags.DEFINE_boolean("train_inception", False, "Whether to train inception submodel variables.") tf.flags.DEFINE_integer("number_of_steps", 1000000, "Number of training steps.") tf.flags.DEFINE_integer("log_every_n_steps", 10, "Frequency at which loss and global step are logged.") tf.logging.set_verbosity(tf.logging.INFO) ps_hosts = FLAGS.ps_hosts.split(',') worker_hosts = FLAGS.worker_hosts.split(',') num_workers = len(worker_hosts) # create a cluster from the parameter server and worker hosts. cluster = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts}) # create and start a server for the local task. server = tf.train.Server(cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index) if FLAGS.job_name == 'ps': server.join() exit(0) assert FLAGS.input_file_pattern, "--input_file_pattern is required" model_config = configuration.ModelConfig() model_config.input_file_pattern = FLAGS.input_file_pattern model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file training_config = configuration.TrainingConfig() with tf.device(tf.train.replica_device_setter(worker_device='job:worker/task:%d/cpu:0' % FLAGS.task_index, cluster=cluster)): # build the model model = show_and_tell_model.ShowAndTellModel(model_config, mode='train', train_inception=FLAGS.train_inception) model.build() # set up the learning rate learning_rate_decay_fn = None if FLAGS.train_inception: learning_rate = tf.constant(training_config.train_inception_learning_rate) else: learning_rate = tf.constant(training_config.initial_learning_rate) if training_config.learning_rate_decay_factor > 0: num_batches_per_epoch = training_config.num_examples_per_epoch / model_config.batch_size decay_steps = int(num_batches_per_epoch * training_config.num_epochs_per_decay) def _learning_rate_decay_fn(learning_rate, global_step): return tf.train.exponential_decay(learning_rate, global_step, decay_steps=decay_steps, decay_rate=training_config.learning_rate_decay_factor, staircase=True) learning_rate_decay_fn = _learning_rate_decay_fn # set optimizer for distributed training optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) optimizer = tf.train.SyncReplicasOptimizer(optimizer, replicas_to_aggregate=num_workers, total_num_replicas=num_workers) # set up the training ops train_op = tf.contrib.layers.optimize_loss(loss=model.total_loss, global_step=model.global_step, learning_rate=learning_rate, optimizer=optimizer, clip_gradients=training_config.clip_gradients, learning_rate_decay_fn=learning_rate_decay_fn) # set up the saver for saving and restoring model checkpoints saver = tf.train.Saver(tf.global_variables(), save_relative_paths=False, allow_empty=True, max_to_keep=training_config.max_checkpoints_to_keep) # set chief session is_chief = (FLAGS.task_index == 0) sync_replicas_hook = optimizer.make_session_run_hook(is_chief, num_tokens=0) tf.add_to_collection(tf.GraphKeys.SAVERS, saver) scaffold = tf.train.Scaffold(init_fn=model.init_fn, saver=saver) ckpt_hook = tf.train.CheckpointSaverHook('tf_ckpt', save_steps=10, scaffold=scaffold) # run session with tf.train.MonitoredTrainingSession(master=server.target, is_chief=is_chief, hooks=[sync_replicas_hook], chief_only_hooks=[ckpt_hook]) as sess: start = time.time() for i in range(FLAGS.number_of_steps): if not sess.should_stop(): _, loss_ = sess.run([train_op, model.total_loss]) if i % FLAGS.log_every_n_steps == 0: end = time.time() throughput = float(FLAGS.log_every_n_steps) / float(end - start) print("step: %d, throuphput: %f steps/sec" % (i, throughput)) start = time.time() <file_sep>/train_horovod.py # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ mpirun --mca btl_vader_single_copy_mechanism none --allow-run-as-root -bind-to none -map-by slot -mca orte_base_help_aggregate 0 -x NCCL_DEBUG=INFO -np 2 -H localhost:2 python train_horovod.py --input_file_pattern={processed_data_path}/train-?????-of-00256 --inception_checkpoint_file={inception_v3_file} --train_inception=false --number_of_steps=200 Example command for examining the checkpoint file: python <PARALLAX_HOME>/tensorflow/tensorflow/python/tools/inspect_checkpoint.py --file_name=tf_ckpt/model.ckpt-0 --tensor_name=InceptionV3/Conv2d_a1_3x3_weights """ """Train the model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time import tensorflow as tf import horovod.tensorflow as hvd from im2txt import configuration from im2txt import show_and_tell_model hvd.init() FLAGS = tf.app.flags.FLAGS tf.flags.DEFINE_string("input_file_pattern", "", "File pattern of sharded TFRecord input files.") tf.flags.DEFINE_string("inception_checkpoint_file", "", "Path to a pretrained inception_v3 model.") tf.flags.DEFINE_boolean("train_inception", False, "Whether to train inception submodel variables.") tf.flags.DEFINE_integer("number_of_steps", 1000000, "Number of training steps.") tf.flags.DEFINE_integer("log_every_n_steps", 10, "Frequency at which loss and global step are logged.") tf.logging.set_verbosity(tf.logging.INFO) assert FLAGS.input_file_pattern, "--input_file_pattern is required" model_config = configuration.ModelConfig() model_config.input_file_pattern = FLAGS.input_file_pattern model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file training_config = configuration.TrainingConfig() # build the model model = show_and_tell_model.ShowAndTellModel(model_config, mode='train', train_inception=FLAGS.train_inception) model.build() # set up the learning rate learning_rate_decay_fn = None if FLAGS.train_inception: learning_rate = tf.constant(training_config.train_inception_learning_rate) else: learning_rate = tf.constant(training_config.initial_learning_rate) if training_config.learning_rate_decay_factor > 0: num_batches_per_epoch = training_config.num_examples_per_epoch / model_config.batch_size decay_steps = int(num_batches_per_epoch * training_config.num_epochs_per_decay) def _learning_rate_decay_fn(learning_rate, global_step): return tf.train.exponential_decay(learning_rate, global_step, decay_steps=decay_steps, decay_rate=training_config.learning_rate_decay_factor, staircase=True) learning_rate_decay_fn = _learning_rate_decay_fn # set optimizer for distributed training optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) optimizer = hvd.DistributedOptimizer(optimizer) # set up the training ops train_op = tf.contrib.layers.optimize_loss(loss=model.total_loss, global_step=model.global_step, learning_rate=learning_rate, optimizer=optimizer, clip_gradients=training_config.clip_gradients, learning_rate_decay_fn=learning_rate_decay_fn) hooks = [hvd.BroadcastGlobalVariablesHook(0)] if hvd.rank() == 0: # set up the saver for saving and restoring model checkpoints saver = tf.train.Saver(tf.global_variables(), save_relative_paths=False, allow_empty=True, max_to_keep=training_config.max_checkpoints_to_keep) tf.add_to_collection(tf.GraphKeys.SAVERS, saver) scaffold = tf.train.Scaffold(init_fn=model.init_fn, saver=saver) ckpt_hook = tf.train.CheckpointSaverHook('horovod_ckpt', save_steps=1, scaffold=scaffold) hooks.append(ckpt_hook) with tf.train.MonitoredTrainingSession(hooks=hooks) as sess: start = time.time() for i in range(FLAGS.number_of_steps): if not sess.should_stop(): _, loss_ = sess.run([train_op, model.total_loss]) if i % FLAGS.log_every_n_steps == 0: end = time.time() throughput = float(FLAGS.log_every_n_steps) / float(end - start) print("step: %d, loss: %lf, throuphput: %f steps/sec" % (i, loss_, throughput)) start = time.time()
7d1f576d8e220ebe884fc6e3cc6d953fc02b1393
[ "Python" ]
3
Python
kangsukyong/bd18f-kangsukyong
e50d6136644dd2bddf77a2f8591c35b6e0e8048b
a319ffe71e41c48ce149b673a7d1acdc8beda8de
refs/heads/master
<file_sep>class ContentItemsController < ApplicationController include Concerns::LtiSupport skip_before_filter :verify_authenticity_token before_filter :secure_launch, only: [:index] before_filter :check_is_content_item, only: [:index] before_filter :check_return_url, only: [:show] def index if params[:accept_media_types] =~ %r{text/html} @return_type = 'iframe' else @return_type = 'lti_launch' end @items = content_items render layout: "client" end def show @item = content_items.find{|item| item[:id] == params[:id].to_i} if params[:return_type] == 'iframe' content = embed_iframe(@item) else content = lti_launch(@item) end content_items = content_item_hash(content) @consumer = IMS::LTI::ToolConsumer.new(current_lti_application_instance.lti_key, current_lti_application_instance.lti_secret) tc = IMS::LTI::ToolConfig.new(launch_url: params[:return_url]) @consumer.set_config(tc) @consumer.resource_link_id = "fake_id" @consumer.lti_message_type = 'ContentItemSelection' @consumer.set_non_spec_param('content_items', content_items.to_json) end def setup @xml = config_xml @lti_application_instances = LtiApplicationInstance.all respond_to do |format| format.html format.xml { render xml: @xml.strip } end end private def config_xml launch_url = root_url selection_ui_url = content_items_url <<-XML <?xml version="1.0" encoding="UTF-8"?> <cartridge_basiclti_link xmlns="http://www.imsglobal.org/xsd/imslticc_v1p0" xmlns:blti="http://www.imsglobal.org/xsd/imsbasiclti_v1p0" xmlns:lticm="http://www.imsglobal.org/xsd/imslticm_v1p0" xmlns:lticp="http://www.imsglobal.org/xsd/imslticp_v1p0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.imsglobal.org/xsd/imslticc_v1p0 http://www.imsglobal.org/xsd/lti/ltiv1p0/imslticc_v1p0.xsd http://www.imsglobal.org/xsd/imsbasiclti_v1p0 http://www.imsglobal.org/xsd/lti/ltiv1p0/imsbasiclti_v1p0p1.xsd http://www.imsglobal.org/xsd/imslticm_v1p0 http://www.imsglobal.org/xsd/lti/ltiv1p0/imslticm_v1p0.xsd http://www.imsglobal.org/xsd/imslticp_v1p0 http://www.imsglobal.org/xsd/lti/ltiv1p0/imslticp_v1p0.xsd"> <blti:title>Content Item Example</blti:title> <blti:description>Content Item Example</blti:description> <blti:launch_url>#{launch_url}</blti:launch_url> <blti:extensions platform="canvas.instructure.com"> <lticm:property name="domain">#{launch_url}</lticm:property> <lticm:property name="privacy_level">public</lticm:property> <lticm:options name="assignment_selection"> <lticm:property name="canvas_icon_class">icon-lti</lticm:property> <lticm:property name="url">#{selection_ui_url}</lticm:property> <lticm:property name="message_type">ContentItemSelectionRequest</lticm:property> <lticm:property name="selection_height">700</lticm:property> <lticm:property name="selection_width">700</lticm:property> </lticm:options> <lticm:options name="link_selection"> <lticm:property name="canvas_icon_class">icon-lti</lticm:property> <lticm:property name="url">#{selection_ui_url}</lticm:property> <lticm:property name="message_type">ContentItemSelectionRequest</lticm:property> <lticm:property name="selection_height">700</lticm:property> <lticm:property name="selection_width">700</lticm:property> </lticm:options> <lticm:options name="editor_button"> <lticm:property name="canvas_icon_class">icon-lti</lticm:property> <lticm:property name="message_type">ContentItemSelectionRequest</lticm:property> <lticm:property name="text">Content Item Example</lticm:property> <lticm:property name="url">#{selection_ui_url}</lticm:property> <lticm:property name="selection_height">700</lticm:property> <lticm:property name="selection_width">700</lticm:property> </lticm:options> </blti:extensions> </cartridge_basiclti_link> XML end def check_is_content_item unless params[:lti_message_type] == 'ContentItemSelectionRequest' raise "LTI configuration error. This route is for Content item selection only." end end def check_return_url raise "No return url for content item" unless params[:return_url].present? end def content_item_hash(items) { "@context" => "http://purl.imsglobal.org/ctx/lti/v1/ContentItem", "@graph" => items } end def lti_launch(item) [ { "@type" => "LtiLinkItem", "mediaType" => "application/vnd.ims.lti.v1.ltilink", "url" => lti_launches_url(item[:id]), "title" => item[:name], }, { "@type" => "LtiLinkItem", "mediaType" => "application/vnd.ims.lti.v1.ltilink", "url" => lti_launches_url(item[:id]), "title" => item[:name], "text" => item[:name], "lineItem" => { "@type" => "LineItem", "label" => item[:name], "reportingMethod" => "res:totalScore", "maximumScore" => 10, "scoreConstraints" => { "@type" => "NumericLimits", "normalMaximum" => 10, "totalMaximum" => 10 } } } ] end def embed_iframe(item) url = item_url(item[:id]) iframe = <<-HTML <iframe style="width: 100%; height: 500px;" src="#{url}"> </iframe> HTML [{ "@type" => "ContentItem", "mediaType" => "text/html", "text" => iframe, "placementAdvice" => { "presentationDocumentTarget" => "embed" } }] end def secure_launch if !valid_lti_request?(current_lti_application_instance.lti_key, current_lti_application_instance.lti_secret) user_not_authorized end end end <file_sep>class ItemsController < ApplicationController def show @item = content_items.find{|item| item[:id] == params[:id].to_i} render layout: "client" end end
13f8dbfa6b42afe97bcbba7a66ae268c42c6a048
[ "Ruby" ]
2
Ruby
harikrishnantb/content_item_example
4ff32e17e5445f92ec60c590c71f5c8220e14e08
8cfdd89a1574c4c129599403153cca69de7aaf0d
refs/heads/master
<file_sep>package io.kotlintest.assertions.arrow import arrow.data.Validated import arrow.effects.IO import arrow.effects.extensions.io.applicative.applicative import arrow.core.extensions.eq import arrow.core.extensions.order import arrow.core.extensions.semigroup import arrow.validation.refinedTypes.numeric.validated.negative.negative import io.kotlintest.assertions.arrow.`try`.`try` import io.kotlintest.assertions.arrow.either.either import io.kotlintest.assertions.arrow.eq.EqAssertions import io.kotlintest.assertions.arrow.nel.nel import io.kotlintest.assertions.arrow.option.option import io.kotlintest.assertions.arrow.order.OrderAssertions import io.kotlintest.assertions.arrow.refinements.forAll import io.kotlintest.assertions.arrow.refinements.shouldBeRefinedBy import io.kotlintest.assertions.arrow.tagless.io.taglessAssertions.shouldBeInterpretedTo import io.kotlintest.assertions.arrow.validated.nonEmptyPerson.nonEmptyPerson import io.kotlintest.assertions.arrow.validation.validated import io.kotlintest.properties.Gen import io.kotlintest.properties.forAll import io.kotlintest.shouldThrow import io.kotlintest.specs.StringSpec import io.kotlintest.assertions.arrow.gen.gen.fx.fx class ArrowAssertionsTests : StringSpec({ "Provide assertions and matchers for refined types" { -1 shouldBeRefinedBy Validated.negative(Int.order()) } "Allow semi automatic derivation of Gen encoders for arbitrary product types" { shouldThrow<AssertionError> { forAll(Person.gen()) { it.name.isNotEmpty() } } } "Provide semi automatic derivation and refined predicates in `forAll` universal quantifiers" { forAll(Person.gen(), Validated.nonEmptyPerson()) { it.name.isNotEmpty() } } "Provide assertions for ad-hoc polymorphic programs and higher kinded values [IO]" { IO.applicative().run { helloWorldPoly() shouldBeInterpretedTo "Hello World" } } "Provide assertions for values bound by the `Eq` type class" { EqAssertions(Int.eq()) { 0 shouldBeEqvTo 0 0 shouldNotBeEqvTo -1 } } "Provide assertions for values bound by the `Order` type class" { OrderAssertions(Int.order()) { 0 shouldBeEqvTo 0 0 shouldNotBeEqvTo -1 0 shouldBeGreaterThan -1 0 shouldBeGreaterThanOrEqual 0 0 shouldBeSmallerThan 1 0 shouldBeSmallerThanOrEqual 0 } } "Gen<NonEmptyList<A>>" { forAll(Gen.nel(Gen.int(), 0)) { it.contains(0) } } "Gen<Either<A, B>>" { forAll(Gen.either(Gen.constant(1), Gen.constant(0))) { it.fold({ l -> l == 1 }, { r -> r == 0 }) } } "Gen<Option<A>>" { forAll(Gen.option(Gen.constant(1))) { it.fold({ true }, { n -> n == 1 }) } } "Gen<Try<A>>" { forAll(Gen.`try`(Gen.constant(Ex), Gen.constant(1))) { it.fold({ ex -> ex == Ex }, { n -> n == 1 }) } } "Gen<Validated<A, B>>" { forAll(Gen.validated(Gen.constant(1), Gen.constant(0), Int.semigroup())) { it.fold({ l -> l == 1 }, { r -> r == 0 }) } } "Gen binding" { val prefix = "_" val personGen: Gen<Person> = fx { val id = !Gen.long() val name = !Gen.string() Person(id, prefix + name) } forAll(personGen) { it.name.startsWith(prefix) } } })
b607931605001f9137bb5ca66dd0d4b085a96551
[ "Kotlin" ]
1
Kotlin
KyongSik-Yoon/kotlintest
211019bb3b3904e976203b91a1d79f5baa689ae5
101a913c3199ca4e44b8926820a51217cd38b30a
refs/heads/master
<repo_name>ShahabBakht/hierarchical_lfads<file_sep>/conv_lfads.py import torch import torch.nn as nn from lfads import LFADS_Net from svlae import SVLAE_Net import time import pdb class Conv3d_LFADS_Net(nn.Module): def __init__(self, input_dims = (100,128,128), conv_type = '2d', #(100, 128, 128) channel_dims = (16, 32), obs_encoder_size = 32, obs_latent_size = 64, obs_controller_size = 32, conv_dense_size = 64, factor_size = 4, g_encoder_size = 64, c_encoder_size = 64, g_latent_size = 64, u_latent_size = 1, controller_size = 64, generator_size = 64, prior = {'g0' : {'mean' : {'value': 0.0, 'learnable' : True}, 'var' : {'value': 0.1, 'learnable' : False}}, 'u' : {'mean' : {'value': 0.0, 'learnable' : False}, 'var' : {'value': 0.1, 'learnable' : True}, 'tau' : {'value': 10, 'learnable' : True}}}, obs_params = {'gain' : {'value' : 1.0, 'learnable' : False}, 'bias' : {'value' : 0.0, 'learnable' : False}, 'tau' : {'value' : 10., 'learnable' : False}, 'var' : {'value' : 0.1, 'learnable' : True}}, deep_unfreeze_step = 1600, obs_early_stop_step = 2000, generator_burn = 0, obs_continue_step = 8000, ar1_start_step = 4000, clip_val=5.0, max_norm = 200, lfads_dropout=0.0, conv_dropout=0.0,do_normalize_factors=True, factor_bias = False, device='cpu'): super(Conv3d_LFADS_Net, self).__init__() self.conv_type = conv_type self.factor_size = factor_size self.obs_encoder_size = obs_encoder_size self.obs_latent_size = obs_latent_size self.obs_controller_size = obs_controller_size self.g_encoder_size = g_encoder_size self.c_encoder_size = c_encoder_size self.g_latent_size = g_latent_size self.u_latent_size = u_latent_size self.controller_size = controller_size self.generator_size = generator_size self.clip_val = clip_val self.max_norm = max_norm self.do_normalize_factors = do_normalize_factors self.factor_bias = factor_bias self.device= device self.input_dims = input_dims self.channel_dims = (1,) + channel_dims self.conv_layers = nn.ModuleList() self.conv_dense_size = conv_dense_size if self.conv_type == '2d': layer_dims = self.input_dims[1:] for n in range(1, len(self.channel_dims)): self.conv_layers.add_module('{}{}'.format('block', n), Conv2d_Block_1step(input_dims = layer_dims, in_f = self.channel_dims[n-1], out_f= self.channel_dims[n])) layer_dims = getattr(self.conv_layers, '{}{}'.format('block', n)).get_output_dims() self.deconv_layers = nn.ModuleList() for n in reversed(range(1, len(self.channel_dims))): self.deconv_layers.add_module('{}{}'.format('block', n), ConvTranspose2d_Block_1step(in_f = self.channel_dims[n], out_f= self.channel_dims[n-1])) elif self.conv_type == '3d': layer_dims = self.input_dims for n in range(1, len(self.channel_dims)): self.conv_layers.add_module('{}{}'.format('block', n), Conv3d_Block_1step(input_dims = layer_dims, in_f = self.channel_dims[n-1], out_f= self.channel_dims[n])) layer_dims = getattr(self.conv_layers, '{}{}'.format('block', n)).get_output_dims() self.deconv_layers = nn.ModuleList() for n in reversed(range(1, len(self.channel_dims))): self.deconv_layers.add_module('{}{}'.format('block', n), ConvTranspose3d_Block_1step(in_f = self.channel_dims[n], out_f= self.channel_dims[n-1])) elif self.conv_type == 'fix': pass # Placeholder if self.conv_type == '3d': self.conv_output_size = int(torch._np.prod(layer_dims[1:]) * self.channel_dims[-1]) else: self.conv_output_size = int(torch._np.prod(layer_dims[0:]) * self.channel_dims[-1]) self.conv_dense_size = self.conv_dense_size self.conv_dropout = nn.Dropout(conv_dropout) self.conv_dense_1 = nn.Linear(in_features= self.conv_output_size, out_features= self.conv_dense_size) self.conv_dense_2 = nn.Linear(in_features= self.factor_size, out_features = self.conv_dense_size) self.conv_dense_3 = nn.Linear(in_features= self.conv_dense_size, out_features = self.conv_output_size) self.RELU = nn.ReLU() # self.lfads_param = dict() print(self.device) print(torch.cuda.device_count()) # self.lfads = LFADS_Net(input_size= self.conv_dense_size, # g_encoder_size=self.g_encoder_size, # c_encoder_size=self.c_encoder_size, # g_latent_size=self.g_latent_size, # u_latent_size=self.u_latent_size, # controller_size=self.controller_size, # generator_size=self.generator_size, # factor_size=self.factor_size, # prior=prior, # clip_val=self.clip_val, # dropout=lfads_dropout, # max_norm=self.max_norm, # do_normalize_factors=self.do_normalize_factors, # factor_bias=self.factor_bias, # device= self.device) self.calfads = SVLAE_Net(input_size = self.conv_dense_size, factor_size = self.factor_size, obs_encoder_size = self.obs_encoder_size, obs_latent_size = self.obs_latent_size, obs_controller_size = self.obs_controller_size, deep_g_encoder_size = self.g_encoder_size, deep_c_encoder_size = self.c_encoder_size, deep_g_latent_size = self.g_latent_size, deep_u_latent_size = self.u_latent_size, deep_controller_size = self.controller_size, generator_size = self.generator_size, prior = prior, clip_val = self.clip_val, generator_burn = generator_burn, dropout = lfads_dropout, do_normalize_factors = self.do_normalize_factors, factor_bias = self.factor_bias, max_norm = self.max_norm, deep_unfreeze_step = deep_unfreeze_step, obs_early_stop_step = obs_early_stop_step, obs_continue_step = obs_continue_step, ar1_start_step = ar1_start_step, obs_params = obs_params, device = self.device) self.register_parameter('u_posterior_mean',None) self.register_parameter('u_posterior_logvar',None) self.register_parameter('g_posterior_mean',None) self.register_parameter('g_posterior_logvar',None) self.register_parameter('g_prior_mean',self.calfads.deep_model.g_prior_mean) self.register_buffer('g_prior_logvar',self.calfads.deep_model.g_prior_logvar) self.register_parameter('u_prior_mean',self.calfads.obs_model.u_prior_mean) self.register_buffer('u_prior_logvar',self.calfads.obs_model.u_prior_logvar) def forward(self, x): batch_size, num_ch, seq_len, w, h = x.shape if self.conv_type == '3d': frame_per_block = 5 num_blocks = int(seq_len/frame_per_block) x = x.view(batch_size, num_ch, num_blocks, frame_per_block, w, h).contiguous() x = x.permute(0, 2, 1, 3, 4, 5).contiguous() x = x.view(batch_size * num_blocks, num_ch, frame_per_block, w, h).contiguous() else: x = x.permute(0, 2, 1, 3, 4).contiguous() x = x.view(batch_size*seq_len,num_ch,w,h).contiguous() Ind = list() conv_tic = time.time() for n, layer in enumerate(self.conv_layers): x, ind1 = layer(x) Ind.append(ind1) conv_toc = time.time() num_out_ch = x.shape[1] w_out = x.shape[-1] h_out = x.shape[-2] if self.conv_type == '3d': x = x.view(batch_size, num_blocks, num_out_ch, frame_per_block, w_out, h_out).contiguous() x = x.permute(0, 2, 1, 3, 4, 5).contiguous() x = x.view(batch_size, num_out_ch, seq_len, w_out, h_out).contiguous() else: x = x.view(batch_size, seq_len, num_out_ch, w_out, h_out).contiguous() x = x.permute(0, 2, 1, 3, 4).contiguous() x = x.permute(0, 2, 1, 3, 4) x = x.reshape(x.shape[0],x.shape[1],-1) x = self.conv_dense_1(x.view(batch_size, seq_len, w_out * h_out * num_out_ch)) x = self.RELU(x) conv_out = x # x = x.permute(1, 0, 2) lfads_tic = time.time() # factors, gen_inputs = self.lfads(x) recon_calfads, (factors, deep_gen_inputs) = self.calfads(x) lfads_toc = time.time() # print('conv t: ',conv_toc - conv_tic,' lfads t: ',lfads_toc - lfads_tic) # x = factors x = recon_calfads['data'] # x = x.permute(1, 0, 2) # x = self.conv_dense_2(x).exp() deconv_in = x x = self.conv_dense_3(x) # x = self.RELU(x) # call LFADS here: # x should be reshaped for LFADS [time x batch x cells]: # # LFADS output should be also reshaped back for the conv decoder x = x.reshape(x.shape[0], x.shape[1], num_out_ch, w_out, h_out) x = x.permute(0, 2, 1, 3, 4) if self.conv_type == '3d': x = x.view(batch_size, num_out_ch, num_blocks, frame_per_block, w_out, h_out).contiguous() x = x.permute(0, 2, 1, 3, 4, 5).contiguous() x = x.view(batch_size * num_blocks, num_out_ch, frame_per_block, w_out, h_out).contiguous() else: x = x.permute(0, 2, 1, 3, 4).contiguous() x = x.view(batch_size * seq_len, num_out_ch, w_out, h_out).contiguous() for layer, ind in list(zip(self.deconv_layers, reversed(Ind))): x = layer(x, ind) if self.conv_type == '3d': x = x.view(batch_size, num_blocks, 1, frame_per_block, w, h).contiguous() x = x.permute(0, 2, 1, 3, 4, 5) x = x.view(batch_size, 1, seq_len, w, h) else: x = x.view(batch_size, seq_len, 1, w, h).contiguous() x = x.permute((0, 2, 1, 3, 4)).contiguous() # g_posterior = dict() # g_posterior_mean = self.lfads.g_posterior_mean # g_posterior_logvar = self.lfads.g_posterior_logvar u_posterior_mean = self.calfads.obs_model.u_posterior_mean u_posterior_logvar = self.calfads.obs_model.u_posterior_logvar g_posterior_mean = self.calfads.deep_model.g_posterior_mean g_posterior_logvar = self.calfads.deep_model.g_posterior_logvar recon = {} recon['data'] = x recon['spikes'] = recon_calfads['spikes'] recon['rates'] = recon_calfads['rates'] return recon, (factors, deep_gen_inputs), (g_posterior_mean,g_posterior_logvar), (u_posterior_mean, u_posterior_logvar), conv_out def normalize_factors(self): self.calfads.deep_model.normalize_factors() def change_parameter_grad_status(self, step, optimizer, scheduler, loading_checkpoint=False): return optimizer, scheduler class _ConvNd_Block(nn.ModuleList): def __init__(self, input_dims): super(_ConvNd_Block, self).__init__() self.input_dims = input_dims def forward(self, x): ind = None for layer in self: if nn.modules.pooling._MaxPoolNd in type(layer).__bases__ and layer.return_indices: x, ind = layer(x) else: x = layer(x) return x, ind def get_output_dims(self): def layer_out_dim(in_dim, layer): padding = layer.padding kernel_size = layer.kernel_size dilation = layer.dilation stride = layer.stride def out_dim(in_dim, padding, dilation, kernel_dim, stride): return int((in_dim + 2 * padding - dilation * (kernel_dim - 1) - 1)/stride + 1) return tuple([out_dim(i,p,d,k,s) for i,p,d,k,s in zip(in_dim, padding, dilation, kernel_size, stride)]) dims = self.input_dims for m in self: parents = type(m).__bases__ if nn.modules.conv._ConvNd in parents or nn.modules.pooling._MaxPoolNd in parents: dims = layer_out_dim(dims, m) return dims class Conv3d_Block_2step(_ConvNd_Block): def __init__(self, in_f, out_f, kernel_size=(3, 3, 3), dilation=(1, 1, 1), padding=(1, 1, 1), stride=(1, 1, 1), pool_size=(1, 4, 4), input_dims=(100, 100, 100)): super(Conv3d_Block_2step, self).__init__(input_dims) self.add_module('conv1', nn.Conv3d(in_f, out_f, kernel_size= kernel_size, padding= padding, dilation = dilation, stride= stride)) self.add_module('relu1', nn.ReLU()) self.add_module('conv2', nn.Conv3d(out_f, out_f, kernel_size= kernel_size, padding= padding, dilation= dilation, stride = stride)) self.add_module('pool1', nn.MaxPool3d(kernel_size= pool_size, stride= pool_size, padding=(0, 0, 0), dilation=(1, 1, 1), return_indices= True)) self.add_module('relu2', nn.ReLU()) self.output_dims = self.get_output_dims() class Conv2d_fix(nn.Conv2d): def __init__(self, in_f, out_f, kernel_size = 11, stride=1, padding=5, radius=4, center = (5,5)): super(Conv2d_fix, self).__init__(in_f, out_f, kernel_size = kernel_size, stride = stride, padding = kernel_size//2) self.center = center self.radius = radius self.kernel_size = kernel_size self.Weights = nn.Parameter(self.make_weights()) def forward(self,x): return super(Conv2d_fix)._conv_forward(x, self.Weights) def make_weights(self): import skimage.draw as draw rr, cc = draw.circle(r = self.center[0], c = self.center[1] , radius=self.radius) w = torch.zeros((1,1,self.kernel_size,self.kernel_size)) w[0,0,rr,cc] += 1 return w class Conv2d_Block_1step(_ConvNd_Block): def __init__(self, in_f, out_f, kernel_size=(3, 3), dilation=(1, 1), padding=(1, 1), stride=(1, 1), pool_size=(2, 2), input_dims=(128, 128)): super(Conv2d_Block_1step, self).__init__(input_dims) self.add_module('conv1', nn.Conv2d(in_f, out_f, kernel_size= kernel_size, padding= padding, dilation = dilation, stride= stride)) self.add_module('relu1', nn.ReLU()) self.add_module('pool1', nn.MaxPool2d(kernel_size= pool_size, stride= pool_size, padding=(0, 0), dilation=(1, 1), return_indices= True)) class Conv3d_Block_1step(_ConvNd_Block): def __init__(self, in_f, out_f, kernel_size=(1, 3, 3), dilation=(1, 1, 1), padding=(0, 1, 1), stride=(1, 1, 1), pool_size=(1, 2, 2), input_dims=(100, 100, 100)): super(Conv3d_Block_1step, self).__init__(input_dims) self.add_module('conv1', nn.Conv3d(in_f, out_f, kernel_size= kernel_size, padding= padding, dilation = dilation, stride= stride)) self.add_module('relu1', nn.ReLU()) self.add_module('pool1', nn.MaxPool3d(kernel_size= pool_size, stride= pool_size, padding=(0, 0, 0), dilation=(1, 1, 1), return_indices= True)) class _ConvTransposeNd_Block(nn.ModuleList): def __init__(self): super(_ConvTransposeNd_Block, self).__init__() def forward(self, x, ind): for layer in self: if nn.modules.pooling._MaxUnpoolNd in type(layer).__bases__: x = layer(x, ind) else: x = layer(x) return x class ConvTranspose2d_Block_1step(_ConvTransposeNd_Block): def __init__(self, in_f, out_f): super(ConvTranspose2d_Block_1step, self).__init__() self.add_module('unpool1', nn.MaxUnpool2d(kernel_size=(2,2))) self.add_module('deconv1', nn.ConvTranspose2d(in_channels= in_f, out_channels= out_f, kernel_size= (3,3), padding= (1,1), dilation= (1,1))) class ConvTranspose3d_Block_1step(_ConvTransposeNd_Block): def __init__(self, in_f, out_f): super(ConvTranspose3d_Block_1step, self).__init__() self.add_module('unpool1', nn.MaxUnpool3d(kernel_size=(1,2,2))) self.add_module('deconv1', nn.ConvTranspose3d(in_channels= in_f, out_channels= out_f, kernel_size= (1,3,3), padding= (0,1,1), dilation= (1,1,1))) self.add_module('relu1', nn.ReLU()) if __name__ == "__main__": import pdb from utils import load_parameters x = torch.rand((10, 1, 20, 128, 128)).to('cuda') print('input size', x.shape) batch_size, num_ch, seq_len, w, h = x.shape hyperparams = load_parameters('./hyperparameters/lorenz/conv3d_lfads.yaml') model = Conv3d_LFADS_Net(input_dims = (100,128,128), conv_type = '2d', channel_dims = hyperparams['model']['channel_dims'], obs_encoder_size = hyperparams['model']['obs_encoder_size'], obs_latent_size = hyperparams['model']['obs_latent_size'], obs_controller_size = hyperparams['model']['obs_controller_size'], conv_dense_size = hyperparams['model']['conv_dense_size'], factor_size = hyperparams['model']['factor_size'], g_encoder_size = hyperparams['model']['g_encoder_size'], c_encoder_size = hyperparams['model']['c_encoder_size'], g_latent_size = hyperparams['model']['g_latent_size'], u_latent_size = hyperparams['model']['u_latent_size'], controller_size = hyperparams['model']['controller_size'], generator_size = hyperparams['model']['generator_size'], prior = hyperparams['model']['prior'], obs_params = hyperparams['model']['obs'], deep_unfreeze_step = hyperparams['model']['deep_unfreeze_step'], obs_early_stop_step = hyperparams['model']['obs_early_stop_step'], generator_burn = hyperparams['model']['generator_burn'], obs_continue_step = hyperparams['model']['obs_continue_step'], ar1_start_step = hyperparams['model']['ar1_start_step'], clip_val = hyperparams['model']['clip_val'], max_norm = hyperparams['model']['max_norm'], lfads_dropout = hyperparams['model']['lfads_dropout'], conv_dropout = hyperparams['model']['conv_dropout'], do_normalize_factors = hyperparams['model']['normalize_factors'], factor_bias = hyperparams['model']['factor_bias'], device = 'cuda').to('cuda') recon, (factors, deep_gen_inputs), (g_posterior_mean,g_posterior_logvar), (u_posterior_mean, u_posterior_logvar), conv_out = model(x) pdb.set_trace() # from synthetic_data import * # lorenz = LorenzSystem(num_inits= 100, # dt= 0.01) # net = EmbeddedLowDNetwork(low_d_system = lorenz, # net_size = 64, # base_rate = 1.0, # dt = 0.01) # Ca_synth = SyntheticCalciumDataGenerator(net, 100, trainp = 0.8, # burn_steps = 1000, num_trials = 10, num_steps= 20, # tau_cal=0.1, dt_cal= 0.01, sigma=0.2, # frame_width=128, frame_height=128, cell_radius=4, save=True) # data_dict = Ca_synth.generate_dataset() # train_dl = torch.utils.data.DataLoader(SyntheticCalciumVideoDataset(traces= data_dict['train_fluor'], cells=data_dict['cells'], device='cuda'), batch_size=1) # conv = Conv2d_fix(in_f = 1, out_f = 1, kernel_size = 11, stride=1, padding=5, radius=4, center = (5,5)) # out_data = torch.zeros((800,1,1,20,128,128)) # in_data = torch.zeros((800,1,1,20,128,128)).to('cuda') # n=0 # for data in train_dl: # # print(data[0].shape) # # in_data[n,:] = data[0] # # out_data[n,:] = torch.nn.functional.conv2d(in_data[n,:],W,stride=1, padding=5)#conv.conv3d_forward(in_data,W) # for t in range(0,data[0].shape[2]): # in_data[n,:,:,t,:,:] = data[0][:,:,t,:,:] # out_data[n,:,:,t,:,:] = conv(in_data[n,:,:,t,:,:])#conv.conv3d_forward(in_data,W) # pdb.set_trace() # n+=1
01116ae24e9164af6a7e1f76c00ca6d3a687c1a2
[ "Python" ]
1
Python
ShahabBakht/hierarchical_lfads
57b9e2216e88df00296b221a0caadef0bb530050
8150816d4bbfcfa57cf8491554d50cf17a469ecf
refs/heads/master
<repo_name>hason/composer-puli-plugin<file_sep>/README.md Puli Plugin for Composer ======================== [![Build Status](https://travis-ci.org/puli/composer-puli-plugin.png?branch=master)](https://travis-ci.org/puli/composer-puli-plugin) [![Scrutinizer Quality Score](https://scrutinizer-ci.com/g/puli/composer-puli-plugin/badges/quality-score.png?s=f1fbf1884aed7f896c18fc237d3eed5823ac85eb)](https://scrutinizer-ci.com/g/puli/composer-puli-plugin/) [![Code Coverage](https://scrutinizer-ci.com/g/puli/composer-puli-plugin/badges/coverage.png?s=5d83649f6fc3a9754297da9dc0d997be212c9145)](https://scrutinizer-ci.com/g/puli/composer-puli-plugin/) [![SensioLabsInsight](https://insight.sensiolabs.com/projects/c519f170-f530-4f3a-83e9-0516583ddc92/mini.png)](https://insight.sensiolabs.com/projects/c519f170-f530-4f3a-83e9-0516583ddc92) [![Latest Stable Version](https://poser.pugx.org/puli/composer-puli-plugin/v/stable.png)](https://packagist.org/packages/puli/composer-puli-plugin) [![Total Downloads](https://poser.pugx.org/puli/composer-puli-plugin/downloads.png)](https://packagist.org/packages/puli/composer-puli-plugin) [![Dependency Status](https://www.versioneye.com/php/puli:composer-puli-plugin/1.0.0/badge.png)](https://www.versioneye.com/php/puli:composer-puli-plugin/1.0.0) Latest release: [1.0.0-alpha1](https://packagist.org/packages/puli/composer-puli-plugin#1.0.0-alpha1) PHP >= 5.3.9 This plugin integrates the [Puli library] into [Composer]. Whenever you install or update your Composer dependencies, a Puli repository is generated from the composer.json files of the installed packages: ```json { "name": "acme/blog", "extra": { "resources": { "/acme/blog": "resources" } } } ``` You can include the generated repository in your code and access all exported resources by their Puli paths: ```php $repo = require __DIR__.'/vendor/resource-repository.php'; // /path/to/project/vendor/acme/blog/resources/config/config.yml echo $repo->get('/acme/blog/config/config.yml')->getContents(); ``` Authors ------- * [<NAME>] a.k.a. [@webmozart] * [The Community Contributors] Installation ------------ Follow the [Getting Started] guide to install the Puli plugin in your project. Documentation ------------- Read the [Plugin Documentation] if you want to learn more about configuring repositories with the Composer plugin. Contribute ---------- Contributions to are very welcome! * Report any bugs or issues you find on the [issue tracker]. * You can grab the source code at Puli’s [Git repository]. Support ------- If you are having problems, send a mail to <EMAIL> or shout out to [@webmozart] on Twitter. License ------- Puli and its documentation are licensed under the [MIT license]. [<NAME>]: http://webmozarts.com [The Community Contributors]: https://github.com/puli/composer-puli-plugin/graphs/contributors [Puli library]: https://github.com/puli/puli [Composer]: https://getcomposer.org [Getting Started]: http://puli.readthedocs.org/en/latest/getting-started/application-devs.html [Plugin Documentation]: http://puli.readthedocs.org/en/latest/repository-management/composer.html [issue tracker]: https://github.com/puli/puli/issues [Git repository]: https://github.com/puli/composer-puli-plugin [@webmozart]: https://twitter.com/webmozart [MIT license]: LICENSE <file_sep>/src/RepositoryDumper/RepositoryDumper.php <?php /* * This file is part of the Composer Puli Plugin. * * (c) <NAME> <<EMAIL>> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Puli\Extension\Composer\RepositoryDumper; use Composer\Installer\InstallationManager; use Composer\Package\PackageInterface; use Composer\Util\Filesystem; use Puli\Extension\Composer\RepositoryBuilder\RepositoryBuilder; use Puli\Filesystem\PhpCacheRepository; use Puli\Repository\ResourceRepository; /** * Dumps a resource repository based on the Composer configuration. * * @since 1.0 * @author <NAME> <<EMAIL>> */ class RepositoryDumper { /** * @var string */ private $vendorDir; /** * @var PackageInterface */ private $projectPackage; /** * @var PackageInterface[] */ private $installedPackages; /** * @var RepositoryBuilder */ private $repoBuilder; public function setVendorDir($vendorDir) { $this->vendorDir = $vendorDir; } public function setProjectPackage(PackageInterface $package) { $this->projectPackage = $package; } public function setInstalledPackages(array $packages) { $this->installedPackages = $packages; } public function setRepositoryBuilder(RepositoryBuilder $repoLoader) { $this->repoBuilder = $repoLoader; } public function dumpRepository() { $repo = new ResourceRepository(); $filesystem = new Filesystem(); $filesystem->ensureDirectoryExists($this->vendorDir); $vendorPath = $filesystem->normalizePath(realpath($this->vendorDir)); $this->repoBuilder->loadPackage($this->projectPackage); foreach ($this->installedPackages as $package) { /** @var \Composer\Package\PackageInterface $package */ $this->repoBuilder->loadPackage($package); } $this->repoBuilder->buildRepository($repo); $filesystem->ensureDirectoryExists($vendorPath.'/composer'); PhpCacheRepository::dumpRepository($repo, $vendorPath.'/composer'); $locatorCode = <<<LOCATOR <?php // resource-repository.php @generated by the Composer Puli plugin use Puli\Filesystem\PhpCacheRepository; return new PhpCacheRepository(__DIR__.'/composer'); LOCATOR; file_put_contents($vendorPath.'/resource-repository.php', $locatorCode); } } <file_sep>/src/PuliPlugin.php <?php /* * This file is part of the Composer Puli Plugin. * * (c) <NAME> <<EMAIL>> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Puli\Extension\Composer; use Composer\Composer; use Composer\EventDispatcher\EventSubscriberInterface; use Composer\IO\IOInterface; use Composer\Plugin\PluginInterface; use Composer\Script\CommandEvent; use Composer\Script\ScriptEvents; use Puli\Extension\Composer\RepositoryBuilder\RepositoryBuilder; use Puli\Extension\Composer\RepositoryDumper\RepositoryDumper; use Puli\Repository\ResourceRepository; /** * A plugin for managing resources of Composer dependencies. * * @since 1.0 * @author <NAME> <<EMAIL>> */ class PuliPlugin implements PluginInterface, EventSubscriberInterface { const VERSION = '@package_version@'; const RELEASE_DATE = '@release_date@'; /** * @var RepositoryDumper */ private $dumper; /** * @var bool */ private $firstRun = true; /** * {@inheritdoc} */ public static function getSubscribedEvents() { return array( ScriptEvents::POST_INSTALL_CMD => 'dumpRepository', ScriptEvents::POST_UPDATE_CMD => 'dumpRepository', ); } public function __construct(RepositoryDumper $dumper = null) { $this->dumper = $dumper ?: new RepositoryDumper(); } /** * Apply plugin modifications to composer * * @param Composer $composer * @param IOInterface $io */ public function activate(Composer $composer, IOInterface $io) { $composer->getEventDispatcher()->addSubscriber($this); } public function dumpRepository(CommandEvent $event) { // This method is called twice. Run it only once. if (!$this->firstRun) { return; } $this->firstRun = false; $composer = $event->getComposer(); $repositoryManager = $composer->getRepositoryManager(); $this->dumper->setVendorDir($composer->getConfig()->get('vendor-dir')); $this->dumper->setProjectPackage($composer->getPackage()); $this->dumper->setInstalledPackages($repositoryManager->getLocalRepository()->getPackages()); $this->dumper->setRepositoryBuilder(new RepositoryBuilder($composer->getInstallationManager())); $event->getIO()->write('<info>Generating resource repository</info>'); $this->dumper->dumpRepository(); } } <file_sep>/src/RepositoryBuilder/RepositoryBuilder.php <?php /* * This file is part of the Composer Puli Plugin. * * (c) <NAME> <<EMAIL>> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Puli\Extension\Composer\RepositoryBuilder; use Composer\Installer\InstallationManager; use Composer\Package\AliasPackage; use Composer\Package\PackageInterface; use Composer\Package\RootPackageInterface; use Puli\Extension\Composer\PackageGraph\PackageGraph; use Puli\Filesystem\Resource\LocalDirectoryResource; use Puli\Filesystem\Resource\LocalFileResource; use Puli\Filesystem\Resource\LocalResourceInterface; use Puli\Repository\ManageableRepositoryInterface; use Puli\Resource\DirectoryResourceInterface; /** * @since 1.0 * @author <NAME> <<EMAIL>> */ class RepositoryBuilder { /** * @var InstallationManager */ private $installationManager; /** * @var PackageInterface[] */ private $packages = array(); /** * @var PackageGraph */ private $packageGraph; /** * @var array[] */ private $packageOverrides = array(); /** * @var array[] */ private $resources = array(); /** * @var array[] */ private $knownPaths = array(); /** * @var array[] */ private $tags = array(); public function __construct(InstallationManager $installationManager) { $this->installationManager = $installationManager; $this->packageGraph = new PackageGraph(); } public function loadPackage(PackageInterface $package) { // We don't care about aliases, only "the real deal" if ($package instanceof AliasPackage) { return; } $packageName = $package->getName(); $this->packages[$packageName] = $package; } public function buildRepository(ManageableRepositoryInterface $repo) { $this->loadPackageConfiguration(); $this->buildPackageGraph(); $this->detectConflicts(); $this->addResources($repo); $this->tagResources($repo); return $repo; } private function loadPackageConfiguration() { foreach ($this->packages as $packageName => $package) { $packageRoot = $this->getInstallPath($package); $extra = $package->getExtra(); if (!isset($extra['puli'])) { return; } $config = $extra['puli']; $this->packageGraph->addPackage($packageName); if (isset($config['resources'])) { if (!is_array($config['resources'])) { throw new ResourceDefinitionException(sprintf( 'The "resources" key in the composer.json of the "%s" '. 'package should contain an array.', $packageName )); } $this->processResources($config['resources'], $packageName, $packageRoot); } if (isset($config['override'])) { if (!is_array($config['override']) && !is_string($config['override'])) { throw new ResourceDefinitionException(sprintf( 'The "override" key in the composer.json of the "%s" '. 'package should contain a string or an array.', $packageName )); } $this->processOverrides((array) $config['override'], $packageName); } if (isset($config['package-order']) && $package instanceof RootPackageInterface) { if (!is_array($config['package-order'])) { throw new ResourceDefinitionException(sprintf( 'The "package-order" key in the composer.json of the "%s" '. 'package should contain an array.', $packageName )); } $this->processPackageOrder($config['package-order']); } if (isset($config['tags'])) { if (!is_array($config['tags'])) { throw new ResourceDefinitionException(sprintf( 'The "tags" key in the composer.json of the "%s" '. 'package should contain an array.', $packageName )); } $this->processTags($config['tags']); } } } /** * @param array $resources * @param $currentPackageName * * @param $packageRoot */ private function processResources(array $resources, $currentPackageName, $packageRoot) { // Export shorter paths before longer paths ksort($resources); if (!isset($this->resources[$currentPackageName])) { $this->resources[$currentPackageName] = array(); } foreach ($resources as $path => $relativePaths) { if (!isset($this->resources[$currentPackageName][$path])) { $this->resources[$currentPackageName][$path] = array(); } foreach ((array) $relativePaths as $relativePath) { // Reference to install path of other package if ('@' === $relativePath[0] && false !== ($pos = strpos($relativePath, ':'))) { $refPackageName = substr($relativePath, 1, $pos - 1); if (!isset($this->packages[$refPackageName])) { throw new ResourceDefinitionException(sprintf( 'The package "%s" referred to a non-existing '. 'package "%s" in the resource path "%s". Did you '. 'forget to require the package "%s"?', $currentPackageName, $refPackageName, $relativePath, $refPackageName )); } $refPackage = $this->packages[$refPackageName]; $refPackageRoot = $this->getInstallPath($refPackage); $absolutePath = $refPackageRoot.'/'.substr($relativePath, $pos + 1); } else { $absolutePath = $packageRoot.'/'.$relativePath; } $resource = is_dir($absolutePath) ? new LocalDirectoryResource($absolutePath) : new LocalFileResource($absolutePath); // Packages can set a repository path to multiple local paths $this->resources[$currentPackageName][$path][] = $resource; // Store information necessary to detect conflicts later $this->prepareConflictDetection($path, $resource, $currentPackageName); } } } /** * @param $path * @param LocalResourceInterface $resource * @param $currentPackageName */ private function prepareConflictDetection($path, LocalResourceInterface $resource, $currentPackageName) { if (!isset($this->knownPaths[$path])) { $this->knownPaths[$path] = array(); } $this->knownPaths[$path][$currentPackageName] = true; // Detect conflicts in sub-directories if ($resource instanceof DirectoryResourceInterface) { $basePath = rtrim($path, '/').'/'; foreach ($resource->listEntries() as $entry) { $this->prepareConflictDetection($basePath.basename($entry->getLocalPath()), $entry, $currentPackageName); } } } private function processOverrides(array $overrides, $packageName) { if (!isset($this->packageOverrides[$packageName])) { $this->packageOverrides[$packageName] = array(); } foreach ($overrides as $override) { $this->packageOverrides[$packageName][] = $override; } } /** * @param array $packageOrder */ private function processPackageOrder(array $packageOrder) { // Make sure we have numeric, ascending keys here $packageOrder = array_values($packageOrder); // Each package overrides the previous one in the list for ($i = 1, $l = count($packageOrder); $i < $l; ++$i) { if (!isset($this->packageOverrides[$packageOrder[$i]])) { $this->packageOverrides[$packageOrder[$i]] = array(); } $this->packageOverrides[$packageOrder[$i]][] = $packageOrder[$i - 1]; } } private function processTags(array $tags) { foreach ($tags as $repositoryPath => $pathTags) { if (!isset($this->tags[$repositoryPath])) { $this->tags[$repositoryPath] = array(); } foreach ((array) $pathTags as $tag) { // Store tags as keys to prevent duplicates $this->tags[$repositoryPath][$tag] = true; } } } private function buildPackageGraph() { foreach ($this->packageOverrides as $overridingPackage => $overriddenPackages) { foreach ($overriddenPackages as $overriddenPackage) { // The overridden package must be processed before the // overriding package // Check that the overridden package is actually loaded TODO test if ($this->packageGraph->hasPackage($overriddenPackage)) { $this->packageGraph->addEdge($overriddenPackage, $overridingPackage); } } } // Free unneeded space unset($this->packageOverrides); } private function detectConflicts() { // Check whether any of the paths were registered by more than one // package and if yes, check if the order between the packages is // defined foreach ($this->knownPaths as $path => $packageNames) { // Attention, the package names are stored in the keys if (1 === count($packageNames)) { continue; } $orderedPackages = $this->packageGraph->getSortedPackages(array_keys($packageNames)); // An edge must exist between each package pair in the sorted set, // otherwise the dependencies are not sufficiently defined for ($i = 1, $l = count($orderedPackages); $i < $l; ++$i) { if (!$this->packageGraph->hasEdge($orderedPackages[$i - 1], $orderedPackages[$i])) { throw new ResourceConflictException(sprintf( 'The packages "%s" and "%s" add resources for the same '. 'path "%s", but have no override order defined '. "between them.\n\nResolutions:\n\n(1) Add the key ". '"override" to the composer.json of one package and '. "set its value to the other package name.\n(2) Add the ". 'key "override-order" to the composer.json of the root '. 'package and define the order of the packages there.', $orderedPackages[$i - 1], $orderedPackages[$i], $path )); } } } } private function addResources(ManageableRepositoryInterface $repo) { $packageOrder = $this->packageGraph->getSortedPackages(); foreach ($packageOrder as $packageName) { if (!isset($this->resources[$packageName])) { continue; } foreach ($this->resources[$packageName] as $path => $resources) { foreach ($resources as $resource) { $repo->add($path, $resource); } } } } private function tagResources(ManageableRepositoryInterface $repo) { foreach ($this->tags as $path => $tags) { foreach ($tags as $tag => $_) { $repo->tag($path, $tag); } } } private function getInstallPath(PackageInterface $package) { if ($package instanceof RootPackageInterface) { return getcwd(); } return $this->installationManager->getInstallPath($package); } } <file_sep>/tests/PuliPluginTest.php <?php /* * This file is part of the Composer Puli Plugin. * * (c) <NAME> <<EMAIL>> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Puli\Extension\Composer\Tests; use Composer\Composer; use Composer\Config; use Composer\Repository\RepositoryManager; use Composer\Script\CommandEvent; use Composer\Script\ScriptEvents; use Puli\Extension\Composer\PuliPlugin; /** * @since 1.0 * @author <NAME> <<EMAIL>> */ class PuliPluginTest extends \PHPUnit_Framework_TestCase { /** * @var \PHPUnit_Framework_MockObject_MockObject */ private $dumper; /** * @var PuliPlugin */ private $plugin; /** * @var Composer */ private $composer; /** * @var \PHPUnit_Framework_MockObject_MockObject */ private $io; private $localRepository; private $repositoryManager; private $installationManager; /** * @var Config */ private $config; private $projectPackage; private $installedPackages; protected function setUp() { $this->dumper = $this->getMockBuilder('Puli\Extension\Composer\RepositoryDumper\RepositoryDumper') ->disableOriginalConstructor() ->getMock(); $this->plugin = new PuliPlugin($this->dumper); $this->io = $this->getMock('Composer\IO\IOInterface'); $this->config = new Config(); $this->localRepository = $this->getMock('Composer\Repository\WritableRepositoryInterface'); $this->repositoryManager = new RepositoryManager($this->io, $this->config); $this->repositoryManager->setLocalRepository($this->localRepository); $this->installationManager = $this->getMockBuilder('Composer\Installer\InstallationManager') ->disableOriginalConstructor() ->getMock(); $this->projectPackage = $this->getMock('Composer\Package\RootPackageInterface'); $this->installedPackages = array( $this->getMock('Composer\Package\PackageInterface'), $this->getMock('Composer\Package\PackageInterface'), ); $this->localRepository->expects($this->any()) ->method('getPackages') ->will($this->returnValue($this->installedPackages)); $this->composer = new Composer(); $this->composer->setRepositoryManager($this->repositoryManager); $this->composer->setInstallationManager($this->installationManager); $this->composer->setConfig($this->config); $this->composer->setPackage($this->projectPackage); } public function testActivate() { $dispatcher = $this->getMockBuilder('Composer\EventDispatcher\EventDispatcher') ->disableOriginalConstructor() ->getMock(); $dispatcher->expects($this->once()) ->method('addSubscriber') ->with($this->plugin); $this->composer->setEventDispatcher($dispatcher); $this->plugin->activate($this->composer, $this->io); } public function provideEventNames() { return array( array(ScriptEvents::POST_INSTALL_CMD), array(ScriptEvents::POST_UPDATE_CMD), ); } /** * @dataProvider provideEventNames */ public function testEventListeners($eventName) { $event = new CommandEvent($eventName, $this->composer, $this->io); $listeners = PuliPlugin::getSubscribedEvents(); $this->assertArrayHasKey($eventName, $listeners); $listener = $listeners[$eventName]; $this->config->merge(array( 'config' => array( 'vendor-dir' => 'VENDOR/DIR', ), )); $this->dumper->expects($this->once()) ->method('setVendorDir') ->with('VENDOR/DIR'); $this->dumper->expects($this->once()) ->method('setProjectPackage') ->with($this->projectPackage); $this->dumper->expects($this->once()) ->method('setInstalledPackages') ->with($this->installedPackages); $this->dumper->expects($this->once()) ->method('setRepositoryBuilder') ->with($this->isInstanceOf('Puli\Extension\Composer\RepositoryBuilder\RepositoryBuilder')); $this->io->expects($this->once()) ->method('write') ->with('<info>Generating resource repository</info>'); $this->plugin->$listener($event); } /** * @dataProvider provideEventNames * @depends testEventListeners */ public function testEventListenersOnlyProcessedOnFirstCall($eventName) { // Execute normal test $this->testEventListeners($eventName); // Now fire again $event = new CommandEvent($eventName, $this->composer, $this->io); $listeners = PuliPlugin::getSubscribedEvents(); $listener = $listeners[$eventName]; $this->plugin->$listener($event); } } <file_sep>/tests/RepositoryBuilder/RepositoryBuilderTest.php <?php /* * This file is part of the Composer Puli Plugin. * * (c) <NAME> <<EMAIL>> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Puli\Extension\Composer\Tests\RepositoryBuilder; use Puli\Extension\Composer\RepositoryBuilder\RepositoryBuilder; use Puli\Filesystem\Resource\LocalDirectoryResource; /** * @since 1.0 * @author <NAME> <<EMAIL>> */ class RepositoryBuilderTest extends \PHPUnit_Framework_TestCase { /** * @var \PHPUnit_Framework_MockObject_MockObject */ private $im; /** * @var string[] */ private $packageRoots; /** * @var \PHPUnit_Framework_MockObject_MockObject */ private $repo; /** * @var RepositoryBuilder */ private $builder; private $package1Root; private $package2Root; private $package3Root; private $previousWd; private $cwd; protected function setUp() { $packageRoots = &$this->packageRoots; $this->im = $this->getMockBuilder('Composer\Installer\InstallationManager') ->disableOriginalConstructor() ->getMock(); $this->im->expects($this->any()) ->method('getInstallPath') ->will($this->returnCallback(function ($package) use (&$packageRoots) { return $packageRoots[spl_object_hash($package)]; })); $this->repo = $this->getMock('Puli\Repository\ManageableRepositoryInterface'); $this->builder = new RepositoryBuilder($this->im); $this->package1Root = __DIR__.'/Fixtures/package1'; $this->package2Root = __DIR__.'/Fixtures/package2'; $this->package3Root = __DIR__.'/Fixtures/package3'; $this->previousWd = getcwd(); $this->cwd = __DIR__.'/Fixtures/root-package'; chdir($this->cwd); } protected function tearDown() { chdir($this->previousWd); } public function testIgnorePackageWithoutExtras() { $this->repo->expects($this->never()) ->method('add'); $package = $this->createPackage($this->package1Root, array()); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } public function testIgnorePackageWithoutPuli() { $this->repo->expects($this->never()) ->method('add'); $package = $this->createPackage($this->package1Root, array( 'extra' => array( ), )); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } public function testIgnorePackageWithoutResources() { $this->repo->expects($this->never()) ->method('add'); $package = $this->createPackage($this->package1Root, array( 'extra' => array( 'puli' => array( ), ), )); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } public function testAddResources() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/package', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('add') ->with('/acme/package/css', new LocalDirectoryResource($this->package1Root.'/assets/css')); $package = $this->createPackage($this->package1Root, array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/package' => 'resources', '/acme/package/css' => 'assets/css', ), ), ), )); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } public function testAddResourcesInRootPackage() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/package', new LocalDirectoryResource($this->cwd.'/resources')); $this->repo->expects($this->at(1)) ->method('add') ->with('/acme/package/css', new LocalDirectoryResource($this->cwd.'/assets/css')); $package = $this->createRootPackage(array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/package' => 'resources', '/acme/package/css' => 'assets/css', ), ), ), )); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } public function testAddResourcesFromOtherPackagesInstallPath() { $this->repo->expects($this->once()) ->method('add') ->with('/acme/package', new LocalDirectoryResource($this->package2Root.'/resources')); $package1 = $this->createPackage($this->package1Root, array( 'name' => 'acme/package1', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/package' => '@acme/package2:resources', ), ), ), )); $package2 = $this->createPackage($this->package2Root, array( 'name' => 'acme/package2', )); $this->builder->loadPackage($package1); $this->builder->loadPackage($package2); $this->builder->buildRepository($this->repo); } /** * @expectedException \Puli\Extension\Composer\RepositoryBuilder\ResourceDefinitionException */ public function testFailIfReferencedPackageCouldNotBeFound() { $this->repo->expects($this->never()) ->method('add'); $package1 = $this->createPackage($this->package1Root, array( 'name' => 'acme/package1', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/package' => '@acme/package2:resources', ), ), ), )); $this->builder->loadPackage($package1); $this->builder->buildRepository($this->repo); } public function testIgnoreAliasPackages() { $this->repo->expects($this->never()) ->method('add'); $package = $this->createAliasPackage(array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/package' => 'resources', '/acme/package/css' => 'assets/css', ), ), ), )); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } /** * @expectedException \Puli\Filesystem\FilesystemException */ public function testFailIfResourceNotFound() { $package = $this->createPackage($this->package1Root, array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/package' => 'foobar', ), ), ), )); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } public function testIgnoreResourceOrder() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/package', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('add') ->with('/acme/package/css', new LocalDirectoryResource($this->package1Root.'/assets/css')); $package = $this->createPackage($this->package1Root, array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/package/css' => 'assets/css', '/acme/package' => 'resources', ), ), ), )); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } public function testExportResourceWithMultipleLocalPaths() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/package', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('add') ->with('/acme/package', new LocalDirectoryResource($this->package1Root.'/assets')); $package = $this->createPackage($this->package1Root, array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/package' => array('resources', 'assets'), ), ), ), )); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } public function testOverrideExistingPackage() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/overridden', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('add') ->with('/acme/overridden/css', new LocalDirectoryResource($this->package1Root.'/assets/css')); $this->repo->expects($this->at(2)) ->method('add') ->with('/acme/overridden', new LocalDirectoryResource($this->package2Root.'/override')); $this->repo->expects($this->at(3)) ->method('add') ->with('/acme/overridden/css', new LocalDirectoryResource($this->package2Root.'/css-override')); $overridingPackage = $this->createPackage($this->package2Root, array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'override', '/acme/overridden/css' => 'css-override', ), 'override' => 'acme/overridden', ), ), )); $overriddenPackage = $this->createPackage($this->package1Root, array( 'name' => 'acme/overridden', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'resources', '/acme/overridden/css' => 'assets/css', ), ), ), )); // Load overridden package first $this->builder->loadPackage($overriddenPackage); $this->builder->loadPackage($overridingPackage); $this->builder->buildRepository($this->repo); } public function testOverrideFuturePackage() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/overridden', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('add') ->with('/acme/overridden', new LocalDirectoryResource($this->package2Root.'/override')); $overridingPackage = $this->createPackage($this->package2Root, array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'override', ), 'override' => 'acme/overridden', ), ), )); $overriddenPackage = $this->createPackage($this->package1Root, array( 'name' => 'acme/overridden', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'resources', ), ), ), )); // Load overridden package last $this->builder->loadPackage($overridingPackage); $this->builder->loadPackage($overriddenPackage); $this->builder->buildRepository($this->repo); } public function testOverrideChain() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/overridden', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('add') ->with('/acme/overridden', new LocalDirectoryResource($this->package2Root.'/override')); $this->repo->expects($this->at(2)) ->method('add') ->with('/acme/overridden', new LocalDirectoryResource($this->package3Root.'/override2')); $package3 = $this->createPackage($this->package3Root, array( 'name' => 'acme/priority2', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'override2', ), 'override' => 'acme/priority1', ), ), )); $package2 = $this->createPackage($this->package2Root, array( 'name' => 'acme/priority1', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'override', ), 'override' => 'acme/priority0', ), ), )); $package1 = $this->createPackage($this->package1Root, array( 'name' => 'acme/priority0', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'resources', ), ), ), )); $this->builder->loadPackage($package1); $this->builder->loadPackage($package2); $this->builder->loadPackage($package3); $this->builder->buildRepository($this->repo); } public function testOverrideMultiplePackages() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/overridden1', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('add') ->with('/acme/overridden2', new LocalDirectoryResource($this->package2Root.'/resources')); $this->repo->expects($this->at(2)) ->method('add') ->with('/acme/overridden1', new LocalDirectoryResource($this->package3Root.'/override1')); $this->repo->expects($this->at(3)) ->method('add') ->with('/acme/overridden2', new LocalDirectoryResource($this->package3Root.'/override2')); $overridingPackage = $this->createPackage($this->package3Root, array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden1' => 'override1', '/acme/overridden2' => 'override2', ), 'override' => array('acme/overridden1', 'acme/overridden2'), ), ), )); $overriddenPackage1 = $this->createPackage($this->package1Root, array( 'name' => 'acme/overridden1', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden1' => 'resources', ), ), ), )); $overriddenPackage2 = $this->createPackage($this->package2Root, array( 'name' => 'acme/overridden2', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden2' => 'resources', ), ), ), )); // Load overridden package first $this->builder->loadPackage($overriddenPackage1); $this->builder->loadPackage($overriddenPackage2); $this->builder->loadPackage($overridingPackage); $this->builder->buildRepository($this->repo); } public function testOverrideNonExistingPackage() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/overridden', new LocalDirectoryResource($this->package2Root.'/override')); $overridingPackage = $this->createPackage($this->package2Root, array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'override', ), 'override' => 'acme/overridden', ), ), )); $this->builder->loadPackage($overridingPackage); $this->builder->buildRepository($this->repo); } public function testOverrideWithMultipleDirectories() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/overridden', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('add') ->with('/acme/overridden', new LocalDirectoryResource($this->package2Root.'/override')); $this->repo->expects($this->at(2)) ->method('add') ->with('/acme/overridden', new LocalDirectoryResource($this->package2Root.'/css-override')); $overridingPackage = $this->createPackage($this->package2Root, array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => array('override', 'css-override'), ), 'override' => 'acme/overridden', ), ), )); $overriddenPackage = $this->createPackage($this->package1Root, array( 'name' => 'acme/overridden', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'resources', ), ), ), )); $this->builder->loadPackage($overridingPackage); $this->builder->loadPackage($overriddenPackage); $this->builder->buildRepository($this->repo); } /** * @expectedException \Puli\Extension\Composer\RepositoryBuilder\ResourceConflictException */ public function testConflictIfSamePathsButNoOverrideStatement() { $this->repo->expects($this->never()) ->method('add'); $overridingPackage1 = $this->createPackage($this->package1Root, array( 'name' => 'acme/package1', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'resources', ), ), ), )); $overridingPackage2 = $this->createPackage($this->package2Root, array( 'name' => 'acme/package2', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'override', ), ), ), )); $this->builder->loadPackage($overridingPackage1); $this->builder->loadPackage($overridingPackage2); $this->builder->buildRepository($this->repo); } /** * @expectedException \Puli\Extension\Composer\RepositoryBuilder\ResourceConflictException */ public function testConflictIfExistingSubPathAndNoOverrideStatement() { $this->repo->expects($this->never()) ->method('add'); $overridingPackage1 = $this->createPackage($this->package1Root, array( 'name' => 'acme/package1', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'resources', ), ), ), )); $overridingPackage2 = $this->createPackage($this->package2Root, array( 'name' => 'acme/package2', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden/config' => 'override', ), ), ), )); $this->builder->loadPackage($overridingPackage1); $this->builder->loadPackage($overridingPackage2); $this->builder->buildRepository($this->repo); } public function testNoConflictIfNewSubPathAndNoOverrideStatement() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/overridden', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('add') ->with('/acme/overridden/new', new LocalDirectoryResource($this->package2Root.'/override')); $overridingPackage1 = $this->createPackage($this->package1Root, array( 'name' => 'acme/package1', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'resources', ), ), ), )); $overridingPackage2 = $this->createPackage($this->package2Root, array( 'name' => 'acme/package2', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden/new' => 'override', ), ), ), )); $this->builder->loadPackage($overridingPackage1); $this->builder->loadPackage($overridingPackage2); $this->builder->buildRepository($this->repo); } public function testDefinePackageOrderOnRootPackage() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/overridden', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('add') ->with('/acme/overridden', new LocalDirectoryResource($this->package2Root.'/override')); $rootPackage = $this->createRootPackage(array( 'extra' => array( 'puli' => array( 'package-order' => array( 'acme/package1', 'acme/package2', ), ), ), )); $overridingPackage1 = $this->createPackage($this->package1Root, array( 'name' => 'acme/package1', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'resources', ), ), ), )); $overridingPackage2 = $this->createPackage($this->package2Root, array( 'name' => 'acme/package2', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'override', ), ), ), )); $this->builder->loadPackage($rootPackage); $this->builder->loadPackage($overridingPackage1); $this->builder->loadPackage($overridingPackage2); $this->builder->buildRepository($this->repo); } /** * @expectedException \Puli\Extension\Composer\RepositoryBuilder\ResourceConflictException */ public function testPackageOrderInNonRootPackageIsIgnored() { $this->repo->expects($this->never()) ->method('add'); $pseudoRootPackage = $this->createPackage('/', array( 'extra' => array( 'puli' => array( 'package-order' => array( 'acme/package2', 'acme/package1', ), ), ), )); $overridingPackage1 = $this->createPackage($this->package1Root, array( 'name' => 'acme/package1', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'resources', ), ), ), )); $overridingPackage2 = $this->createPackage($this->package2Root, array( 'name' => 'acme/package2', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/overridden' => 'override', ), ), ), )); $this->builder->loadPackage($pseudoRootPackage); $this->builder->loadPackage($overridingPackage1); $this->builder->loadPackage($overridingPackage2); $this->builder->buildRepository($this->repo); } public function testTagResources() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/package', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('tag') ->with('/acme/package', 'acme/tag'); $package = $this->createPackage($this->package1Root, array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/package' => 'resources', ), 'tags' => array( '/acme/package' => 'acme/tag', ), ), ), )); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } public function testTagResourcesFromExistingOtherPackage() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/package1', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('tag') ->with('/acme/package1', 'acme/tag'); $package1 = $this->createPackage($this->package1Root, array( 'name' => 'acme/package1', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/package1' => 'resources', ), ), ), )); $package2 = $this->createPackage($this->package2Root, array( 'name' => 'acme/package2', 'extra' => array( 'puli' => array( 'tags' => array( '/acme/package1' => 'acme/tag', ), ), ), )); $this->builder->loadPackage($package1); $this->builder->loadPackage($package2); $this->builder->buildRepository($this->repo); } public function testTagResourcesFromFutureOtherPackage() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/package1', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('tag') ->with('/acme/package1', 'acme/tag'); $package1 = $this->createPackage($this->package1Root, array( 'name' => 'acme/package1', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/package1' => 'resources', ), ), ), )); $package2 = $this->createPackage($this->package2Root, array( 'name' => 'acme/package2', 'extra' => array( 'puli' => array( 'tags' => array( '/acme/package1' => 'acme/tag', ), ), ), )); $this->builder->loadPackage($package2); $this->builder->loadPackage($package1); $this->builder->buildRepository($this->repo); } public function testTagInTwoPackages() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/package1', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('tag') ->with('/acme/package1', 'acme/tag1'); $this->repo->expects($this->at(2)) ->method('tag') ->with('/acme/package1', 'acme/tag2'); $package1 = $this->createPackage($this->package1Root, array( 'name' => 'acme/package1', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/package1' => 'resources', ), 'tags' => array( '/acme/package1' => 'acme/tag1', ), ), ), )); $package2 = $this->createPackage($this->package2Root, array( 'name' => 'acme/package2', 'extra' => array( 'puli' => array( 'tags' => array( '/acme/package1' => 'acme/tag2', ), ), ), )); $this->builder->loadPackage($package1); $this->builder->loadPackage($package2); $this->builder->buildRepository($this->repo); } public function testDuplicateTags() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/package1', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('tag') ->with('/acme/package1', 'acme/tag'); $package1 = $this->createPackage($this->package1Root, array( 'name' => 'acme/package1', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/package1' => 'resources', ), 'tags' => array( '/acme/package1' => 'acme/tag', ), ), ), )); $package2 = $this->createPackage($this->package2Root, array( 'name' => 'acme/package2', 'extra' => array( 'puli' => array( 'tags' => array( '/acme/package1' => 'acme/tag', ), ), ), )); $this->builder->loadPackage($package2); $this->builder->loadPackage($package1); $this->builder->buildRepository($this->repo); } public function testMultipleTags() { $this->repo->expects($this->at(0)) ->method('add') ->with('/acme/package1', new LocalDirectoryResource($this->package1Root.'/resources')); $this->repo->expects($this->at(1)) ->method('tag') ->with('/acme/package1', 'acme/tag1'); $this->repo->expects($this->at(2)) ->method('tag') ->with('/acme/package1', 'acme/tag2'); $package = $this->createPackage($this->package1Root, array( 'name' => 'acme/package1', 'extra' => array( 'puli' => array( 'resources' => array( '/acme/package1' => 'resources', ), 'tags' => array( '/acme/package1' => array('acme/tag1', 'acme/tag2'), ), ), ), )); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } /** * @expectedException \Puli\Extension\Composer\RepositoryBuilder\ResourceDefinitionException */ public function testResourcesMustBeArray() { $package = $this->createPackage($this->package1Root, array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'resources' => 'foobar', ), ), )); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } /** * @expectedException \Puli\Extension\Composer\RepositoryBuilder\ResourceDefinitionException */ public function testOverrideMustBeStringOrArray() { $package = $this->createPackage($this->package1Root, array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'override' => new \stdClass(), ), ), )); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } /** * @expectedException \Puli\Extension\Composer\RepositoryBuilder\ResourceDefinitionException */ public function testOverrideOrderMustBeArray() { $package = $this->createRootPackage(array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'package-order' => 'foobar', ), ), )); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } /** * @expectedException \Puli\Extension\Composer\RepositoryBuilder\ResourceDefinitionException */ public function testTagsMustBeArray() { $package = $this->createRootPackage(array( 'name' => 'acme/package', 'extra' => array( 'puli' => array( 'tags' => 'foobar', ), ), )); $this->builder->loadPackage($package); $this->builder->buildRepository($this->repo); } /** * @param array $config * * @return \Composer\Package\PackageInterface */ private function createPackage($root, array $config) { $package = $this->getMock('\Composer\Package\PackageInterface'); $package->expects($this->any()) ->method('getName') ->will($this->returnValue(isset($config['name']) ? $config['name'] : '')); $package->expects($this->any()) ->method('getExtra') ->will($this->returnValue(isset($config['extra']) ? $config['extra'] : array())); $this->packageRoots[spl_object_hash($package)] = $root; return $package; } /** * @param array $config * * @return \Composer\Package\PackageInterface */ private function createRootPackage(array $config) { $package = $this->getMock('\Composer\Package\RootPackageInterface'); $package->expects($this->any()) ->method('getName') ->will($this->returnValue(isset($config['name']) ? $config['name'] : '__root__')); $package->expects($this->any()) ->method('getExtra') ->will($this->returnValue(isset($config['extra']) ? $config['extra'] : array())); return $package; } /** * @param array $config * * @return \Composer\Package\AliasPackage */ private function createAliasPackage(array $config) { $package = $this->getMockBuilder('\Composer\Package\AliasPackage') ->disableOriginalConstructor() ->getMock(); $package->expects($this->any()) ->method('getName') ->will($this->returnValue(isset($config['name']) ? $config['name'] : '')); $package->expects($this->any()) ->method('getExtra') ->will($this->returnValue(isset($config['extra']) ? $config['extra'] : array())); return $package; } } <file_sep>/tests/PackageGraph/PackageGraphTest.php <?php /* * This file is part of the Composer Puli Plugin. * * (c) <NAME> <<EMAIL>> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Puli\Extension\Composer\Tests\PackageGraph; use Puli\Extension\Composer\PackageGraph\PackageGraph; /** * @since 1.0 * @author <NAME> <<EMAIL>> */ class PackageGraphTest extends \PHPUnit_Framework_TestCase { /** * @var PackageGraph */ private $graph; protected function setUp() { $this->graph = new PackageGraph(); } private function initializeGraph() { // (p1) → (p2) → (p3) // ↗ ↘ ↗ // (p5) (p4) // // (p6) $this->graph->addPackage('p1'); $this->graph->addPackage('p2'); $this->graph->addPackage('p3'); $this->graph->addPackage('p4'); $this->graph->addPackage('p5'); $this->graph->addPackage('p6'); $this->graph->addEdge('p1', 'p2'); $this->graph->addEdge('p2', 'p3'); $this->graph->addEdge('p2', 'p4'); $this->graph->addEdge('p5', 'p2'); $this->graph->addEdge('p4', 'p3'); } public function providePaths() { return array( // adjacent array('p1', 'p2', array('p1', 'p2')), // adjacent, wrong order array('p2', 'p1', null), // multi-node array('p1', 'p3', array('p1', 'p2', 'p3')), // multi-node, wrong order array('p3', 'p1', null), // multi-node, no path array('p3', 'p4', null), // node without edges array('p1', 'p5', null), array('p5', 'p1', null), // undefined node array('p1', 'foo', null), array('foo', 'p1', null), ); } /** * @dataProvider providePaths */ public function testHasPath($from, $to, $path) { $this->initializeGraph(); $this->assertSame($path !== null, $this->graph->hasPath($from, $to)); } /** * @dataProvider providePaths */ public function testGetPath($from, $to, $path) { $this->initializeGraph(); $this->assertSame($path, $this->graph->getPath($from, $to)); } /** * @expectedException \InvalidArgumentException */ public function testAddPackageFailsIfAlreadyDefined() { $this->graph->addPackage('p1'); $this->graph->addPackage('p1'); } /** * @expectedException \InvalidArgumentException */ public function testAddEdgeFailsIfLeftPackageDoesNotExist() { $this->graph->addPackage('p2'); $this->graph->addEdge('p1', 'p2'); } /** * @expectedException \InvalidArgumentException */ public function testAddEdgeFailsIfRightPackageDoesNotExist() { $this->graph->addPackage('p1'); $this->graph->addEdge('p1', 'p2'); } /** * @expectedException \Puli\Extension\Composer\PackageGraph\CycleException */ public function testAddEdgeFailsIfCycle() { $this->graph->addPackage('p1'); $this->graph->addPackage('p2'); $this->graph->addEdge('p1', 'p2'); $this->graph->addEdge('p2', 'p1'); } public function testGetSortedPackages() { $this->initializeGraph(); $this->assertSame(array('p1', 'p5', 'p2', 'p4', 'p3', 'p6'), $this->graph->getSortedPackages()); } public function testGetSortedPackagesOfSubset() { $this->initializeGraph(); $this->assertSame(array('p1', 'p4', 'p3', 'p5', 'p6'), $this->graph->getSortedPackages(array('p1', 'p3', 'p4', 'p5', 'p6'))); } /** * @expectedException \InvalidArgumentException */ public function testGetSortedPackagesExpectsValidPackages() { $this->graph->getSortedPackages(array('foo')); } public function testHasPackage() { $this->assertFalse($this->graph->hasPackage('p1')); $this->graph->addPackage('p1'); $this->assertTrue($this->graph->hasPackage('p1')); } public function testHasEdge() { $this->assertFalse($this->graph->hasEdge('p1', 'p2')); $this->assertFalse($this->graph->hasEdge('p2', 'p1')); $this->graph->addPackage('p1'); $this->graph->addPackage('p2'); $this->assertFalse($this->graph->hasEdge('p1', 'p2')); $this->assertFalse($this->graph->hasEdge('p2', 'p1')); $this->graph->addEdge('p1', 'p2'); $this->assertTrue($this->graph->hasEdge('p1', 'p2')); $this->assertFalse($this->graph->hasEdge('p2', 'p1')); } } <file_sep>/src/PackageGraph/PackageGraph.php <?php /* * This file is part of the Composer Puli Plugin. * * (c) <NAME> <<EMAIL>> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Puli\Extension\Composer\PackageGraph; /** * A directed, acyclic graph of package names. * * Packages can be added with {@link addPackage()}. Edges between these packages * can then be added using {@link addEdge()}. Both ends of an edge must have * been defined before the edge is added. * * ```php * $graph = new PackageGraph(); * $graph->addPackage('acme/core'); * $graph->addPackage('acme/blog'); * $graph->addPackage('acme/blog-extension1'); * $graph->addPackage('acme/blog-extension2'); * $graph->addEdge('acme/core', 'acme/blog'); * $graph->addEdge('acme/blog', 'acme/blog-extension1'); * $graph->addEdge('acme/blog', 'acme/blog-extension2'); * $graph->addEdge('acme/blog-extension1', 'acme/blog-extension2'); * ``` * * You can use {@link getPath()} and {@link hasPath()} to check whether a path * exists from one package to the other: * * ```php * // ... * * $graph->hasPath('acme/blog', 'acme/blog-extension1'); * // => true * * $graph->hasPath('acme/blog-extension1', 'acme/blog-extension2'); * // => false * * $graph->getPath('acme/core', 'acme/blog-extension2'); * // => array('acme/core', 'acme/blog', 'acme/blog-extension2') * ``` * * With {@link getSortedPackages()}, you can sort the packages such that the * dependencies defined via the edges are respected: * * ```php * // ... * * $graph->getSortedPackages(); * // => array('acme/core', 'acme/blog', 'acme/blog-extension1', 'acme/blog-extension2') * ``` * * @since 1.0 * @author <NAME> <<EMAIL>> */ class PackageGraph { /** * Stores the names of all packages (vertices) as keys. * * @var array */ private $packages = array(); /** * Stores the edges in the keys of a multi-dimensional array. * * The first dimension stores the targets, the second dimension the origins * of the edges. * * @var array */ private $edges = array(); /** * Adds a package name to the graph. * * @param string $package The package name. * * @throws \InvalidArgumentException If the package name already exists. */ public function addPackage($package) { if (isset($this->packages[$package])) { throw new \InvalidArgumentException(sprintf( 'The package "%s" was added to the graph twice.', $package )); } $this->packages[$package] = true; $this->edges[$package] = array(); } /** * Returns whether a package name exists in the graph. * * @param string $package The package name. * * @return bool Whether the package name exists. */ public function hasPackage($package) { return isset($this->packages[$package]); } /** * Adds a directed edge from one to another package. * * @param string $fromPackage The origin package name. * @param string $toPackage The target package name. * * @throws \InvalidArgumentException If any of the packages does not exist * in the graph. Each package must have * been added first. * * @throws CycleException If adding the edge would create a cycle. */ public function addEdge($fromPackage, $toPackage) { if (!isset($this->packages[$fromPackage])) { throw new \InvalidArgumentException(sprintf( 'The package "%s" does not exist in the graph.', $fromPackage )); } if (!isset($this->packages[$toPackage])) { throw new \InvalidArgumentException(sprintf( 'The package "%s" does not exist in the graph.', $toPackage )); } if (null !== ($path = $this->getPath($toPackage, $fromPackage))) { $last = array_pop($path); throw new CycleException(sprintf( 'A cyclic dependency was discovered between the packages "%s" '. 'and "%s". Please check the "override" keys defined in these'. 'packages.', implode('", "', $path), $last )); } $this->edges[$toPackage][$fromPackage] = true; } /** * Returns whether an edge exists between two packages. * * @param string $fromPackage The origin package name. * @param string $toPackage The target package name. * * @return bool Whether an edge exists from the origin to the target package. */ public function hasEdge($fromPackage, $toPackage) { return isset($this->edges[$toPackage][$fromPackage]); } /** * Returns whether a path exists from one to another package. * * @param string $fromPackage The origin package name. * @param string $toPackage The target package name. * * @return bool Whether a path exists from the origin to the target package. */ public function hasPath($fromPackage, $toPackage) { // does not exist in the graph if (!isset($this->edges[$toPackage])) { return false; } // adjacent node if (isset($this->edges[$toPackage][$fromPackage])) { return true; } // DFS foreach ($this->edges[$toPackage] as $predecessor => $_) { if ($this->hasPath($fromPackage, $predecessor)) { return true; } } return false; } /** * Returns the path from one to another package. * * @param string $fromPackage The origin package name. * @param string $toPackage The target package name. * * @return string[]|null The path of package names or `null`, if no path * was found. */ public function getPath($fromPackage, $toPackage) { if ($this->getPathDFS($fromPackage, $toPackage, $reversePath)) { return array_reverse($reversePath); } return null; } /** * Returns all packages in the graph. * * @return string All package names in the graph. */ public function getPackages() { return $this->packages; } /** * Sorts packages according to the defined edges. * * The packages are sorted such that if two packages p1 and p2 have an edge * (p1, p2) in the graph, then p1 comes before p2 in the sorted set. * * If no packages are passed, all packages are sorted. * * @param string[] $packagesToSort The packages which should be sorted. * * @return string[] The sorted package names. * * @throws \InvalidArgumentException If any of the passed packages does not * exist in the graph. */ public function getSortedPackages(array $packagesToSort = array()) { if (count($packagesToSort) > 0) { $packagesToSort = array_flip($packagesToSort); foreach ($packagesToSort as $package => $_) { if (!isset($this->packages[$package])) { throw new \InvalidArgumentException(sprintf( 'The package "%s" does not exist in the graph.', $package )); } } } else { $packagesToSort = $this->packages; } $sorted = array(); // Do a topologic sort // Start with any package and process until no more are left while (false !== reset($packagesToSort)) { $this->sortPackagesDFS(key($packagesToSort), $packagesToSort, $sorted); } return $sorted; } /** * Finds a path between packages using Depth-First Search. * * @param string $fromPackage The origin package name. * @param string $toPackage The target package name. * @param array $reversePath The path in reverse order. * * @return bool Whether a path was found. */ private function getPathDFS($fromPackage, $toPackage, &$reversePath = array()) { // does not exist in the graph if (!isset($this->edges[$toPackage])) { return false; } $reversePath[] = $toPackage; // adjacent node if (isset($this->edges[$toPackage][$fromPackage])) { $reversePath[] = $fromPackage; return true; } // DFS foreach ($this->edges[$toPackage] as $predecessor => $_) { if ($this->getPathDFS($fromPackage, $predecessor, $reversePath)) { return true; } } return false; } /** * Topologically sorts the given package name into the output array. * * The resulting array is sorted such that all predecessors of the package * come before the package (and their predecessors before them, and so on). * * @param string $package The package to sort. * @param array $packagesToSort The packages yet to be sorted. * @param array $output The output array. */ private function sortPackagesDFS($package, array &$packagesToSort, array &$output) { unset($packagesToSort[$package]); // Before adding the package itself to the path, add all predecessors. // Do so recursively, then we make sure that each package is visited // in the path before any of its successors. foreach ($this->edges[$package] as $predecessor => $_) { // The package was already processed. Either the package is on the // path already, then we're good. Otherwise, we have a cycle. // However, addEdge() guarantees that the graph is cycle-free. if (isset($packagesToSort[$predecessor])) { $this->sortPackagesDFS($predecessor, $packagesToSort, $output); } } $output[] = $package; } } <file_sep>/tests/RepositoryDumper/RepositoryDumperTest.php <?php /* * This file is part of the Composer Puli Plugin. * * (c) <NAME> <<EMAIL>> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Puli\Extension\Composer\Tests\RepositoryDumper; use Composer\Package\PackageInterface; use Composer\Util\Filesystem; use Puli\Extension\Composer\RepositoryDumper\RepositoryDumper; use Puli\Repository\ManageableRepositoryInterface; /** * @since 1.0 * @author <NAME> <<EMAIL>> */ class RepositoryDumperTest extends \PHPUnit_Framework_TestCase { private $tempDir; protected function setUp() { while (false === mkdir($this->tempDir = sys_get_temp_dir().'/puli-plugin/RepositoryDumperTest'.rand(10000, 99999), 0777, true)) {} } protected function tearDown() { $filesystem = new Filesystem(); $filesystem->remove($this->tempDir); } public function testDumpRepository() { $projectDir = $this->tempDir.'/project'; $vendorDir = $this->tempDir.'/vendor'; mkdir($projectDir); mkdir($vendorDir); // Create dependencies $builder = $this->getMockBuilder('Puli\Extension\Composer\RepositoryBuilder\RepositoryBuilder') ->disableOriginalConstructor() ->getMock(); $projectPackage = $this->getMock('Composer\Package\PackageInterface'); $instPackage1 = $this->getMock('Composer\Package\PackageInterface'); $instPackage2 = $this->getMock('Composer\Package\PackageInterface'); $installationManager = $this->getMockBuilder('Composer\Installer\InstallationManager') ->disableOriginalConstructor() ->getMock(); $installationManager->expects($this->any()) ->method('getInstallPath') ->will($this->returnCallback(function (PackageInterface $package) use ($instPackage1, $instPackage2) { if ($package === $instPackage1) { return '/inst1/dir'; } if ($package === $instPackage2) { return '/inst2/dir'; } return '/unknown'; })); // Configure $dumper = new RepositoryDumper(); $dumper->setVendorDir($vendorDir); $dumper->setProjectPackage($projectPackage); $dumper->setInstalledPackages(array($instPackage1, $instPackage2)); $dumper->setRepositoryBuilder($builder); // Expectations $builder->expects($this->at(0)) ->method('loadPackage') ->with($projectPackage); $builder->expects($this->at(1)) ->method('loadPackage') ->with($instPackage1); $builder->expects($this->at(2)) ->method('loadPackage') ->with($instPackage2); $builder->expects($this->at(3)) ->method('buildRepository') ->with($this->isInstanceOf('Puli\Repository\ManageableRepositoryInterface')) ->will($this->returnCallback(function (ManageableRepositoryInterface $repo) { $repo->add('/file', __FILE__); })); // Go $dumper->dumpRepository(); // Check that the file has been created $this->assertFileExists($vendorDir.'/resource-repository.php'); // Load and test $generatedRepo = require ($vendorDir.'/resource-repository.php'); $this->assertInstanceOf('Puli\Repository\ResourceRepositoryInterface', $generatedRepo); $this->assertTrue($generatedRepo->contains('/file')); $this->assertSame(__FILE__, $generatedRepo->get('/file')->getLocalPath()); $this->assertFalse($generatedRepo->contains('/foo')); } }
6fa846fe96163b1142c624a161dcc32b7b5b7245
[ "Markdown", "PHP" ]
9
Markdown
hason/composer-puli-plugin
cc4a65e21cadf1136337e6d1569408d4bc96b8cc
fbc91127c83d97adf6321c0abb52ad511d2620d9
refs/heads/main
<repo_name>mattdavis1337/nftVwr<file_sep>/priv/static/js/tile_engine.module.js import { EventDispatcher, MOUSE, } from './three.module.js'; import { TWEEN } from "./tween.module.min.js" import { TrackballControls } from "./TrackballControls.js"; import { CSS3DRenderer, CSS3DObject } from "./CSS3DRenderer.js"; class TileEngine extends EventDispatcher { constructor( object, domElement ) { this.initScene = function(){ container = document.createElement( 'div' ); document.body.appendChild( container ); scene = new THREE.Scene(); } } }<file_sep>/README.md # NftVwr Specs for Zilliqa Judges: https://docs.google.com/document/d/1fuo_g4D_VZfBQtlpldwNpvDVBLqQyXZNgxvBcHRR44E/edit?usp=sharing **To start your Phoenix server:** 1) Clone this repo 2) Install dependencies with `mix deps.get` 3) (optional) Create and migrate your database with `mix ecto.setup` 4) Install Node.js dependencies with `npm install` inside the `assets` directory 5) from NftVwr root folder, start Phoenix endpoint with `mix phx.server` in project root Now you can now visit [`localhost:4001`](http://localhost:4001) from your browser. ** Dependencies** Phoenix/Elixir 1.9.1, Mix 1.9.1 (compiled with Erlang/OTP 22) - install guides: https://hexdocs.pm/phoenix/installation.html#content Node 14.17.0 (suggested use nvm: https://heynode.com/tutorial/install-nodejs-locally-nvm) This repo: https://github.com/UI369/nftVwr ## Learn more * Postgres: https://www.postgresql.org/ (Postgres - not needed for the project, but just incase there are dependencies I neglected to remove from core engine. I've only tested on a machine that has Postgres installed). Use Case Supported: “User signs in with Zilpay and imports their NFTs to the platform.” User loads the home page: Show the Tile area with existing tiles and any blank spaces. Initiator: User clicks the “Zilpay” button. Signal Zilpay to authenticate when the Zilpay button is pressed. Reveal a 3x3 grid interface - 9 tile slots ready to receive a tile that represents an NFT - and the user’s Zilpay bech32 wallet address. Clicking a tile on the 3x3 grid that prompts the user to enter an NFT contract address using a simple javascript prompt. <file_sep>/priv/static/js/modal.js /*// Get the modal var modal = document.getElementById("modal-content"); // Get the button that opens the modal var btn = document.getElementById("modal-button"); // Get the <span> element that closes the modal var span = document.getElementById("modal-close"); // When the user clicks on <span> (x), close the modal span.onclick = function() { console.log("close clicked"); modal.style.display = "none"; } // When the user clicks anywhere outside of the modal, close it window.onclick = function(event) { if (event.target == modal) { modal.style.display = "none"; } } function outsideClick(e) { console.log(e); if(e.target.id == "modal-button"){ var event = new Event("modalButtonClicked"); console.dir(document.getElementById("modal-text")); event.data = document.getElementById("modal-text").value; console.log('dispatching event'); console.dir(event); document.dispatchEvent(event); } if (e.target.closest(".modal-inner")) { return; } const modalVisible = document.querySelector(".modal-visible"); if (modalVisible) { closeModal(); } } function escKey(evt) { var isEscape = false; if ("key" in evt) { isEscape = (evt.key === "Escape" || evt.key === "Esc"); } else { isEscape = (evt.keyCode === 27); } if (isEscape) { closeModal(); } } function closeClick(e) { console.log('closeClick'); if (e.target.classList.contains("closeModal")) { closeModal(); } } function trapTabKey(e) { let isTabPressed = e.key === 'Tab' || e.keyCode === 9; if (!isTabPressed) { return; } const vanillaModal = document.querySelector(".vanilla-modal"); const FOCUSABLE_ELEMENTS = [ "a[href]", "area[href]", 'input:not([disabled]):not([type="hidden"]):not([aria-hidden])', "select:not([disabled]):not([aria-hidden])", "textarea:not([disabled]):not([aria-hidden])", "button:not([disabled]):not([aria-hidden])", "iframe", "object", "embed", "[contenteditable]", '[tabindex]:not([tabindex^="-"])', ]; const nodes = vanillaModal.querySelectorAll(FOCUSABLE_ELEMENTS); let focusableNodes = Array(...nodes); if (focusableNodes.length === 0) return; focusableNodes = focusableNodes.filter((node) => { return node.offsetParent !== null; }); // if disableFocus is true if (!vanillaModal.contains(document.activeElement)) { focusableNodes[0].focus(); } else { const focusedItemIndex = focusableNodes.indexOf(document.activeElement); if (e.shiftKey && focusedItemIndex === 0) { focusableNodes[focusableNodes.length - 1].focus(); e.preventDefault(); } if ( !e.shiftKey && focusableNodes.length > 0 && focusedItemIndex === focusableNodes.length - 1 ) { focusableNodes[0].focus(); e.preventDefault(); } } } function closeModal() { const vanillaModal = document.querySelector(".vanilla-modal"); if (vanillaModal) { vanillaModal.classList.remove("modal-visible"); document.getElementById("modal-content").innerHTML = ""; document.getElementById("modal-content").style = ""; } document.removeEventListener("keydown", escKey, false); //document.removeEventListener("click", outsideClick, true); //document.removeEventListener("click", closeClick); document.removeEventListener("keydown", trapTabKey, false); } const modal = { init: function () { const prerendredModal = document.createElement("div"); prerendredModal.classList.add("vanilla-modal"); const htmlModal = ` <div class="modal"> <div class="modal-inner" ><div id="modal-content"></div></div></div>`; prerendredModal.innerHTML = htmlModal; document.body.appendChild(prerendredModal); }, open: function (idContent, option = { default: null }) { let vanillaModal = document.querySelector(".vanilla-modal"); if (!vanillaModal) { console.log("there is no vanilla modal class"); modal.init(); vanillaModal = document.querySelector(".vanilla-modal"); } const content = document.getElementById(idContent); let currentModalContent = content.cloneNode(true); currentModalContent.classList.add("current-modal"); currentModalContent.style = ""; document.getElementById("modal-content").appendChild(currentModalContent); if (!option.default) { if (option.width && option.height) { document.getElementById("modal-content").style.width = option.width; document.getElementById("modal-content").style.height = option.height; } } vanillaModal.classList.add("modal-visible"); // document.addEventListener("click", outsideClick, true); document.addEventListener("keydown", escKey); document.addEventListener("keydown", trapTabKey); //document.getElementById("modal-content").addEventListener("click", closeClick, true); }, close: function () { closeModal(); }, }; // for webpack es6 use uncomment the next line // export default modal; */<file_sep>/assets/js/socket.js // NOTE: The contents of this file will only be executed if // you uncomment its entry in "assets/js/app.js". // To use Phoenix channels, the first step is to import Socket, // and connect at the socket path in "lib/web/endpoint.ex". // // Pass the token on params as below. Or remove it // from the params if you are not using authentication. import {Socket} from "phoenix" let socket = new Socket("/socket", {params: {token: window.userToken}}) // When you connect, you'll often need to authenticate the client. // For example, imagine you have an authentication plug, `MyAuth`, // which authenticates the session and assigns a `:current_user`. // If the current user exists you can assign the user's token in // the connection for use in the layout. // // In your "lib/web/router.ex": // // pipeline :browser do // ... // plug MyAuth // plug :put_user_token // end // // defp put_user_token(conn, _) do // if current_user = conn.assigns[:current_user] do // token = Phoenix.Token.sign(conn, "user socket", current_user.id) // assign(conn, :user_token, token) // else // conn // end // end // // Now you need to pass this token to JavaScript. You can do so // inside a script tag in "lib/web/templates/layout/app.html.eex": // // <script>window.userToken = "<%= assigns[:user_token] %>";</script> // // You will need to verify the user token in the "connect/3" function // in "lib/web/channels/user_socket.ex": // // def connect(%{"token" => token}, socket, _connect_info) do // # max_age: 1209600 is equivalent to two weeks in seconds // case Phoenix.Token.verify(socket, "user socket", token, max_age: 1209600) do // {:ok, user_id} -> // {:ok, assign(socket, :user, user_id)} // {:error, reason} -> // :error // end // end // // Finally, connect to the socket: socket.connect() let game_loaded = false; // Now that you are connected, you can join channels with a topic: let gameLobby = socket.channel("game:lobby", {}) gameLobby.join() .receive("ok", resp => { console.log("Joined successfully", resp) }) .receive("error", resp => { console.log("Unable to join", resp) }) document.addEventListener( 'doJoinChannel', function(event) { console.log("doJoinChannel event received. Joining " + event.channel + ":" + event.channel_id) let eventChannel = socket.channel(event.channel + ":" + event.channel_id, {}); //TODOMFD: THis is a hard coded Database ID on the back end, needs changed! eventChannel.join() .receive("ok", resp => { var event = new Event("drawBoard"); console.log("Server response:"); console.dir(resp); event.board = resp.board; window.dispatchEvent(event); }) .receive("error", resp => { console.log("Unable to join " + event.channelID, resp) }); }); document.addEventListener( 'loadMyNFTs', function(event){ console.log("loadMyNFTs event received. Firing load_nfts"); console.dir(event); gameLobby.push('load_nfts', {event}).receive("ok", resp => { resp.forEach(element => {var event = new Event("newTile"); event.tile = element; window.dispatchEvent(event);})}); }) if(!game_loaded){ console.log("firing initGame"); var event = new Event("initGame"); game_loaded = true; window.dispatchEvent(event); } export default socket
6a09a8a17f0d2cea6e90e354864b1a5091bf6428
[ "JavaScript", "Markdown" ]
4
JavaScript
mattdavis1337/nftVwr
ef1d3618537115d3d315251a9140a9316b7745a8
c43a5fcab9af1c90635fdb9e1d6006a52e5f6af9
refs/heads/master
<file_sep>/** * Retrieves a list of player hardware tokens * @param {string} src The player server id * @returns {string[]} A list of player tokens */ export function GetTokens(src: string): string[] { const tokens: string[] = []; const tokenCount: number = GetNumPlayerTokens(src); for (let i = 0; i < tokenCount; i++) tokens.push(GetPlayerToken(src, i)); return tokens; }<file_sep>import { Collection, Cursor, FilterQuery, FindOneOptions, WithoutProjection } from "mongodb"; import { Wait } from "../../../../shared/utils"; import DbMain from "./main"; interface IFindParams { collection: string; filter: FilterQuery<any>; options?: WithoutProjection<FindOneOptions<any>> | FindOneOptions<any> } interface IFindCallback { (result: any): void; } class DbRead extends DbMain { /** * Initialise class members * @constructor */ constructor() { super(); const Exports = (global as any).exports; //Create exports Exports("find", this.Find); Exports("findOne", (params: IFindParams, callback?: IFindCallback) => this.Find(params, callback, true)); Exports("findMany", this.Find); } /** * Finds one or more matched documents in a collection * @param {IFindParams} params The find object * @param {string} params.collection The MongoDB collection name * @param {FilterQuery<any>} params.filter MongoDB filter query * @param {WithoutProjection<FindOneOptions<any>> | FindOneOptions<any>} params.options MongoDB find options * @param {IFindCallback} callback The callback object * @param {boolean} one Whether to use findOne or find * @returns {Promise<void>} Empty promise */ public async Find(params: IFindParams, callback?: IFindCallback, one: boolean = false): Promise<void> { if (!this.initialised) { while (!this.initialised) { await Wait(10); } } if (!params.collection) { console.warn("MongoDB couldn't find collection in params"); return; } if (!params.filter) { console.warn("MongoDB couldn't find filter query in params"); return; } const collection: Collection = this.database.collection(params.collection); if (one) { const find: any = await collection.findOne(params.filter, params.options); if (callback) callback(find); return; } const find: Cursor<any> = await collection.find(params.filter, params.options); const result: Array<any> = await find.toArray(); if (callback) callback(result); } } export default new DbRead();<file_sep>import { ObjectId } from "mongodb"; export interface IUserSchema { _id?: ObjectId, ids: { [id: string]: string, license?: string, license2?: string, steam?: string, discord?: string, xbl?: string, live?: string, ip?: string, fivem?: string, }, tokens: string[], ban?: { reason: string, expire?: number, permanent?: boolean } }<file_sep>import { hexToDec } from "hex2dec"; export interface IIdentifierList { [id: string]: string; } /** * Retrieves a list of player identifiers (steam, license, license2, xbl, live, ip, discord, fivem) * @param src The player server id * @returns A list of player identifiers */ export function GetIdentifiers(src: string): IIdentifierList { const ids: IIdentifierList = {} for (let i = 0; i < GetNumPlayerIdentifiers(src); i++) { const data: string[] = GetPlayerIdentifier(src, i).split(':'); if (data.length > 0) { if (data[0] == "steam") data[1] = hexToDec(data[1]); ids[data[0]] = data[1]; } } if (!ids.ip) ids.ip = GetPlayerEndpoint(src); return ids; }<file_sep>import { HasRequiredIdentifiers } from "../user/identifer"; import { Collection, Db, FindOneOptions, InsertOneWriteOpResult, ObjectId } from "mongodb"; import { GetIdentifiers, IIdentifierList } from "../../../shared/utils/identifier"; import { GetTokens } from "../../../shared/utils/token"; import { BuildUserFindQuery, BuildUserInsertQuery, BuildUserUpdateQuery, IUserFindQuery, IUserSchema, UserCollection } from "../user/query"; import { BanCollection, BuildBanFindQuery, IBanFindQuery, IBanSchema } from "../ban/query"; const settings = { checkBan: 0, }; const messages = { checkIdentifiers: "Checking identifiers", checkBan: "Checking for ban", fetch: "Fetching account data", banMessage: "\r\nYou have been banned from this server", banMessagePermanent: "\r\nYou have been permanently banned from the server", banExpire: "\r\nExpires:", banReason: "\r\nReason:", banId: "\r\nID:", update: "Updating account data", create: "Creating account" }; interface ICfxDeferral { defer(): void, update(message: string): void, presentCard(card: object | string, cb?: (data: any, rawData: string) => void): void, done(failureReason?: string): void, handover(data: { [key: string]: any }): void, } async function OnPlayerConnected(name: string, deferrals: ICfxDeferral, db: Db): Promise<void> { const src: string = (global as any).source const ids: IIdentifierList = GetIdentifiers(src); const tokens: string[] = GetTokens(src); deferrals.defer(); deferrals.update(messages.checkIdentifiers); //Check if a player has the required identifers needed to play const [found, message] = HasRequiredIdentifiers(ids); if (!found) { deferrals.done(message); return; } //Check if ban checking is enabled if (settings.checkBan) { deferrals.update(messages.checkBan); //Build ban query, look for ban const banCollection: Collection = db.collection(BanCollection); const [banFindQuery, banFindOptions]: [IBanFindQuery, FindOneOptions<any>] = BuildBanFindQuery(ids, tokens); const banFindResult: IBanSchema = await banCollection.findOne(banFindQuery, banFindOptions); //Check if a ban exists if (banFindResult) { //Reject if permanent if (banFindResult.ban.permanent) { deferrals.done(`${messages.banMessagePermanent} ${messages.banReason} ${banFindResult.ban.reason} ${messages.banId} ${banFindResult._id.toHexString()}`); return; } const currentTime: number = Date.now(); const expireTime: number = banFindResult.ban.expire * 1000; //Reject if not expired if (currentTime < expireTime) { const expireDate = new Date(expireTime); deferrals.done(`${messages.banMessage} ${messages.banExpire} ${expireDate.toUTCString()} ${messages.banReason} ${banFindResult.ban.reason} ${messages.banId} ${banFindResult._id.toHexString()}`); return; } //Remove expired ban banCollection.updateOne({ _id: banFindResult._id }, { $unset: { ban: 1 } }); } } deferrals.update(messages.fetch); //Build user query, look for user const userCollection: Collection = db.collection(UserCollection); const userFindQuery: IUserFindQuery = BuildUserFindQuery(ids); const userFindResult: IUserSchema = await userCollection.findOne(userFindQuery); let userId: ObjectId; if (userFindResult) { //Build update query, update if necessary deferrals.update(messages.update); const [userUpdateFilter, userUpdateQuery, userUpdateRequired] = BuildUserUpdateQuery(userFindResult, ids, tokens); if (userUpdateRequired) userCollection.updateOne(userUpdateFilter, userUpdateQuery); userId = userFindResult._id; } else { //Build create query, create user deferrals.update(messages.create); const userInsertQuery: IUserSchema = BuildUserInsertQuery(ids, tokens); const userInsertResult: InsertOneWriteOpResult<any> = await userCollection.insertOne(userInsertQuery); userId = userInsertResult.insertedId; } deferrals.done("Leave"); } export default function Init(db: Db): void { settings.checkBan = GetConvarInt("queue_check_bans", 1); on("playerConnecting", (name: string, _setKickReason: (reason: string) => void, deferrals: ICfxDeferral) => OnPlayerConnected(name, deferrals, db)); }<file_sep>import { Db } from "mongodb"; import Connector from "./connector"; import DbCreate from "./crud/create"; import DbRead from "./crud/read"; import DbUpdate from "./crud/update"; import DbDelete from "./crud/delete"; let database: Db = null; function OnConnectSuccess(result: boolean) { database = Connector.Database; DbCreate.Init(database, result); DbRead.Init(database, result); DbUpdate.Init(database, result); DbDelete.Init(database, result); } function OnConnectError(error: Error) { console.error(`MongoDB ${error.message}`); } export default async function Init(): Promise<Db> { await Connector.Connect().then(OnConnectSuccess).catch(OnConnectError); return database; }<file_sep>fx_version "cerulean" games { "gta5" } author "<NAME>" description "A central management interface for handling data" version "1.0" client_script "dist/client/*.client.js" server_script "dist/server/*.server.js"<file_sep>import DbMain from "./main"; import { Wait } from "../../../../shared/utils"; import { Collection, FilterQuery, ObjectId, UpdateManyOptions, UpdateOneOptions, UpdateQuery, UpdateWriteOpResult } from "mongodb"; interface IUpdateParams { collection: string; filter: FilterQuery<any>; update: UpdateQuery<any> | Partial<any>; options?: UpdateOneOptions | UpdateManyOptions; } interface IUpdateCallback { (success: number, matchedCount: number, modifiedCount: number, upsertedCount: number, upsertedId?: { _id: ObjectId }): void; } class DbUpdate extends DbMain { /** * Initialise class members * @constructor */ constructor() { super(); const Exports = (global as any).exports; //Create exports Exports("update", this.Update); Exports("updateOne", this.Update); Exports("updateMany", (params: IUpdateParams, callback?: IUpdateCallback) => this.Update(params, callback, true)); } /** * Updates one or more matched documents in a collection * @param {IUpdateParams} params The update object * @param {string} params.collection The MongoDB collection name * @param {FilterQuery<any>} params.filter MongoDB filter query * @param {UpdateQuery<any> | Partial<any>} params.update MongoDB update query * @param {UpdateOneOptions | UpdateManyOptions} params.options MongoDB update options * @param {IUpdateCallback} callback The callback object * @param {boolean} many Whether to use updateMany or updateOne * @returns {Promise<void>} Empty promise */ public async Update(params: IUpdateParams, callback?: IUpdateCallback, many: boolean = false): Promise<void> { if (!this.initialised) { while (!this.initialised) { await Wait(10); } } if (!params.collection) { console.warn("MongoDB couldn't find collection in params"); return; } if (!params.filter) { console.warn("MongoDB couldn't find filter query in params"); return; } if (!params.update) { console.warn("MongoDB couldn't find update query in params"); return; } const collection: Collection = this.database.collection(params.collection); if (many) { const update: UpdateWriteOpResult = await collection.updateMany(params.filter, params.update, params.options); if (callback) callback(update.result.ok, update.matchedCount, update.modifiedCount, update.upsertedCount, update.upsertedId); return; } const update: UpdateWriteOpResult = await collection.updateOne(params.filter, params.update, params.options); if (callback) callback(update.result.ok, update.matchedCount, update.modifiedCount, update.upsertedCount, update.upsertedId); return; } } export default new DbUpdate();<file_sep>import { FindOneOptions, ObjectId } from "mongodb"; import { IIdentifierList } from "../../../shared/utils/identifier"; import { RequiredIdentifiers, RequiredIdentifier } from "../user/identifer"; import { UserCollection } from "../user/query"; interface IBanFindQueryItem { [field: string]: any } export interface IBanFindQuery { $or: Array<IBanFindQueryItem> } export interface IBanSchema { _id: ObjectId, ban?: { reason: string, expire?: number, permanent?: boolean } } /** * Creates a $or: [ id, id ] query based on the required and optional identifers and tokens * @param {IIdentifierList} ids Collection of player identifiers * @param {string[]} tokens Collection of player tokens * @returns {IBanFindQuery} Formatted MongoDB query for finding a banned user document */ export function BuildBanFindQuery(ids: IIdentifierList, tokens: string[]): [IBanFindQuery, FindOneOptions<any>] { let query: IBanFindQuery = { $or: [] }; const options: FindOneOptions<any> = { projection: { "_id": 1, "ban.permanent": 1, "ban.expire": 1, "ban.reason": 1 } }; // Collect all the required fields and push them into the $or array for (let reqIdx = 0; reqIdx < RequiredIdentifiers.length; reqIdx++) { const reqId = RequiredIdentifiers[reqIdx]; const item: IBanFindQueryItem = {}; item[`ids.${reqId}`] = ids[reqId]; query.$or.push(item); } // Collect all the optional fields and push them into the $or array for (let idx = 0; idx < RequiredIdentifier.length; idx++) { const id = RequiredIdentifier[idx]; if (ids[id]) { const item: IBanFindQueryItem = {}; item[`ids.${id}`] = ids[id]; query.$or.push(item); } } // Push all the player token into the $or array query.$or.push({ tokens: { $in: tokens } }); // Make sure the ban document exists within every $or array element for (let item = 0; item < query.$or.length; item++) { query.$or[item].ban = { $exists: true }; } return [query, options]; } export const BanCollection: string = UserCollection;<file_sep>import { Collection, CommonOptions, DeleteWriteOpResultObject, FilterQuery } from "mongodb"; import { Wait } from "../../../../shared/utils"; import DbMain from "./main"; interface IDeleteParams { collection: string; filter: FilterQuery<any>; options?: CommonOptions | CommonOptions & { bypassDocumentValidation?: boolean; } } interface IDeleteCallback { (success: number, deletedCount: number): void; } class DbDelete extends DbMain { /** * Initialise class members * @constructor */ constructor() { super(); const Exports = (global as any).exports; //Create exports Exports("delete", this.Delete); Exports("deleteOne", this.Delete); Exports("deleteMany", (params: IDeleteParams, callback?: IDeleteCallback) => this.Delete(params, callback, true)); } /** * Deletes one or more matched documents in a collection * @param {IDeleteParams} params The delete object * @param {string} params.collection The MongoDB collection name * @param {FilterQuery<any>} params.filter MongoDB filter query * @param {CommonOptions | CommonOptions & { bypassDocumentValidation?: boolean; }} params.options MongoDB delete options * @param {IDeleteCallback} callback The callback object * @param {boolean} many Whether to use deleteMany or deleteOne * @returns {Promise<void>} Empty promise */ public async Delete(params: IDeleteParams, callback?: IDeleteCallback, many: boolean = false): Promise<void> { if (!this.initialised) { while (!this.initialised) { await Wait(10); } } if (!params.collection) { console.warn("MongoDB couldn't find collection in params"); return; } if (!params.filter) { console.warn("MongoDB couldn't find filter query in params"); return; } const collection: Collection = this.database.collection(params.collection); if (many) { const _delete: DeleteWriteOpResultObject = await collection.deleteMany(params.filter, params.options); if (callback) callback(_delete.result.ok, _delete.deletedCount); return; } const _delete: DeleteWriteOpResultObject = await collection.deleteOne(params.filter, params.options); if (callback) callback(_delete.result.ok, _delete.deletedCount); return; } } export default new DbDelete();<file_sep># breach-base A central management interface for handling data ## Dependencies * [Yarn](https://classic.yarnpkg.com/en/docs/install/) in your PATH as `yarn`. ## Building Execute the following commands in a `cmd.exe` shell: ```bat rem Install dependencies yarn install rem Build for production yarn build rem Build for development yarn watch ```<file_sep>import { Collection, CollectionInsertManyOptions, CollectionInsertOneOptions, Db, InsertOneWriteOpResult, InsertWriteOpResult, ObjectId } from "mongodb"; import { Wait } from "../../../../shared/utils"; import DbMain from "./main"; interface IInsertParams { collection: string; document: {} | Array<{}>; options?: CollectionInsertOneOptions | CollectionInsertManyOptions; } interface IInsertCallback { (success: number, count: number, insertedId: ObjectId | { [key: number]: any; }): void; } class DbCreate extends DbMain { /** * Initialise class members * @constructor */ constructor() { super(); const Exports = (global as any).exports; //Create exports Exports("insert", this.Insert); Exports("insertOne", this.Insert); Exports("insertMany", this.Insert); } /** * Insert one or more documents into a collection * @param {IInsertParams} params The insert object * @param {string} params.collection The MongoDB collection name * @param {{} | Array<{}>} params.document MongoDB documents to be inserted * @param {IInsertCallback} callback The callback object * @returns {Promise<void>} Empty promise */ public async Insert(params: IInsertParams, callback?: IInsertCallback): Promise<void> { if (!this.initialised) { while (!this.initialised) { await Wait(10); } } if (!params.collection) { console.warn("MongoDB couldn't find collection in params"); return; } if (!params.document) { console.warn("MongoDB couldn't find document(s) in params"); return; } const collection: Collection = this.database.collection(params.collection); const insertMany: boolean = Array.isArray(params.document); if (insertMany) { const insert: InsertWriteOpResult<any> = await collection.insertMany(params.document as Array<{}>, params.options); if (callback) callback(insert.result.ok, insert.insertedCount, insert.insertedIds); return; } const insert: InsertOneWriteOpResult<any> = await collection.insertOne(params.document, params.options); if (callback) callback(insert.result.ok, insert.insertedCount, insert.insertedId); return; } } export default new DbCreate();<file_sep>// webpack.common.js const path = require("path"); const RemovePlugin = require("remove-files-webpack-plugin"); const buildPath = path.resolve(__dirname, "dist"); const clientBuildPath = path.resolve(buildPath, "client"); const serverBuildPath = path.resolve(buildPath, "server"); const main = { resolve: { extensions: [".tsx", ".ts", ".js"], }, }; const client = { ...main, entry: path.resolve(__dirname, "./src/client/index.ts"), output: { filename: "[fullhash].client.js", path: clientBuildPath, }, plugins: [ new RemovePlugin({ before: { include: [ clientBuildPath ] }, watch: { include: [ clientBuildPath ] } }), ], }; const server = { ...main, entry: path.resolve(__dirname, "./src/server/index.ts"), output: { filename: "[fullhash].server.js", path: serverBuildPath, }, plugins: [ new RemovePlugin({ before: { include: [ serverBuildPath ] }, watch: { include: [ serverBuildPath ] } }), ], target: "node", }; module.exports = [client, server];<file_sep># MongoDB Database connector module ## Server Convars ```c # Credentials set mongodb_username "" set mongodb_password "" # Auth database set mongodb_authdb "" # IP Address set mongodb_host "" # Port set mongodb_port "" # Database to use set mongodb_db "" ``` ## Exports ### Insert exports ```ts interface IInsertParams { collection: string; document: {} | Array<{}>; options?: CollectionInsertOneOptions | CollectionInsertManyOptions; } interface IInsertCallback { (success: number, count: number, insertedId: ObjectId | { [key: number]: any; }): void; } /** * Insert one or more documents into a collection * @param {IInsertParams} params The insert object * @param {string} params.collection The MongoDB collection name * @param {{} | Array<{}>} params.document MongoDB documents to be inserted * @param {CollectionInsertOneOptions | CollectionInsertManyOptions} params.options MongoDB insert options * @param {IInsertCallback} callback The callback object */ exports["breach-base"].insert(params: IInsertParams, callback?: IInsertCallback); exports["breach-base"].insertOne(params: IInsertParams, callback?: IInsertCallback); exports["breach-base"].insertMany(params: IInsertParams, callback?: IInsertCallback); /** * Example usage */ exports["breach-base"].insert({ collection: "name", document: { field: "value" } }, function(success, count, insertedId) { console.log(`Success: ${success}, Count: ${count}, Id: ${insertedId}`); }); /** * Example usage (insertMany) */ exports["breach-base"].insert({ collection: "name", document: [ { field: "value" }, { field: "value2" }, ] }, function(success, count, insertedId) { console.log(`Success: ${success}, Count: ${count}, Id: ${insertedId}`); }); ``` ### Find exports ```ts interface IFindParams { collection: string; filter: FilterQuery<any>; options?: WithoutProjection<FindOneOptions<any>> | FindOneOptions<any> } interface IFindCallback { (result: any): void; } /** * Finds one or more matched documents in a collection * @param {IFindParams} params The find object * @param {string} params.collection The MongoDB collection name * @param {FilterQuery<any>} params.filter MongoDB filter query * @param {WithoutProjection<FindOneOptions<any>> | FindOneOptions<any>} params.options MongoDB find options * @param {IFindCallback} callback The callback object * @param {boolean} one Whether to use findOne or find */ exports["breach-base"].find(params: IFindParams, callback?: IFindCallback, one: boolean = false); exports["breach-base"].findOne(params: IFindParams, callback?: IFindCallback); exports["breach-base"].findMany(params: IFindParams, callback?: IFindCallback, one: boolean = false); /** * Example usage (findOne) */ exports["breach-base"].find({ collection: "name", filter: { field: "value" }, }, function(results) { console.log(results); }, true); /** * Example usage (findMany) */ exports["breach-base"].find({ collection: "name", filter: { field: "value" }, }, function(results) { console.log(results); }); ``` ### Update exports ```ts interface IUpdateParams { collection: string; filter: FilterQuery<any>; update: UpdateQuery<any> | Partial<any>; options?: UpdateOneOptions | UpdateManyOptions; } interface IUpdateCallback { (success: number, matchedCount: number, modifiedCount: number, upsertedCount: number, upsertedId?: { _id: ObjectId }): void; } /** * Updates one or more matched documents in a collection * @param {IUpdateParams} params The update object * @param {string} params.collection The MongoDB collection name * @param {FilterQuery<any>} params.filter MongoDB filter query * @param {UpdateQuery<any> | Partial<any>} params.update MongoDB update query * @param {UpdateOneOptions | UpdateManyOptions} params.options MongoDB update options * @param {IUpdateCallback} callback The callback object * @param {boolean} many Whether to use updateMany or updateOne */ exports["breach-base"].update(params: IUpdateParams, callback?: IUpdateCallback, many: boolean = false); exports["breach-base"].updateOne(params: IUpdateParams, callback?: IUpdateCallback, many: boolean = false); exports["breach-base"].updateMany(params: IUpdateParams, callback?: IUpdateCallback); /** * Example usage (updateOne) */ exports["breach-base"].update({ collection: "name", filter: { field: "value" }, update: { $set: { field: "value update", } }, }, function(success, matchedCount, modifiedCount, upsertedCount, upsertedId) { console.log(`s: ${success}, matc: ${matchedCount}, modc: ${modifiedCount}, uc: ${upsertedCount}`); console.log(upsertedId); }); /** * Example usage (updateMany) */ exports["breach-base"].update({ collection: "name", filter: { field: "value" }, update: { $set: { field: "value update", } }, }, function(success, matchedCount, modifiedCount, upsertedCount, upsertedId) { console.log(`s: ${success}, matc: ${matchedCount}, modc: ${modifiedCount}, uc: ${upsertedCount}`); console.log(upsertedId); }, true); ``` ### Delete exports ```ts interface IDeleteParams { collection: string; filter: FilterQuery<any>; options?: CommonOptions | CommonOptions & { bypassDocumentValidation?: boolean; } } interface IDeleteCallback { (success: number, deletedCount: number): void; } /** * Deletes one or more matched documents in a collection * @param {IDeleteParams} params The delete object * @param {string} params.collection The MongoDB collection name * @param {FilterQuery<any>} params.filter MongoDB filter query * @param {CommonOptions | CommonOptions & { bypassDocumentValidation?: boolean; }} params.options MongoDB delete options * @param {IDeleteCallback} callback The callback object * @param {boolean} many Whether to use deleteMany or deleteOne */ exports["breach-base"].delete(params: IDeleteParams, callback?: IDeleteCallback, many: boolean = false); exports["breach-base"].deleteOne(params: IDeleteParams, callback?: IDeleteCallback, many: boolean = false); exports["breach-base"].deleteMany(params: IDeleteParams, callback?: IDeleteCallback); /** * Example usage (deleteOne) */ exports["breach-base"].delete({ collection: "name", filter: { field: "value" }, }, function(success, count) { console.log(`s: ${success}, num: ${count}`); }); /** * Example usage (deleteOne) */ exports["breach-base"].delete({ collection: "name", filter: { field: "value" }, }, function(success, count) { console.log(`s: ${success}, num: ${count}`); }, true); ```<file_sep>import { IIdentifierList } from "../../../../shared/utils/identifier"; import { IUserSchema } from "./schema"; export function BuildUserInsertQuery(ids: IIdentifierList, tokens: string[]): IUserSchema { const document: IUserSchema = { ids: {}, tokens: tokens }; for (let key in ids) document.ids[key] = ids[key]; return document; }<file_sep>import { ObjectId } from "mongodb"; import { IIdentifierList } from "../../../../shared/utils/identifier"; import { RequiredIdentifier, RequiredIdentifiers } from "../identifer"; import { IUserSchema } from "./schema"; interface IUserUpdateQueryItem { [field: string]: any } export interface IUserUpdateQuery { $set?: IUserUpdateQueryItem, $addToSet?: IUserUpdateQueryItem } export interface IUserUpdateFilter { _id: ObjectId, } /** * Creates an update query based on user identfiers and tokens * @param {IUserSchema} data user document retrieved from the a find query * @param {IIdentifierList} ids Collection of player identifiers * @param {string[]} tokens Collection of player tokens * @returns {[IUserUpdateFilter, IUserUpdateQuery, boolean]} Formatted MongoDB filter & query for updating a user document and if anything has changed */ export function BuildUserUpdateQuery(data: IUserSchema, ids: IIdentifierList, tokens: string[]): [IUserUpdateFilter, IUserUpdateQuery, boolean] { let newTokens: string[] = []; let newIdentifiers: IIdentifierList = {}; //Collect new tokens if (tokens.length > 0) { if (data.tokens.length == 0) newTokens = tokens; else { for (let i = 0; i < tokens.length; i++) { const token = tokens[i]; if (!data.tokens.includes(token)) newTokens.push(token); } } } //Collect new or changed identifers for (let key in ids) { const id = ids[key]; if (!data.ids[key]) newIdentifiers[key] = id; else if (data.ids[key] != id && !RequiredIdentifier.includes(key) && !RequiredIdentifiers.includes(key)) newIdentifiers[key] = id; } const filter: IUserUpdateFilter = { _id: data._id }; const update: IUserUpdateQuery = {}; const newIdentifiersCount = Object.keys(newIdentifiers).length; //Return if nothing needs updating if (newTokens.length == 0 && newIdentifiersCount == 0) return [filter, update, false]; //Insert new or changed identifers into query if (newIdentifiersCount > 0) { update.$set = {}; for (let key in newIdentifiers) update.$set[`ids.${key}`] = newIdentifiers[key]; } //Insert new tokens into query if (newTokens.length > 0) { update.$addToSet = {}; update.$addToSet.tokens = { $each: newTokens }; } return [filter, update, true]; }<file_sep>import { IIdentifierList } from "../../../../shared/utils/identifier"; import { RequiredIdentifiers, RequiredIdentifier } from "../identifer"; interface IUserFindQueryItem { [field: string]: any } export interface IUserFindQuery { $or: Array<IUserFindQueryItem> } /** * Creates a $or: [ ids, ids ] query based on the required and optional identifers available * @param {IIdentifierList} ids Collection of player identifiers * @returns {IUserFindQuery} Formatted MongoDB query for finding a user document */ export function BuildUserFindQuery(ids: IIdentifierList) { let query: IUserFindQuery = { $or: [] }; let optionalCount = 0; // Push all required with a single optional in a $and entry for (let idx = 0; idx < RequiredIdentifier.length; idx++) { const id = RequiredIdentifier[idx]; if (ids[id]) { let item: IUserFindQueryItem = {}; // Collect all the required fields in an array for (let reqIdx = 0; reqIdx < RequiredIdentifiers.length; reqIdx++) { const reqId = RequiredIdentifiers[reqIdx]; item[`ids.${reqId}`] = ids[reqId]; } item[`ids.${id}`] = ids[id]; query.$or.push(item); optionalCount++; } } if (optionalCount > 1) { // Push all required and optional identifers in a $and entry let item: IUserFindQueryItem = {}; // Collect all the required fields in an array for (let reqIdx = 0; reqIdx < RequiredIdentifiers.length; reqIdx++) { const reqId = RequiredIdentifiers[reqIdx]; item[`ids.${reqId}`] = ids[reqId]; } // Push all optional in a $and entry for (let idx = 0; idx < RequiredIdentifier.length; idx++) { const id = RequiredIdentifier[idx]; if (ids[id]) { item[`ids.${id}`] = ids[id]; } } query.$or.push(item); } return query; }<file_sep>import { Collection, Db, MongoClient, MongoError } from "mongodb"; interface ICvarList { [key: string]: string; } class Connector { private uri: string; private client: MongoClient; private database: Db; private initialised: boolean; private convars: ICvarList; /** * Stores the MongoDB URI * * Initialises the MongoDB Client * @constructor */ constructor() { const _defaultConvar: string = "_mongodb_"; this.convars = { username: GetConvar("mongodb_username", _defaultConvar), password: GetConvar("mongodb_password", _defaultConvar), host: GetConvar("mongodb_host", _defaultConvar), port: GetConvar("mongodb_port", _defaultConvar), authdb: GetConvar("mongodb_authdb", _defaultConvar), db: GetConvar("mongodb_db", _defaultConvar), } for (let key in this.convars) { if (this.convars[key] == _defaultConvar) console.warn(`MongoDB Convar "mongodb_${key}" is not set`); //Replace with proper logger util? } this.uri = this.CreateUri(); this.client = new MongoClient(this.uri, { useUnifiedTopology: true }); this.initialised = false; } /** * Constructs the MongoDB URI * @return {string} The constructed MongoDB URI from settings.json */ private CreateUri(): string { return `mongodb://${encodeURIComponent(this.convars.username)}:${encodeURIComponent(this.convars.password)}@${this.convars.host}:${this.convars.port}/${this.convars.authdb}`; } /** * Creates a connection to MongoDB * * Stores an instance of the database * @return {Promise<boolean>} If the connection to MongoDB was successfully initialised */ public Connect(): Promise<boolean> { let instance: Connector = this; return new Promise((resolve, reject) => { instance.client.connect((error: MongoError) => { if (error) { reject(error); return; } instance.database = instance.client.db(this.convars.db); instance.initialised = true; resolve(instance.initialised); }); }); } /** * Fetches the connection result * @return {boolean} If the connection to MongoDB was successfully initialised */ public get Initialised() : boolean { return this.initialised; } /** * Fetches the database instance * @return {Db} The database instance */ public get Database() : Db { return this.database; } /** * Fetch a database collection * @param collectionName name of collection * @return {string} collection instance */ public Collection(collectionName: string) : Collection { return this.database.collection(collectionName); } } export default new Connector();<file_sep>import { IIdentifierList } from "../../../shared/utils/identifier"; //Identifiers that are required const requiredIdentifiers: string[] = ["license2"]; const requiredIdentifierMessages: string[] = [ "Unable to retrieve your R* Id, is your R* account linked?", ]; //At least one of the identifers are required const requiredIdentifier: string[] = ["discord", "steam"]; const requiredIdentifierMessage: string = "You must link your discord or steam account to join this server!"; /** * Check if a player has the required identifers needed to play * @param {IIdentifierList} ids Collection of player identifiers * @returns {[boolean, string]} Returns if the collection contains the required identifiers and an optional error message */ export function HasRequiredIdentifiers(ids: IIdentifierList): [result: boolean, message?: string] { for (let idx = 0; idx < requiredIdentifiers.length; idx++) { const id = requiredIdentifiers[idx]; if (!ids[id]) return [false, requiredIdentifierMessages[idx]]; } for (let idx = 0; idx < requiredIdentifier.length; idx++) { const id = requiredIdentifier[idx]; if (ids[id]) return [true]; } return [false, requiredIdentifierMessage]; } export const RequiredIdentifiers: string[] = requiredIdentifiers; export const RequiredIdentifier: string[] = requiredIdentifier;<file_sep>import { Db } from "mongodb"; abstract class DbMain { protected initialised: boolean; protected database: Db; /** * Initialise class members * @constructor */ constructor() { this.initialised = false; this.database = null; } /** * Sets the database and connection status * @param _database Database instance * @param _initialised Connection result */ public Init(_database: Db, _initialised: boolean): void { this.database = _database; this.initialised = _initialised; } } export default DbMain;<file_sep>export { IUserSchema } from "./schema"; export { IUserFindQuery, BuildUserFindQuery } from "./find"; export { IUserUpdateFilter, IUserUpdateQuery, BuildUserUpdateQuery } from "./update"; export { BuildUserInsertQuery } from "./insert"; const userCollection: string = "users"; export const UserCollection: string = userCollection;<file_sep>import { Db } from "mongodb"; import DbConnector from "./module/mongodb/index"; import QueueManager from "./module/queue/index"; async function Init() { const db: Db = await DbConnector(); QueueManager(db); } setImmediate(Init);<file_sep>{ "compilerOptions": { "types": ["@citizenfx/server", "@citizenfx/client", "@types/hex2dec"], } }
0185a97106176d5e3bb2d5374d0219855fc3382b
[ "Lua", "Markdown", "JavaScript", "JSON with Comments", "TypeScript" ]
23
TypeScript
FrazzIe/breach-base
2f5a0d5cf66d1315f1e4ce9833e2ce51d4076251
4829a94af0fe6a8153e369b7da32d769277a82aa
refs/heads/master
<repo_name>beneficial02/dic_dejizo_soap<file_sep>/app/src/main/java/com/beneficial02/test/dic_dejizo_soap/MainActivity.java package com.beneficial02.test.dic_dejizo_soap; import android.app.Activity; import android.os.AsyncTask; import android.os.Bundle; import android.util.Log; import org.ksoap2.SoapEnvelope; import org.ksoap2.serialization.SoapObject; import org.ksoap2.serialization.SoapSerializationEnvelope; import org.ksoap2.transport.HttpTransportSE; import java.util.ArrayList; /** * http://dejizo.jp/dev/soap.html */ public class MainActivity extends Activity { static final String TAG = MainActivity.class.getSimpleName(); private static final String SOAP_ACTION = "http://MyDictionary.jp/SOAPServiceV11/SearchDicItem"; private static final String URL = "http://public.dejizo.jp/SoapServiceV11.asmx"; private static final String NAME_SPACE = "http://MyDictionary.jp/SOAPServiceV11"; private static final String METHOD_NAME = "SearchDicItem"; private static final String AuthTicket = ""; //blank private static String targetWord; private static final String SortOrderID = ""; //blank private static final int ItemStartIndex = 0; private static final int ItemCount = 5; private static final int CompleteItemCount = 5; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main); Log.e(TAG, "onCreate: " + new AsyncFindDict().execute() ); } private class AsyncFindDict extends AsyncTask<Void, Void, String> { @Override protected String doInBackground(Void... voids) { SoapObject request = new SoapObject(NAME_SPACE, METHOD_NAME); request.addProperty("AuthTicket", AuthTicket); SoapObject dicIdList = new SoapObject(NAME_SPACE, "DicIDList"); dicIdList.addProperty(NAME_SPACE, "guid", "8a68bb8a-16ee-4b51-afaa-74c277bb600a"); request.addProperty(NAME_SPACE, "DicIDList", dicIdList); SoapObject queryList = new SoapObject(NAME_SPACE, "QueryList"); SoapObject query = new SoapObject(NAME_SPACE, "Query"); /******** TO SEARCH ANOTHER WORD, CHANGE THIS WORD! *********/ targetWord = "かた"; query.addProperty(NAME_SPACE, "Words", targetWord); query.addProperty(NAME_SPACE, "ScopeID", "HEADWORD"); query.addProperty(NAME_SPACE, "MatchOption", "EXACT"); query.addProperty(NAME_SPACE, "MergeOption", "AND"); queryList.addProperty(NAME_SPACE, "Query", query); request.addProperty(NAME_SPACE, "QueryList", queryList); SoapObject contentProfile = new SoapObject(NAME_SPACE, "ContentProfile"); contentProfile.addProperty(NAME_SPACE, "FormatType", "XHTML"); contentProfile.addProperty(NAME_SPACE, "ResourceOption", "URI"); contentProfile.addProperty(NAME_SPACE, "CharsetOption", "UNICODE"); request.addProperty(NAME_SPACE, "ContentProfile", contentProfile); request.addProperty(NAME_SPACE, "SortOrderID", SortOrderID); request.addProperty(NAME_SPACE, "ItemStartIndex", ItemStartIndex); request.addProperty(NAME_SPACE, "ItemCount", ItemCount); request.addProperty(NAME_SPACE, "CompleteItemCount", CompleteItemCount); return callDictAPI(URL, SOAP_ACTION, request); } private String callDictAPI(String strURL, String strSoapAction, SoapObject request) { try { StringBuffer result; SoapSerializationEnvelope envelope = new SoapSerializationEnvelope(SoapEnvelope.VER11); envelope.setOutputSoapObject(request); envelope.setAddAdornments(false); envelope.implicitTypes = true; HttpTransportSE ht = new HttpTransportSE(strURL); ht.debug = true; ht.call(strSoapAction, envelope); SoapObject response = (SoapObject) envelope.bodyIn; Log.e(TAG, "callDictAPI: response: " + response); String dictRawResult = response.getPropertyAsString(1); String substrUntilWord; String word = ""; String substrAfterWord = ""; ArrayList<String> dictResult = new ArrayList<>(); boolean isKana = false; String kanji = ""; if(targetWord.matches("^[\\u3040-\\u309F]+$") || targetWord.matches("^[\\u30A0-\\u30FF]+$")) { Log.e(TAG, "callDictAPI: it's KANA!!"); isKana = true; } for (int i=0; i<((SoapObject)response.getProperty(1)).getPropertyCount(); i++) { if (isKana) { if (i == 0) { kanji = dictRawResult.substring(dictRawResult.indexOf("Title=")); kanji = kanji.substring(kanji.indexOf("span=")+5, kanji.indexOf(";")); } else { Log.e(TAG, "callDictAPI: substrafterword::" + substrAfterWord); kanji = substrAfterWord.substring(substrAfterWord.indexOf("Title=")); kanji = kanji.substring(kanji.indexOf("span=")+5, kanji.indexOf(";")); } } if (i==0) { substrUntilWord = dictRawResult.substring(dictRawResult.indexOf(")")); } else { substrUntilWord = substrAfterWord.substring(substrAfterWord.indexOf(") ")); } word = substrUntilWord.substring(substrUntilWord.indexOf(")")+1, substrUntilWord.indexOf(";")).trim(); substrAfterWord = substrUntilWord.substring(word.length()-1); if (word.contains("(") && word.contains(")")) { word = word.replaceAll("\\(.*?\\) ?", ""); // remove parentheses in 'word' } if (isKana) word = "["+kanji+"]" + " " + word; if (i==0) { dictResult.add(word); } else { if (!dictResult.get(dictResult.size()-1).equals(word)) { dictResult.add(word); } } } Log.e(TAG, "callDictAPI: RESULT!: " + dictResult); result = new StringBuffer(response.toString()); return result.toString(); } catch (Exception e) { e.printStackTrace(); return null; } } @Override protected void onPostExecute(String s) { } } }<file_sep>/README.md # dic_dejizo_soap This is a sample android app using SOAP API of Dejizo([デ辞蔵](https://dejizo.jp/dev/index.html)), with ksoap2 library. It prints the result in logcat. It doesn't have interactive UI as it is developed only to understand how to use the API.
e2839c9f18bec46d8473c9ba3b55c80086a41933
[ "Markdown", "Java" ]
2
Java
beneficial02/dic_dejizo_soap
4685e7f437c4c2feeea0137345ebb12c24e18bff
1680d18ae85ff3f7104a1570eeb9ec2574263d53
refs/heads/master
<file_sep>package com.example.demo; import java.util.List; public interface UserRepositoryCustom { List<UserEntity> custom(); } <file_sep>package com.kma.practice8.springsecuritycustom.controller; import org.springframework.http.ResponseEntity; import org.springframework.security.access.prepost.PreAuthorize; import org.springframework.security.core.context.SecurityContextHolder; import org.springframework.stereotype.Controller; import org.springframework.web.bind.annotation.GetMapping; import com.kma.practice8.springsecuritycustom.domain.security.MyCustomUserDetails; @Controller @PreAuthorize("isFullyAuthenticated()") public class UserController { @GetMapping("/profile") public String profile() { return "profile"; } @GetMapping("/user-details") public ResponseEntity<MyCustomUserDetails> userDetails() { final MyCustomUserDetails userDetails = (MyCustomUserDetails) SecurityContextHolder.getContext().getAuthentication().getPrincipal(); return ResponseEntity.ok(userDetails); } } <file_sep>package kma.topic6.springdatasample; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.NamedQueries; import javax.persistence.NamedQuery; import javax.persistence.Table; import lombok.Getter; import lombok.NoArgsConstructor; import lombok.Setter; import lombok.ToString; @Entity @Table(name = "users") @NoArgsConstructor @Getter @Setter @ToString @NamedQueries({ @NamedQuery(query = "SELECT u FROM UserEntity u WHERE u.email = :email", name = UserEntity.FIND_BY_EMAIL) }) public class UserEntity { public static final String FIND_BY_EMAIL = "UserEntity.FIND_BY_EMAIL"; @Id @Column(name = "id") @GeneratedValue(strategy = GenerationType.AUTO) private Integer id; @Column(name = "first_name") private String firstName; @Column(name = "last_name") private String lastName; @Column(name = "email") private String email; } <file_sep>package kma.topic6.springdatasample.embedded; import java.util.List; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.FetchType; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.JoinColumn; import javax.persistence.OneToMany; import javax.persistence.Table; import lombok.Getter; import lombok.Setter; import lombok.ToString; @Entity @Table(name = "apartment") @Getter @Setter @ToString public class ApartmentEntity { @Id @Column(name = "id") @GeneratedValue(strategy = GenerationType.AUTO) private Integer id; @Column(name = "number") private String number; @OneToMany(fetch = FetchType.LAZY) @JoinColumn(name = "apartment_id") private List<BillingEntity> billings; }
28fe72d89dfb1d05b7547448ab1e3678849a92bc
[ "Java" ]
4
Java
Arahizzz/JavaEE
d1b77c242b6ee1ef9375db23112e28ff5af72c0a
2aff3e7399265fb01f793eda35c756e8985dafc3
refs/heads/main
<file_sep>Playbooks - vmsetup/version.yml Writes image version into vmsetup/output/XXXY - vmsetup/setup.yml Tags: - init - contestprep - conteststart - reprovision - contestdone Schedule # 15:00 - Enumerate VMs online, check image verison ansible-playbook vmsetup/version.yml # 16:00 - Start VM init ansible-playbook vmsetup/setup.yml --tags init # 18:00 - Official start of "ioi" reprovisioning ansible-playbook vmsetup/setup.yml --tags contestprep # 18:30 - Start unlock screen ansible-playbook vmsetup/setup.yml --tags conteststart # 18:45 - Check ioibackup's backup is new # 19:00 - Whenever VM needs reprovisioning ansible-playbook -l XXXY vmsetup/setup.yml --tags reprovision # 00:00 - Done ansible-playbook vmsetup/setup.yml --tags contestdone --- 15:00 - Connect VM online. (Do not switch to backup unless needed.) 18:00 - HTC will reprovision the "ioi" account. Wait at login screen. 18:30 - Contestants may login to the "ioi" account; HTC will unlock screen shortly after. Contestant can open Firefox, go to CMS page, but NOT do any other work. 19:00 - Contest commences. 00:00 (next day) - Contest ends. Contestants may leave if no issues (e.g. appeal for extra time). Please leave VM on until HTC announces okay to turn off. Refer to: https://bit.ly/ioi2020siteinfo <file_sep>#!/bin/sh echo $(cat /opt/ioi/misc/VERSION) $( (cat /etc/sudoers /etc/sudoers.d/* /opt/ioi/misc/VERSION; \ iptables -L -n; \ grep -v ioi /etc/passwd; \ grep -v ioi /etc/shadow ) \ | sha256sum | cut -d\ -f1)<file_sep>#!/bin/sh while true; do echo `date +%Y%m%d%H%M%S` $$ Start New VM >> ~/logs/loop.log ./donewvm.sh sleep 10 echo `date +%Y%m%d%H%M%S` $$ New host keys >> ~/logs/loop.log ansible-playbook -l @hosts_online.ini vmsetup/checkkey.yml echo `date +%Y%m%d%H%M%S` $$ Done New VM >> ~/logs/loop.log sleep 10 done <file_sep>#!/bin/sh HOST=$1 TMPFILE=$(mktemp) nmap --min-rtt-timeout 2 --max-rtt-timeout 5 -sn -PS22 -iL $HOST|awk '/scan report/ { print $5 }'|sort > $TMPFILE comm -13 $TMPFILE $HOST rm $TMPFILE <file_sep>#!/bin/sh WORKDIR=$(mktemp -d) if [ -n "$1" ]; then gpg --pinentry-mode=loopback --passphrase "$1" -o $WORKDIR/tasks.tar.bz2 /opt/ioi/config/tasks.tar.bz2.gpg else gpg -o $WORKDIR/tasks.tar.bz2 /opt/ioi/config/tasks.tar.bz2.gpg fi if [ -f "$WORKDIR/tasks.tar.bz2" ]; then tar jxf "$WORKDIR/tasks.tar.bz2" -C ~ioi/Documents/ fi rm -f $WORKDIR/tasks.tar.bz2 rmdir $WORKDIR <file_sep>#!/bin/sh TMPFILE=$(mktemp) if [ ! -f ~/logs/hosts_new.ini ]; then echo No New hosts echo `date +%Y%m%d%H%M%S` No new hosts >> ~/logs/loop.log exit fi mv ~/logs/hosts_new.ini $TMPFILE echo New hosts $(cat $TMPFILE | paste -sd,) echo `date +%Y%m%d%H%M%S` New hosts contestdone $(cat $TMPFILE | paste -sd,) >> ~/logs/loop.log ansible-playbook -l @$TMPFILE vmsetup/setup.yml --tags contestdone #ansible-playbook -l @$TMPFILE vmsetup/setup.yml --tags init #ansible-playbook -l @$TMPFILE statements/dist_statements.yml echo SKIPPING New host processing rm $TMPFILE <file_sep>#!/bin/bash QUIET=0 MODE=backup while [[ $# -gt 0 ]]; do case $1 in -r) MODE=restore shift ;; esac done if [ -f /opt/ioi/run/ioibackup.pid ]; then if ps -p "$(cat /opt/ioi/run/ioibackup.pid)" > /dev/null; then echo Already running exit 1 fi fi echo $$ >> /opt/ioi/run/ioibackup.pid if [ "$MODE" = "backup" ]; then cat - <<EOM Backing up home directory. Only non-hidden files up to a maximum of 1 MB in size will be backed up. EOM rsync -e "ssh -i /opt/ioi/config/ssh/ioibackup" \ -avz \ --max-size=1M --bwlimit=1000 --exclude='.*' --exclude='*.pdf' ~ioi/ ioibackup<EMAIL>20.sg: elif [ "$MODE" = "restore" ]; then echo Restoring into /tmp/restore. if [ -e /tmp/restore ]; then cat - <<EOM Error: Unable to restore because /tmp/restore already exist. Remove or move away the existing file or directory before running again. EOM else rsync -e "ssh -i /opt/ioi/config/ssh/ioibackup" \ -avz --max-size=1M --bwlimit=1000 --exclude='.*' \ <EMAIL>@io<EMAIL>2<EMAIL>: /tmp/restore fi fi rm /opt/ioi/run/ioibackup.pid # vim: ft=bash ts=4 noet <file_sep>#!/bin/sh check_ip() { local IP=$1 if expr "$IP" : '[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*$' >/dev/null; then return 0 else return 1 fi } do_config() { CONF=$1 if ! test -f "$CONF"; then echo "Can't read $CONF" exit 1 fi WORKDIR=`mktemp -d` tar jxf $CONF -C $WORKDIR if [ $? -ne 0 ]; then echo "Failed to unpack $CONF" rm -rf $WORKDIR exit 1 fi IP=$(cat $WORKDIR/vpn/ip.conf) MASK=$(cat $WORKDIR/vpn/mask.conf) if ! check_ip "$IP" || ! check_ip "$MASK"; then echo Bad IP numbers rm -r $WORKDIR exit 1 fi echo "$IP" > /etc/tinc/vpn/ip.conf echo "$MASK" > /etc/tinc/vpn/mask.conf rm /etc/tinc/vpn/hosts/* 2> /dev/null cp $WORKDIR/vpn/hosts/* /etc/tinc/vpn/hosts/ cp $WORKDIR/vpn/rsa_key.* /etc/tinc/vpn/ cp $WORKDIR/vpn/tinc.conf /etc/tinc/vpn cp $WORKDIR/vpn/ioibackup* /opt/ioi/config/ssh/ rm -r $WORKDIR USERID=$(cat /etc/tinc/vpn/tinc.conf | grep Name | cut -d\ -f3) chfn -f "$USERID" ioi #hostnamectl set-hostname "$USERID" # Stop Zabbix agent systemctl stop zabbix-agent systemctl disable zabbix-agent 2> /dev/null rm /etc/zabbix/* 2> /dev/null # Restart firewall and VPN systemctl enable tinc@vpn 2> /dev/null systemctl restart tinc@vpn /opt/ioi/sbin/firewall.sh start # Start Zabbix configuration systemctl start zabbix-agent # Generate an instance ID to uniquely id this VM if [ ! -f /opt/ioi/run/instanceid.txt ]; then openssl rand 10 | base32 > /opt/ioi/run/instanceid.txt fi return } case "$1" in fwstart) if [ -e /opt/ioi/run/lockdown ]; then echo Not allowed to control firewall during lockdown mode else /opt/ioi/sbin/firewall.sh start fi ;; fwstop) if [ -e /opt/ioi/run/lockdown ]; then echo Not allowed to control firewall during lockdown mode else /opt/ioi/sbin/firewall.sh stop fi ;; vpnclear) if [ -e /opt/ioi/run/lockdown ]; then echo Not allowed to control firewall during lockdown mode else systemctl stop tinc@vpn systemctl disable tinc@vpn 2> /dev/null systemctl stop zabbix-agent systemctl disable zabbix-agent 2> /dev/null rm /etc/zabbix/* 2> /dev/null /opt/ioi/sbin/firewall.sh stop rm /etc/tinc/vpn/ip.conf 2> /dev/null rm /etc/tinc/vpn/mask.conf 2> /dev/null rm /etc/tinc/vpn/hosts/* 2> /dev/null rm /etc/tinc/vpn/rsa_key.* 2> /dev/null rm /etc/tinc/vpn/tinc.conf 2> /dev/null rm /opt/ioi/config/ssh/ioibackup* 2> /dev/null chfn -f "" ioi #hostnamectl set-hostname "ioi" system stop zabbix-agent fi ;; vpnstart) systemctl start tinc@vpn /opt/ioi/sbin/firewall.sh start ;; vpnrestart) systemctl restart tinc@vpn /opt/ioi/sbin/firewall.sh start ;; vpnstatus) systemctl status tinc@vpn ;; setvpnproto) if [ "$2" = "tcp" ]; then sed -i '/^TCPOnly/ s/= no$/= yes/' /etc/tinc/vpn/tinc.conf echo VPN protocol set to TCP only. elif [ "$2" = "auto" ]; then sed -i '/^TCPOnly/ s/= yes$/= no/' /etc/tinc/vpn/tinc.conf echo VPN procotol set to auto TCP/UDP with fallback to TCP only. else cat - <<EOM Invalid argument to setvpnproto. Specify "yes" to use TCP only, or "auto" to allow TCP/UDP with fallback to TCP only. EOM exit 1 fi ;; vpnconfig) do_config $2 ;; settz) tz=$2 if [ -z "$2" ]; then cat - <<EOM No timezone specified. Run tzselect to learn about the valid timezones available on this system. EOM exit 1 fi if [ -f "/usr/share/zoneinfo/$2" ]; then cat - <<EOM Your timezone will be set to $2 at your next login. *** Please take note that all dates and times communicated by the IOI 2020 *** *** organisers will be in Asia/Singapore timezone (GMT+08), unless it is *** *** otherwise specified. *** EOM echo "$2" > /opt/ioi/config/timezone else cat - <<EOM Timezone $2 is not valid. Run tzselect to learn about the valid timezones available on this system. EOM exit 1 fi ;; setautobackup) if [ "$2" = "on" ]; then touch /opt/ioi/config/autobackup echo Auto backup enabled elif [ "$2" = "off" ]; then if [ -f /opt/ioi/config/autobackup ]; then rm /opt/ioi/config/autobackup fi echo Auto backup disabled else cat - <<EOM Invalid argument to setautobackup. Specify "on" to enable automatic backup of home directory, or "off" to disable automatic backup. You can always run "ioibackup" manually to backup at any time. Backups will only include non-hidden files less than 1MB in size. EOM fi ;; setscreenlock) if [ "$2" = "on" ]; then touch /opt/ioi/config/screenlock echo Screensaver lock enabled elif [ "$2" = "off" ]; then if [ -f /opt/ioi/config/screenlock ]; then rm /opt/ioi/config/screenlock fi echo Screensaver lock disabled else cat - <<EOM Invalid argument to setscreenlock. Specify "on" to enable screensaver lock, or "off" to disable screensaver lock. EOM fi ;; *) echo Not allowed ;; esac # vim: ft=sh ts=4 sw=4 noet <file_sep>ARG1 ansible_host=10.10.4.1 ARG2 ansible_host=10.10.20.1 ARG3 ansible_host=10.10.4.2 ARG4 ansible_host=10.10.20.2 ARM1 ansible_host=10.10.4.3 ARM2 ansible_host=10.10.20.3 ARM3 ansible_host=10.10.4.4 ARM4 ansible_host=10.10.20.4 AUS1 ansible_host=10.10.4.5 AUS2 ansible_host=10.10.20.5 AUS3 ansible_host=10.10.4.6 AUS4 ansible_host=10.10.20.6 AUT1 ansible_host=10.10.4.7 AUT2 ansible_host=10.10.20.7 AUT3 ansible_host=10.10.4.8 AUT4 ansible_host=10.10.20.8 AZE1 ansible_host=10.10.4.9 AZE2 ansible_host=10.10.20.9 AZE3 ansible_host=10.10.4.10 AZE4 ansible_host=10.10.20.10 BEL1 ansible_host=10.10.4.15 BEL2 ansible_host=10.10.20.15 BEL3 ansible_host=10.10.4.16 BEL4 ansible_host=10.10.20.16 BGD1 ansible_host=10.10.4.11 BGD2 ansible_host=10.10.20.11 BGD3 ansible_host=10.10.4.12 BGD4 ansible_host=10.10.20.12 BGR1 ansible_host=10.10.4.23 BGR2 ansible_host=10.10.20.23 BGR3 ansible_host=10.10.4.24 BGR4 ansible_host=10.10.20.24 BIH1 ansible_host=10.10.4.19 BIH2 ansible_host=10.10.20.19 BIH3 ansible_host=10.10.4.20 BIH4 ansible_host=10.10.20.20 BLR1 ansible_host=10.10.4.13 BLR2 ansible_host=10.10.20.13 BLR3 ansible_host=10.10.4.14 BLR4 ansible_host=10.10.20.14 BOL1 ansible_host=10.10.4.17 BOL2 ansible_host=10.10.20.17 BOL3 ansible_host=10.10.4.18 BOL4 ansible_host=10.10.20.18 BRA1 ansible_host=10.10.4.21 BRA2 ansible_host=10.10.20.21 BRA3 ansible_host=10.10.4.22 BRA4 ansible_host=10.10.20.22 CAN1 ansible_host=10.10.4.25 CAN2 ansible_host=10.10.20.25 CAN3 ansible_host=10.10.4.26 CAN4 ansible_host=10.10.20.26 CHE1 ansible_host=10.10.20.147 CHE2 ansible_host=10.10.4.148 CHE3 ansible_host=10.10.20.148 CHE4 ansible_host=10.10.4.149 CHL1 ansible_host=10.10.4.27 CHL2 ansible_host=10.10.20.27 CHL3 ansible_host=10.10.4.28 CHL4 ansible_host=10.10.20.28 CHN1 ansible_host=10.10.4.29 CHN2 ansible_host=10.10.20.29 CHN3 ansible_host=10.10.4.30 CHN4 ansible_host=10.10.20.30 COL1 ansible_host=10.10.4.31 COL2 ansible_host=10.10.20.31 COL3 ansible_host=10.10.4.32 COL4 ansible_host=10.10.20.32 CUB1 ansible_host=10.10.4.35 CUB2 ansible_host=10.10.20.35 CUB3 ansible_host=10.10.4.36 CUB4 ansible_host=10.10.20.36 CYP1 ansible_host=10.10.4.37 CYP2 ansible_host=10.10.20.37 CYP3 ansible_host=10.10.4.38 CYP4 ansible_host=10.10.20.38 CZE1 ansible_host=10.10.4.39 CZE2 ansible_host=10.10.20.39 CZE3 ansible_host=10.10.4.40 CZE4 ansible_host=10.10.20.40 DEU1 ansible_host=10.10.20.56 DEU2 ansible_host=10.10.4.57 DEU3 ansible_host=10.10.20.57 DEU4 ansible_host=10.10.4.58 DNK1 ansible_host=10.10.4.41 DNK2 ansible_host=10.10.20.41 DNK3 ansible_host=10.10.4.42 DNK4 ansible_host=10.10.20.42 DOM1 ansible_host=10.10.4.43 DOM2 ansible_host=10.10.20.43 DOM3 ansible_host=10.10.4.44 EGY1 ansible_host=10.10.20.44 EGY2 ansible_host=10.10.4.45 EGY3 ansible_host=10.10.20.45 EGY4 ansible_host=10.10.4.46 ESP1 ansible_host=10.10.20.141 ESP2 ansible_host=10.10.4.142 ESP3 ansible_host=10.10.20.142 ESP4 ansible_host=10.10.4.143 EST1 ansible_host=10.10.20.48 EST2 ansible_host=10.10.4.49 EST3 ansible_host=10.10.20.49 EST4 ansible_host=10.10.4.50 FIN1 ansible_host=10.10.20.50 FIN2 ansible_host=10.10.4.51 FIN3 ansible_host=10.10.20.51 FIN4 ansible_host=10.10.4.52 FRA1 ansible_host=10.10.20.52 FRA2 ansible_host=10.10.4.53 FRA3 ansible_host=10.10.20.53 FRA4 ansible_host=10.10.4.54 GBR1 ansible_host=10.10.20.165 GBR2 ansible_host=10.10.4.166 GBR3 ansible_host=10.10.20.166 GBR4 ansible_host=10.10.4.167 GEO1 ansible_host=10.10.20.54 GEO2 ansible_host=10.10.4.55 GEO3 ansible_host=10.10.20.55 GEO4 ansible_host=10.10.4.56 GRC1 ansible_host=10.10.20.58 GRC2 ansible_host=10.10.4.59 GRC3 ansible_host=10.10.20.59 GRC4 ansible_host=10.10.4.60 HKG1 ansible_host=10.10.20.60 HKG2 ansible_host=10.10.4.61 HKG3 ansible_host=10.10.20.61 HKG4 ansible_host=10.10.4.62 HRV1 ansible_host=10.10.4.33 HRV2 ansible_host=10.10.20.33 HRV3 ansible_host=10.10.4.34 HRV4 ansible_host=10.10.20.34 HUN1 ansible_host=10.10.20.62 HUN2 ansible_host=10.10.4.63 HUN3 ansible_host=10.10.20.63 HUN4 ansible_host=10.10.4.64 IDN1 ansible_host=10.10.20.68 IDN2 ansible_host=10.10.4.69 IDN3 ansible_host=10.10.20.69 IDN4 ansible_host=10.10.4.70 IND1 ansible_host=10.10.20.66 IND2 ansible_host=10.10.4.67 IND3 ansible_host=10.10.20.67 IND4 ansible_host=10.10.4.68 IRL1 ansible_host=10.10.20.72 IRL2 ansible_host=10.10.4.73 IRL3 ansible_host=10.10.20.73 IRL4 ansible_host=10.10.4.74 IRN1 ansible_host=10.10.20.70 IRN2 ansible_host=10.10.4.71 IRN3 ansible_host=10.10.20.71 IRN4 ansible_host=10.10.4.72 ISL1 ansible_host=10.10.20.64 ISL2 ansible_host=10.10.4.65 ISL3 ansible_host=10.10.20.65 ISL4 ansible_host=10.10.4.66 ISR1 ansible_host=10.10.20.74 ISR2 ansible_host=10.10.4.75 ISR3 ansible_host=10.10.20.75 ISR4 ansible_host=10.10.4.76 ITA1 ansible_host=10.10.20.76 ITA2 ansible_host=10.10.4.77 ITA3 ansible_host=10.10.20.77 ITA4 ansible_host=10.10.4.78 JOR1 ansible_host=10.10.20.80 JOR2 ansible_host=10.10.4.81 JOR3 ansible_host=10.10.20.81 JOR4 ansible_host=10.10.4.82 JPN1 ansible_host=10.10.20.78 JPN2 ansible_host=10.10.4.79 JPN3 ansible_host=10.10.20.79 JPN4 ansible_host=10.10.4.80 KAZ1 ansible_host=10.10.20.82 KAZ2 ansible_host=10.10.4.83 KAZ3 ansible_host=10.10.20.83 KAZ4 ansible_host=10.10.4.84 KGZ1 ansible_host=10.10.20.84 KGZ2 ansible_host=10.10.4.85 KGZ3 ansible_host=10.10.20.85 KGZ4 ansible_host=10.10.4.86 KOR1 ansible_host=10.10.20.121 KOR2 ansible_host=10.10.4.122 KOR3 ansible_host=10.10.20.122 KOR4 ansible_host=10.10.4.123 LKA1 ansible_host=10.10.20.143 LKA2 ansible_host=10.10.4.144 LKA3 ansible_host=10.10.20.144 LKA4 ansible_host=10.10.4.145 LTU1 ansible_host=10.10.20.88 LTU2 ansible_host=10.10.4.89 LTU3 ansible_host=10.10.20.89 LTU4 ansible_host=10.10.4.90 LUX1 ansible_host=10.10.20.90 LUX2 ansible_host=10.10.4.91 LUX3 ansible_host=10.10.20.91 LUX4 ansible_host=10.10.4.92 LVA1 ansible_host=10.10.20.86 LVA2 ansible_host=10.10.4.87 LVA3 ansible_host=10.10.20.87 LVA4 ansible_host=10.10.4.88 MAC1 ansible_host=10.10.20.92 MAC2 ansible_host=10.10.4.93 MAC3 ansible_host=10.10.20.93 MAC4 ansible_host=10.10.4.94 MAR1 ansible_host=10.10.20.103 MAR2 ansible_host=10.10.4.104 MAR3 ansible_host=10.10.20.104 MAR4 ansible_host=10.10.4.105 MEX1 ansible_host=10.10.20.98 MEX2 ansible_host=10.10.4.99 MEX3 ansible_host=10.10.20.99 MEX4 ansible_host=10.10.4.100 MKD1 ansible_host=10.10.20.94 MKD2 ansible_host=10.10.4.95 MKD3 ansible_host=10.10.20.95 MKD4 ansible_host=10.10.4.96 MNE1 ansible_host=10.10.20.102 MNE2 ansible_host=10.10.4.103 MNG1 ansible_host=10.10.20.100 MNG2 ansible_host=10.10.4.101 MNG3 ansible_host=10.10.20.101 MNG4 ansible_host=10.10.4.102 MYS1 ansible_host=10.10.20.96 MYS2 ansible_host=10.10.4.97 MYS3 ansible_host=10.10.20.97 MYS4 ansible_host=10.10.4.98 NGA1 ansible_host=10.10.20.109 NGA2 ansible_host=10.10.4.110 NGA3 ansible_host=10.10.20.110 NGA4 ansible_host=10.10.4.111 NLD1 ansible_host=10.10.20.105 NLD2 ansible_host=10.10.4.106 NLD3 ansible_host=10.10.20.106 NLD4 ansible_host=10.10.4.107 NOR1 ansible_host=10.10.20.111 NOR2 ansible_host=10.10.4.112 NOR3 ansible_host=10.10.20.112 NOR4 ansible_host=10.10.4.113 NZL1 ansible_host=10.10.20.107 NZL2 ansible_host=10.10.4.108 NZL3 ansible_host=10.10.20.108 NZL4 ansible_host=10.10.4.109 PHL1 ansible_host=10.10.20.115 PHL2 ansible_host=10.10.4.116 PHL3 ansible_host=10.10.20.116 PHL4 ansible_host=10.10.4.117 POL1 ansible_host=10.10.20.117 POL2 ansible_host=10.10.4.118 POL3 ansible_host=10.10.20.118 POL4 ansible_host=10.10.4.119 PRT1 ansible_host=10.10.20.119 PRT2 ansible_host=10.10.4.120 PRT3 ansible_host=10.10.20.120 PRT4 ansible_host=10.10.4.121 PSE1 ansible_host=10.10.20.113 PSE2 ansible_host=10.10.4.114 PSE3 ansible_host=10.10.20.114 PSE4 ansible_host=10.10.4.115 ROU1 ansible_host=10.10.20.123 ROU2 ansible_host=10.10.4.124 ROU3 ansible_host=10.10.20.124 ROU4 ansible_host=10.10.4.125 RUS1 ansible_host=10.10.20.125 RUS2 ansible_host=10.10.4.126 RUS3 ansible_host=10.10.20.126 RUS4 ansible_host=10.10.4.127 SAU1 ansible_host=10.10.20.127 SAU2 ansible_host=10.10.4.128 SAU3 ansible_host=10.10.20.128 SAU4 ansible_host=10.10.4.129 SGP1 ansible_host=10.10.20.131 SGP2 ansible_host=10.10.4.132 SGP3 ansible_host=10.10.20.132 SGP4 ansible_host=10.10.4.133 SGP5 ansible_host=10.10.20.133 SGP6 ansible_host=10.10.4.134 SGP7 ansible_host=10.10.20.134 SGP8 ansible_host=10.10.4.135 SLV1 ansible_host=10.10.20.46 SLV2 ansible_host=10.10.4.47 SLV3 ansible_host=10.10.20.47 SLV4 ansible_host=10.10.4.48 SRB1 ansible_host=10.10.20.129 SRB2 ansible_host=10.10.4.130 SRB3 ansible_host=10.10.20.130 SRB4 ansible_host=10.10.4.131 SVK1 ansible_host=10.10.20.135 SVK2 ansible_host=10.10.4.136 SVK3 ansible_host=10.10.20.136 SVK4 ansible_host=10.10.4.137 SVN1 ansible_host=10.10.20.137 SVN2 ansible_host=10.10.4.138 SVN3 ansible_host=10.10.20.138 SVN4 ansible_host=10.10.4.139 SWE1 ansible_host=10.10.20.145 SWE2 ansible_host=10.10.4.146 SWE3 ansible_host=10.10.20.146 SWE4 ansible_host=10.10.4.147 SYR1 ansible_host=10.10.20.149 SYR2 ansible_host=10.10.4.150 SYR3 ansible_host=10.10.20.150 SYR4 ansible_host=10.10.4.151 THA1 ansible_host=10.10.20.155 THA2 ansible_host=10.10.4.156 THA3 ansible_host=10.10.20.156 THA4 ansible_host=10.10.4.157 TJK1 ansible_host=10.10.20.153 TJK2 ansible_host=10.10.4.154 TJK3 ansible_host=10.10.20.154 TJK4 ansible_host=10.10.4.155 TKM1 ansible_host=10.10.20.161 TKM2 ansible_host=10.10.4.162 TKM3 ansible_host=10.10.20.162 TKM4 ansible_host=10.10.4.163 TUN1 ansible_host=10.10.20.157 TUN2 ansible_host=10.10.4.158 TUN3 ansible_host=10.10.20.158 TUN4 ansible_host=10.10.4.159 TUR1 ansible_host=10.10.20.159 TUR2 ansible_host=10.10.4.160 TUR3 ansible_host=10.10.20.160 TUR4 ansible_host=10.10.4.161 TWN1 ansible_host=10.10.20.151 TWN2 ansible_host=10.10.4.152 TWN3 ansible_host=10.10.20.152 TWN4 ansible_host=10.10.4.153 UKR1 ansible_host=10.10.20.163 UKR2 ansible_host=10.10.4.164 UKR3 ansible_host=10.10.20.164 UKR4 ansible_host=10.10.4.165 USA1 ansible_host=10.10.20.167 USA2 ansible_host=10.10.4.168 USA3 ansible_host=10.10.20.168 USA4 ansible_host=10.10.4.169 UZB1 ansible_host=10.10.20.169 UZB2 ansible_host=10.10.4.170 UZB3 ansible_host=10.10.20.170 UZB4 ansible_host=10.10.4.171 VEN1 ansible_host=10.10.20.171 VEN2 ansible_host=10.10.4.172 VEN3 ansible_host=10.10.20.172 VEN4 ansible_host=10.10.4.173 VNM1 ansible_host=10.10.20.173 VNM2 ansible_host=10.10.4.174 VNM3 ansible_host=10.10.20.174 VNM4 ansible_host=10.10.4.175 ZAF1 ansible_host=10.10.20.139 ZAF2 ansible_host=10.10.4.140 ZAF3 ansible_host=10.10.20.140 ZAF4 ansible_host=10.10.4.141 ZZY1 ansible_host=10.10.20.175 ZZY2 ansible_host=10.10.4.176 ZZY3 ansible_host=10.10.20.176 ZZY4 ansible_host=10.10.4.177 ZZZ1 ansible_host=10.10.20.177 ZZZ2 ansible_host=10.10.4.178 ZZZ3 ansible_host=10.10.20.178 ZZZ4 ansible_host=10.10.4.179
eeedff40b0080836fbf236efc55553fc19a17fa0
[ "INI", "Text", "Shell" ]
9
Text
lzs/ioi2020-pc-ansible
a5d92f9ea24cb802195fcf18a12b5c751a832793
e792c58aa56999b5dcf947b330049cbc5bcb36b2
refs/heads/master
<file_sep>import React, { useEffect, useState } from "react"; import axios from "axios"; import './component.css'; function Display({ history }) { const [data, setData] = useState([]); useEffect(() => { const token = JSON.parse(localStorage.getItem("token")); const url = "http://localhost:5000/api/restricted/data"; if (token) { axios .get(url, { headers: { Authorization: token } }) .then(response => { console.log('list', response) setData(response.data); }) .catch(e => { console.log('error', e.response); localStorage.removeItem("token"); history.push("/"); }); } },[]); if (!data) return <div>Loading</div> return ( <> <div className="friendHeader"> <p>My Favorite Dishes</p> </div> {data.map(data => <div className="friendList"> <p className="friendName">Course: {data.course}</p> <p className="friendAge">Title: {data.name}</p> <p className="friendEmail">Technique: {data.technique}</p> </div>)} <button className="btn logoutButton" onClick={() => { localStorage.removeItem("token"); history.push("/"); window.location.reload(); }} > Logout </button> </> ); } export default Display; <file_sep>import React from "react"; import { withFormik, Form, Field } from "formik"; import * as Yup from "yup"; import axios from "axios"; import { Redirect } from "react-router-dom"; import { useLocalStorage } from "../hooks/useLocalStorage"; function Login({ touched, errors, token, setToken }) { if (token) { return <Redirect to="/display" />; } return ( <Form className="form"> <div className="form-group"> <label className="label">Username</label> <Field className="input" name="username" type="text" autoComplete="off" /> <p>{touched.username && errors.username}</p> </div> <div className="form-group"> <label className="label">Password</label> <Field className="input" name="password" type="password" autoComplete="off" /> </div> <p>{touched.password && errors.password}</p> <button className="btn" type="submit">Submit &rarr;</button> </Form> ); } export default withFormik({ mapPropsToValues() { return { username: "", password: "" }; }, validationSchema: Yup.object().shape({ username: Yup.string() .required(), password: Yup.string() .min(6) .required() }), handleSubmit(values, formikBag) { const {setToken} = formikBag.props; const url = "http://localhost:5000/api/register"; axios .post(url, values) .then(response => { setToken(response.data.token); formikBag.props.history.push("/display"); console.log('login Response', response); }) .catch(e => { console.log('login error', e.response); }); } })(Login);
c8a73d2032b9a8e47371241e6f7e0d3caba79edd
[ "JavaScript" ]
2
JavaScript
Blake-Goms/Sprint-Challenge-Form-Management
0d44592a15ba959fd7a71623a88273f14ef1c72f
81b5e5a497f95d97d03ae4438296bb62d769d57f
refs/heads/master
<repo_name>benoist/json-jwt<file_sep>/spec/spec_helper.rb require 'simplecov' SimpleCov.start do add_filter 'spec' end require 'rspec' require 'json/jwt' require 'helpers/sign_key_fixture_helper' require 'helpers/nimbus_spec_helper'
87db0ab1641d41f2a79cd9dba7fc2c86d079c337
[ "Ruby" ]
1
Ruby
benoist/json-jwt
e0004411e69b7dec31d4e3983787e40124cd4064
29c2147803cff529dad8fe694041b011bc93a9c1
refs/heads/master
<file_sep>var num = 2; setInterval(function () { if (num < 4) { var a = document.getElementById("slider"); a.style.backgroundImage = "url('d" + num + ".jpg')"; num++; } else num = 1; }, 2000)
0dafe8bdda7441587dc6d242d23c3564b2d08dc9
[ "JavaScript" ]
1
JavaScript
warlock/flexbox-testing
a6c95c44113830791468b35b4ce57860fdaea088
9d9b800b0c58691c333438f87fbe2f9d58bd6bb0
refs/heads/master
<file_sep>#pragma once #include <inttypes.h> #include <string> #include <vector> #include <glm/glm.hpp> #include <glm/gtc/matrix_transform.hpp> #include <glm/gtx/transform.hpp> #include <glm\gtc\quaternion.hpp> #include <glm/gtc/type_ptr.hpp> //Switch between CPU and GPU computation #define CUDA 1 #if CUDA #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <vector_types.h> #include <device_functions.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #define KERNEL 2 //0 == slow, 1 == medium, 2 == fast //256, 512, 1024, 2048, 3072, 5120, 8192, 10240, 20480, 30720, 40960, 51200, 61440, 71680, 81920, 100096 //150016, 200192, 250112, 500224, 750080, 1000192, 1250048 #define THREADS_PER_BLOCK 256 #else //200, 500, 1000, 2000, 3000, 5000, 8000, 10000, 20000 //Switch threads on #define THREADED 1 #endif enum BUFFERS { VERTEX_BUFFER, INDEX_BUFFER, INDIRECT_BUFFER, MAX_BUFFER }; #define SHADER_DIR "..\\..\\Shaders\\" //#define SHADER_DIR "..\\..\\..\\Shaders\\" #define maximum(a,b) (((a) > (b)) ? (a) : (b)) #define minimum(a,b) (((a) < (b)) ? (a) : (b)) #define PI acos(-1.0)<file_sep>#include "Boid.h" #include <algorithm> #define K 450 const float Boid::MAX_SPEED = 12.0f; const float Boid::MAX_SPEED_SQR = MAX_SPEED * MAX_SPEED; glm::vec3 Boid::m_Heading = glm::vec3(0, 0, 0); Boid::Boid(glm::vec3 spawnPosition, glm::vec3 initialVelocity, const std::string& name) : Entity(name) { m_Position = spawnPosition; m_Velocity = initialVelocity; m_WorldTransformPtr = &m_WorldTransform; neighbours.clear(); } Boid::~Boid() { } void Boid::OnUpdateObject(float dt) { std::partial_sort(neighbours.begin(), neighbours.begin() + K, neighbours.end(), comp); CalculateVelocity(dt); m_Position += (m_Velocity * (1.0f / dt)); m_WorldTransform = glm::mat4_cast(glm::quat(m_Velocity)) * glm::translate(m_Position); m_WorldTransform = m_WorldTransform * m_LocalTransform; neighbours.clear(); } void Boid::CalculateVelocity(float dt) { glm::vec3 avgPos = glm::vec3(0, 0, 0); glm::vec3 seperation = glm::vec3(0, 0, 0); glm::vec3 avgVel = glm::vec3(0, 0, 0); float avgDiv = 1.0f / float(K); //Loop through neighbours for (unsigned int i = 0; i < K; ++i) { avgPos += neighbours[i].n->m_Position; seperation -= (neighbours[i].n->m_Position - m_Position) * (1.0f / sqrtf(neighbours[i].dist)); avgVel += neighbours[i].n->m_Velocity; } //Calculate Cohesion avgPos *= avgDiv; m_CohesiveVector = (avgPos - m_Position); float mag = glm::length(m_CohesiveVector); m_CohesiveVector = glm::normalize(m_CohesiveVector); m_CohesiveVector *= (MAX_SPEED * (mag * 0.001f)); m_CohesiveVector -= m_Velocity; //Calculate Seperation seperation *= avgDiv; m_SeperationVector *= 0.25f; //Calculate Alignment avgVel *= avgDiv; m_AlignmentVector = (avgVel - m_Velocity); //Calculate final velocity m_Velocity += (m_SeperationVector + m_CohesiveVector + m_AlignmentVector + (glm::cross(m_Heading, m_Position) * 0.05f)) * (1.0f / dt); float speed = glm::dot(m_Velocity, m_Velocity); if (speed > MAX_SPEED_SQR) { m_Velocity = (m_Velocity * (1.0f / sqrtf(speed))) * MAX_SPEED; } m_Velocity *= m_DampingFactor; }<file_sep>#pragma once #include <GL/glew.h> #include <GLFW/glfw3.h> #include <iostream> #include "Timer.h" class Scene; class Shader; class OGLRenderer { public: static OGLRenderer* Instance(); static unsigned int num_boids; static void Release(); void Render(Timer* t); bool ShouldClose(); void SetCurrentShader(Shader* s); void SetCurrentScene(Scene* s) { currentScene = s; } GLFWwindow* GetWindow() const { return window; } unsigned int GetWindowWidth() const { return WIDTH; } unsigned int GetWindowHeight() const { return HEIGHT; } unsigned int GetSSBO_ID() const { return SSBO; } float GetElapsed() const { return elapsedCPU; } private: OGLRenderer(); ~OGLRenderer(); void init_glew(); void init_glfw(); const GLFWvidmode* MODE; //Pointer to current GLFW window object. GLFWwindow* window; Shader* currentShader; Scene* currentScene; static OGLRenderer* instance; float elapsedCPU = 0.0f; unsigned int SSBO; const unsigned int WIDTH = 1600; const unsigned int HEIGHT = 900; const char* TITLE = "MComp Dissertation - Boids Simulation"; };<file_sep>#pragma once #include "Scene.h" #include "Boid.h" #include <future> #include <random> #include <functional> #include "Common.h" #if CUDA #include "BoidGPU.h" #endif class BoidScene : public Scene { public: BoidScene(unsigned int numberOfBoids, Shader* shader, Mesh* mesh); virtual ~BoidScene(); virtual void UpdateScene(float dt) override; virtual void RenderScene() override; Boid* GetBoid(unsigned int i) { return i < boids.size() ? boids[i] : nullptr; } std::vector<Boid*>& GetBoidData() { return boids; } #if CUDA float GetCUDAElapsedTime() const { return elapsed_cuda; } #endif private: BoidScene() {} void InitGenerator(int spread); std::vector<Boid*> boids; std::function<float()> rndX; std::function<float()> rndY; std::function<float()> rndZ; float count = 0.0f; glm::vec3 m_FlockHeading; unsigned int numBoids; #if CUDA BoidGPU* boidsDevice; glm::mat4* modelMatricesDevice; uint32_t BLOCKS_PER_GRID; thrust::device_ptr<int> dev_key_ptr; thrust::device_ptr<unsigned int> dev_val_ptr; cudaEvent_t start, stop; float elapsed_cuda = 0.0f; #endif #if THREADED std::vector<std::future<void>> futures; const unsigned int NUMBER_OF_THREADS = 8; void UpdatePartition(size_t begin, size_t end, float dt); #endif }; #if CUDA __global__ void ComputeKNN(BoidGPU* boids, unsigned int numBoids); __global__ void ComputeRules(BoidGPU* boids); __global__ void UpdateBoid(BoidGPU* boids, glm::mat4* boidMat, const glm::vec3 heading, const float dt); #endif<file_sep>//Author: <NAME> #include "Camera.h" #include <GLFW/glfw3.h> #include "Common.h" Camera::Camera(GLFWwindow* window, unsigned int window_height, unsigned int window_width, float FOV, float nearPlane, float farPlane) { this->window = window; this->window_height = window_height; this->window_width = window_width; half_width = window_width / 2.0f; half_height = window_height / 2.0f; Projection = glm::perspective(FOV, GLfloat(window_width) / GLfloat(window_height), nearPlane, farPlane); position = glm::vec3(0.0, 0.0, 0.0); target = glm::vec3(0.0, 0.0, 0.0); look = glm::vec3(0, 0, 1); right = glm::vec3(1, 0, 0); up = glm::vec3(0, 1, 0); } void Camera::UpdateCamera(float dt) { pollKeyBoard(dt); pollMouse(dt); View = glm::mat4_cast(glm::quat(glm::vec3(glm::radians(pitch), glm::radians(yaw), 0.0))) * glm::translate(glm::mat4(1.0f), position); look = glm::normalize(glm::mat3(View) * glm::vec3(0.0, 0.0, 1.0)); up = glm::normalize(glm::mat3(View) * glm::vec3(0.0, 1.0, 0.0)); right = glm::normalize(glm::cross(look, up)); target = position + look; View = glm::lookAt(position, target, up); VP = Projection * View; } //Poll keyboard input. void Camera::pollKeyBoard(float dt) { float deltaMove = dt * speed; // Move forward if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS) { position += (look * deltaMove); } // Move backward if (glfwGetKey(window, GLFW_KEY_S) == GLFW_PRESS) { position -= (look * deltaMove); } // Strafe right if (glfwGetKey(window, GLFW_KEY_D) == GLFW_PRESS) { position += (right * deltaMove); } // Strafe left if (glfwGetKey(window, GLFW_KEY_A) == GLFW_PRESS) { position -= (right * deltaMove); } //move up if (glfwGetKey(window, GLFW_KEY_Q) == GLFW_PRESS) { position += (up * deltaMove); } //move down if (glfwGetKey(window, GLFW_KEY_E) == GLFW_PRESS) { position -= (up * deltaMove); } } //Poll mouse input. void Camera::pollMouse(float dt) { double x, y; float deltaMove = dt * mouseSpeed; glfwGetCursorPos(window, &x, &y); pitch += deltaMove * (half_height - y); yaw += deltaMove * (half_width - x); glfwSetCursorPos(window, half_width, half_height); if (yaw < 0.0) { yaw += 360.0; } if (yaw > 360.0) { yaw -= 360.0; } pitch = minimum(pitch, 90.0f); pitch = maximum(pitch, -90.0f); } Camera::~Camera(){}<file_sep>#include "RenderComponent.h" #include "Shader.h" #include "Entity.h" #include "Mesh.h" #include "OGLRenderer.h" #include <glm/gtc/type_ptr.hpp> RenderComponent::RenderComponent(Mesh* mesh, Shader* shader) { m_Mesh = mesh; m_Shader = shader; } RenderComponent::~RenderComponent() { } void RenderComponent::SetParent(Entity* e) { m_Entity = e; } void RenderComponent::Draw() { OGLRenderer::Instance()->SetCurrentShader(m_Shader); #if !CUDA glUniformMatrix4fv(m_Shader->GetModelMatrixLoc(), 1, GL_FALSE, (float*)m_Entity->GetWorldTransform()); #endif m_Mesh->Draw(); }<file_sep>#pragma once #include "Entity.h" #include "Common.h" class Boid; struct BoidNeighbour { Boid* n = nullptr; float dist = 0.0f; }; class Boid : public Entity { friend class BoidScene; public: Boid(glm::vec3 spawnPosition, glm::vec3 initialVelocity, const std::string& name = std::to_string(id)); virtual ~Boid(); inline const glm::vec3& GetPosition() const { return m_Position; } static void UpdateFlockHeading(glm::vec3& heading) { m_Heading = heading; } inline const glm::vec3& GetVelocity() const { return m_Velocity; } inline void AddNeighbour(BoidNeighbour bN) { neighbours.push_back(bN); } protected: virtual void OnUpdateObject(float dt) override; private: void CalculateVelocity(float dt); glm::vec3 m_Position; glm::vec3 m_Velocity; glm::vec3 m_CohesiveVector; glm::vec3 m_SeperationVector; glm::vec3 m_AlignmentVector; const float m_DampingFactor = 0.999f; std::vector<BoidNeighbour> neighbours; static const float MAX_SPEED; static const float MAX_SPEED_SQR; static glm::vec3 m_Heading; struct compare { bool operator()(const BoidNeighbour& a, const BoidNeighbour& b) { return a.dist < b.dist; } } comp; };<file_sep>#pragma once #include "Common.h" #if CUDA //Used to store Boid data struct BoidGPU { glm::vec3* m_Position; glm::vec3* m_Velocity; glm::vec3* m_CohesiveVector; glm::vec3* m_SeperationVector; glm::vec3* m_AlignmentVector; int* m_Key; unsigned int* m_Val; }; #endif<file_sep>#include "Scene.h" #include "OGLRenderer.h" Scene::Scene() { cam = new Camera(OGLRenderer::Instance()->GetWindow(), OGLRenderer::Instance()->GetWindowHeight(), OGLRenderer::Instance()->GetWindowWidth(), 45.0f, 0.1f, 10000.0f); } Scene::~Scene() { for (Entity* e : opaqueObjects) { delete e; e = nullptr; } for (Entity* e : transparentObjects) { delete e; e = nullptr; } opaqueObjects.clear(); transparentObjects.clear(); } Entity* Scene::GetOpaqueObject(unsigned int i) { if (i < opaqueObjects.size()) return opaqueObjects[i]; else return nullptr; } Entity* Scene::GetTransparentObject(unsigned int i) { if (i < transparentObjects.size()) return transparentObjects[i]; else return nullptr; } void Scene::AddEntity(Entity* e) { opaqueObjects.push_back(e); for (auto child : e->GetChildren()) { AddEntity(child); } } void Scene::UpdateScene(float dt) { cam->UpdateCamera(dt); for (unsigned int i = 0; i < opaqueObjects.size(); ++i) opaqueObjects[i]->OnUpdateObject(dt); for (unsigned int i = 0; i < transparentObjects.size(); ++i) transparentObjects[i]->OnUpdateObject(dt); } void Scene::RenderScene() { for (unsigned int i = 0; i < opaqueObjects.size(); ++i) opaqueObjects[i]->OnRenderObject(); for (unsigned int i = 0; i < transparentObjects.size(); ++i) transparentObjects[i]->OnRenderObject(); }<file_sep>#include "BoidScene.h" #include "Common.h" BoidScene::BoidScene(unsigned int numberOfBoids, Shader* shader, Mesh* mesh) { InitGenerator(numberOfBoids); #if CUDA cudaMallocManaged((void**)&boids, numberOfBoids * sizeof(BoidGPU)); BLOCKS_PER_GRID = (numberOfBoids + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK; #endif glm::vec3 pos, vel; for (unsigned int i = 0; i < numberOfBoids; ++i) { pos = glm::vec3(rndX(), rndY(), rndZ()); vel = glm::vec3(rndX(), rndY(), rndZ()); Boid* b = new Boid(0, pos, vel); RenderComponent* rc = new RenderComponent(mesh, shader); b->SetRenderComponent(rc); AddEntity(b); boids.push_back(b); #if CUDA boidsGPU[i] = BoidGPU(50, pos, vel); #endif } m_FlockHeading = glm::vec3(0, 0, 0); #if THREADED futures.clear(); #endif } BoidScene::~BoidScene() { } void BoidScene::InitGenerator(int spread) { std::random_device rD0; std::random_device rD1; std::random_device rD2; std::default_random_engine engine0(rD0()); std::default_random_engine engine1(rD1()); std::default_random_engine engine2(rD2()); std::uniform_real_distribution<float> x(-100.0f, 100.0f); std::uniform_real_distribution<float> y(-100.0f, 100.0f); std::uniform_real_distribution<float> z(-100.0f, 100.0f); rndX = std::bind(x, engine0); rndY = std::bind(y, engine1); rndZ = std::bind(z, engine2); } #if !CUDA #if THREADED void BoidScene::UpdateScene(float dt) { count += dt; if (count > 2500.0f) { m_FlockHeading = glm::vec3(rndX(), rndY(), rndZ()); count = 0.0f; } size_t distribution = boids.size() / NUMBER_OF_THREADS; futures.push_back(std::async(std::launch::async, &BoidScene::UpdatePartition, this, 0, distribution, dt)); futures.push_back(std::async(std::launch::async, &BoidScene::UpdatePartition, this, distribution + 1, 2 * distribution, dt)); futures.push_back(std::async(std::launch::async, &BoidScene::UpdatePartition, this, 2 * distribution + 1, 3 * distribution, dt)); futures.push_back(std::async(std::launch::async, &BoidScene::UpdatePartition, this, 3 * distribution + 1, 4 * distribution, dt)); futures.push_back(std::async(std::launch::async, &BoidScene::UpdatePartition, this, 4 * distribution + 1, 5 * distribution, dt)); futures.push_back(std::async(std::launch::async, &BoidScene::UpdatePartition, this, 5 * distribution + 1, 6 * distribution, dt)); futures.push_back(std::async(std::launch::async, &BoidScene::UpdatePartition, this, 6 * distribution + 1, 7 * distribution, dt)); futures.push_back(std::async(std::launch::async, &BoidScene::UpdatePartition, this, 7 * distribution + 1, boids.size() - 1, dt)); for (auto& future : futures) { future.get(); } futures.clear(); Scene::UpdateScene(dt); } void BoidScene::UpdatePartition(size_t begin, size_t end, float dt) { glm::vec3 posA, posB; for (unsigned int i = begin; i <= end; ++i) { posA = boids[i]->GetPosition(); for (unsigned int j = 0; j < boids.size(); ++j) { if (i != j) { posB = boids[j]->GetPosition(); float dist = glm::length(posA - posB); //if (dist <= MAX_DISTANCE) { BoidNeighbour bNA; bNA.n = boids[j]; bNA.dist = dist; boids[i]->AddNeighbour(bNA); boids[i]->UpdateFlockHeading(m_FlockHeading); } } } boids[i]->OnUpdateObject(dt); } } #else void BoidScene::UpdateScene(float dt) { count += dt; if (count > 2000.0f) { m_FlockHeading = glm::vec3(rndX(), rndY(), rndZ()); count = 0.0f; } glm::vec3 posA, posB; for (unsigned int i = 0; i < boids.size() - 1; ++i) { posA = boids[i]->GetPosition(); for (unsigned int j = i + 1; j < boids.size(); ++j) { if (i != j) { posB = boids[j]->GetPosition(); float dist = glm::length(posA - posB); BoidNeighbour bNA; bNA.n = boids[j]; bNA.dist = dist; boids[i]->AddNeighbour(bNA); BoidNeighbour bNB; bNB.n = boids[i]; bNB.dist = dist; boids[j]->AddNeighbour(bNB); } } } for (unsigned int i = 0; i < boids.size(); ++i) boids[i]->OnUpdateObject(dt); Scene::UpdateScene(dt); } #endif #else void BoidScene::UpdateScene(float dt) { compute_KNN << <BLOCKS_K2, THREADS_K2 >> >(boidGPU.GetBoidData(), maxBoids, MAX_DISTANCE); updateBoids << <BLOCKS_K2, THREADS_K2 >> >(boidGPU.GetBoidData(), dt, maxBoids); Scene::UpdateScene(dt); } __global__ void compute_KNN(BoidGPU* boid, const uint32_t maxBoids, const float MAX_DISTANCE) { int tid_x = threadIdx.x + blockIdx.x * blockDim.x; if (tid_x >= maxBoids) return; BoidGPU& temp = boid[tid_x]; temp.lastIndex = 0; unsigned int counter = 0; for (unsigned int i = 0; i < maxBoids; ++i) { if (glm::distance(temp.m_Position, boid[i].m_Position) <= MAX_DISTANCE) { if (counter < 50) { temp.neighbours[counter] = &boid[i]; counter++; } else { temp.lastIndex = counter; return; } } } } __device__ void CalcCohesion(BoidGPU& boid, glm::vec3& cohVec) { glm::vec3 avgPos = glm::vec3(0, 0, 0); for (unsigned int i = 0; i < boid.lastIndex; ++i) { avgPos += boid.neighbours[i]->m_Position; } avgPos /= 50.0f; cohVec = (avgPos - boid.m_Position) /* 0.001f*/; float mag = glm::length(cohVec); glm::normalize(cohVec); cohVec *= (0.25f * (mag * 0.001f)); cohVec -= boid.m_Velocity; } __device__ void CalcSeperation(BoidGPU& boid, glm::vec3& sepVec) { for (unsigned int i = 0; i < boid.lastIndex; ++i) { sepVec -= (boid.neighbours[i]->m_Position - boid.m_Position) / glm::distance(boid.neighbours[i]->m_Position, boid.m_Position); } sepVec /= 50.0f; sepVec *= 0.3f; } __device__ void CalcAlignment(BoidGPU& boid, glm::vec3& alignVec) { glm::vec3 avgVel = glm::vec3(0, 0, 0); for (unsigned int i = 0; i < boid.lastIndex; ++i) { avgVel += boid.neighbours[i]->m_Velocity; } avgVel /= 50.0f; alignVec = (avgVel - boid.m_Velocity) * 0.8f; } __global__ void updateBoids(BoidGPU* boid, float dt, const uint32_t maxBoids) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= maxBoids) return; BoidGPU& temp = boid[tid]; glm::vec3 cohVec(0, 0, 0); glm::vec3 sepVec(0, 0, 0); glm::vec3 alignVec(0, 0, 0); CalcCohesion(temp, cohVec); CalcSeperation(temp, sepVec); CalcAlignment(temp, alignVec); temp.m_Velocity = cohVec + sepVec + alignVec; temp.m_Position += (temp.m_Velocity * dt); temp.m_WorldTransform = glm::translate(glm::mat4(1.0f), temp.m_Position); temp.lastIndex = 0; } #endif<file_sep>#include "OGLRenderer.h" #include "Shader.h" #include "Scene.h" #include <glm/gtc/type_ptr.hpp> OGLRenderer* OGLRenderer::instance = nullptr; unsigned int OGLRenderer::num_boids = 0; OGLRenderer* OGLRenderer::Instance() { if (!instance) { instance = new OGLRenderer(); } return instance; } void OGLRenderer::Release() { if (instance) { delete instance; instance = nullptr; } } OGLRenderer::OGLRenderer() { init_glfw(); init_glew(); currentScene = nullptr; currentShader = nullptr; glGenBuffers(1, &SSBO); glBindBuffer(GL_SHADER_STORAGE_BUFFER, SSBO); glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(glm::mat4) * num_boids, 0, GL_DYNAMIC_COPY); glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 6, SSBO); glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0); } OGLRenderer::~OGLRenderer() { if (window) { glfwDestroyWindow(window); } glDeleteBuffers(1, &SSBO); } void OGLRenderer::init_glew() { glewExperimental = GL_TRUE; // Initialise GLEW if (glewInit() != GLEW_OK) { std::cerr << "Error: Failed to initialise GLEW." << std::endl; system("pause"); exit(1); } glEnable(GL_DEPTH_TEST); glEnable(GL_DEPTH_CLAMP); glDepthFunc(GL_LEQUAL); glClearDepth(1.0f); //glEnable(GL_CULL_FACE); //glCullFace(GL_BACK); glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); glClearColor(0.0f, 0.0f, 0.0f, 1.0f); } void OGLRenderer::init_glfw() { // Initialise GLFW if (!glfwInit()) { std::cerr << "Error: Failed to initialise GLFW." << std::endl; system("pause"); exit(1); } //Set window variables MODE = glfwGetVideoMode(glfwGetPrimaryMonitor()); glfwWindowHint(GLFW_SAMPLES, 0); // no AA glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4); //OpenGL 4.5 glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 5); glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_COMPAT_PROFILE); glfwWindowHint(GLFW_DEPTH_BITS, 32); // Open a window and create its OpenGL context window = glfwCreateWindow(WIDTH, HEIGHT, TITLE, nullptr, nullptr); if (window == nullptr) { std::cerr << "Error: Failed to open GLFW window. OpenGL 4.2 Required." << std::endl; system("pause"); glfwTerminate(); exit(1); } glfwMakeContextCurrent(window); glfwSetInputMode(window, GLFW_STICKY_KEYS | GLFW_CURSOR_DISABLED, GL_TRUE); } void OGLRenderer::SetCurrentShader(Shader* s) { if (s != currentShader) { currentShader = s; glUseProgram(currentShader->GetShaderProgram()); glUniformMatrix4fv(currentShader->GetVPMatrixLoc(), 1, GL_FALSE, (float*)currentScene->GetCamera()->GetVP()); } } void OGLRenderer::Render(Timer* gt) { glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT); if (currentScene) { currentScene->UpdateScene(gt->getLast()); elapsedCPU += gt->split(); currentScene->RenderScene(); } currentShader = nullptr; glfwSwapBuffers(window); } bool OGLRenderer::ShouldClose() { return (glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS) && (glfwWindowShouldClose(window) == 0); }<file_sep>#pragma once #include <vector> #include "Entity.h" #include "Camera.h" class Scene { public: Scene(); virtual ~Scene(); virtual void RenderScene(); virtual void UpdateScene(float dt); Entity* GetOpaqueObject(unsigned int i); Entity* GetTransparentObject(unsigned int i); Camera* GetCamera() { return cam; } void AddEntity(Entity* e); protected: std::vector<Entity*> transparentObjects; std::vector<Entity*> opaqueObjects; Camera* cam; };<file_sep>#include "Mesh.h" #include <GL/glew.h> Mesh::Mesh(void) { m_NumVertices = 0; m_PrimitiveType = 0; m_NumIndices = 0; m_Vertices = nullptr; m_TextureCoords = nullptr; m_Normals = nullptr; m_Tangents = nullptr; m_Indices = nullptr; m_Children.clear(); arrayObject = 0; for (int i = 0; i < MAX_BUFFER; ++i) bufferObject[i] = 0; } Mesh::Mesh(uint32_t numVertices, glm::vec3* vertices, glm::vec2* texCoords, glm::vec3* normals, glm::vec3* tangents, uint32_t numIndices, uint32_t* indices) { m_Children.clear(); m_NumVertices = numVertices; m_NumIndices = numIndices; m_Vertices = vertices; m_TextureCoords = texCoords; m_Normals = normals; m_Tangents = tangents; m_Indices = indices; m_PrimitiveType = GL_TRIANGLE_STRIP; arrayObject = 0; for (unsigned int i = 0; i < MAX_BUFFER; ++i) bufferObject[i] = 0; BufferData(); } Mesh::~Mesh(void) { Clean(); glDeleteBuffers(MAX_BUFFER, bufferObject); if (multiDrawArray) { delete multiDrawArray; multiDrawArray = nullptr; } } void Mesh::Clean() { if (m_Vertices) { delete[] m_Vertices; m_Vertices = nullptr; } if (m_Indices) { delete[] m_Indices; m_Indices = nullptr; } if (m_TextureCoords) { delete[] m_TextureCoords; m_TextureCoords = nullptr; } if (m_Tangents) { delete[] m_Tangents; m_Tangents = nullptr; } if (m_Normals) { delete[] m_Normals; m_Normals = nullptr; } } Mesh* Mesh::GenerateSphere(uint32_t height, uint32_t width) { Mesh* mesh = new Mesh(); mesh->m_NumVertices = height * width; mesh->m_NumIndices = (height * width) * 6; mesh->m_PrimitiveType = GL_TRIANGLE_STRIP; mesh->m_Vertices = new glm::vec3[mesh->m_NumVertices]; mesh->m_Indices = new uint32_t[mesh->m_NumIndices]; //Iterates through angles of phi and theta to produce sphere. for (uint32_t y = 0; y < height; y++) { double phi = (double(y) * PI) / (height - 1); for (uint32_t x = 0; x < width; x++) { double theta = (double(x) * (2 * PI)) / (width - 1); mesh->m_Vertices[(y * x) + x] = glm::normalize(glm::vec3(sin(phi) * cos(theta), cos(phi), sin(phi) * sin(theta))); } } for (uint32_t i = 0; i < height - 1; ++i) { mesh->m_Indices[i] = (i * width); for (uint32_t j = 0; j < width; ++j) { mesh->m_Indices[(i * j) + j + 1] = (i * width + j); mesh->m_Indices[(i * j) + j + 2] = ((i + 1) * width + j); } mesh->m_Indices[i + 2] = ((i + 1) * width + (width - 1)); } return mesh; } Mesh* Mesh::GenerateTriangle(bool multiDraw, unsigned int num_elements) { Mesh* mesh = new Mesh(); mesh->m_NumVertices = 3; mesh->m_NumIndices = 3; mesh->m_PrimitiveType = GL_TRIANGLE_STRIP; mesh->m_Vertices = new glm::vec3[mesh->m_NumVertices]; mesh->m_TextureCoords = new glm::vec2[mesh->m_NumVertices]; mesh->m_Normals = new glm::vec3[mesh->m_NumVertices]; mesh->m_Tangents = new glm::vec3[mesh->m_NumVertices]; mesh->m_Indices = new uint32_t[mesh->m_NumVertices]; mesh->m_Vertices[0] = glm::vec3(0.0f, 0.5f, 0.0f); mesh->m_Vertices[1] = glm::vec3(0.5f, -0.5f, 0.0f); mesh->m_Vertices[2] = glm::vec3(-0.5f, -0.5f, 0.0f); mesh->m_TextureCoords[0] = glm::vec2(0.5f, 0.0f); mesh->m_TextureCoords[1] = glm::vec2(1.0f, 1.0f); mesh->m_TextureCoords[2] = glm::vec2(0.0f, 1.0f); for (unsigned int i = 0; i < mesh->m_NumVertices; ++i) { mesh->m_Normals[i] = glm::vec3(0, 0, 1); mesh->m_Tangents[i] = glm::vec3(1, 0, 0); mesh->m_Indices[i] = i; } mesh->numElements = num_elements; mesh->BufferData(multiDraw); return mesh; } Mesh* Mesh::GenerateQuad(glm::vec2 texCoords) { Mesh* m = new Mesh(); m->m_NumVertices = 4; m->m_NumIndices = 4; m->m_PrimitiveType = GL_TRIANGLE_STRIP; m->m_Vertices = new glm::vec3[m->m_NumVertices]; m->m_Indices = new uint32_t[m->m_NumIndices]; m->m_TextureCoords = new glm::vec2[m->m_NumVertices]; m->m_Normals = new glm::vec3[m->m_NumVertices]; m->m_Tangents = new glm::vec3[m->m_NumVertices]; m->m_Vertices[0] = glm::vec3(-1.0f, 1.0f, 0.0f); m->m_Vertices[1] = glm::vec3(-1.0f, -1.0f, 0.0f); m->m_Vertices[2] = glm::vec3(1.0f, 1.0f, 0.0f); m->m_Vertices[3] = glm::vec3(1.0f, -1.0f, 0.0f); m->m_TextureCoords[0] = glm::vec2(0.0f, texCoords.y); m->m_TextureCoords[1] = glm::vec2(0.0f, 0.0f); m->m_TextureCoords[2] = glm::vec2(texCoords.x, texCoords.y); m->m_TextureCoords[3] = glm::vec2(texCoords.x, 0.0f); for (unsigned int i = 0; i < m->m_NumIndices; ++i) { m->m_Normals[i] = glm::vec3(0.0f, 0.0f, -1.0f); m->m_Tangents[i] = glm::vec3(1.0f, 0.0f, 0.0f); m->m_Indices[i] = i; } m->BufferData(); return m; } Mesh* Mesh::GenerateQuadAlt() { Mesh* m = new Mesh(); m->m_NumVertices = 4; m->m_NumIndices = 6; m->m_PrimitiveType = GL_TRIANGLE_STRIP; m->m_Vertices = new glm::vec3[m->m_NumVertices]; m->m_Indices = new uint32_t[m->m_NumIndices]; m->m_TextureCoords = new glm::vec2[m->m_NumVertices]; m->m_Normals = new glm::vec3[m->m_NumVertices]; m->m_Tangents = new glm::vec3[m->m_NumVertices]; m->m_Vertices[0] = glm::vec3(0.0f, 0.0f, 0.0f); m->m_Vertices[1] = glm::vec3(0.0f, 1.0f, 0.0f); m->m_Vertices[2] = glm::vec3(1.0f, 0.0f, 0.0f); m->m_Vertices[3] = glm::vec3(1.0f, 1.0f, 0.0f); m->m_TextureCoords[0] = glm::vec2(0.0f, 0.0f); m->m_TextureCoords[1] = glm::vec2(0.0f, 1.0f); m->m_TextureCoords[2] = glm::vec2(1.0f, 0.0f); m->m_TextureCoords[3] = glm::vec2(1.0f, 1.0f); for (unsigned int i = 0; i < m->m_NumIndices; ++i) { m->m_Normals[i] = glm::vec3(0.0f, 0.0f, -1.0f); m->m_Tangents[i] = glm::vec3(1.0f, 0.0f, 0.0f); m->m_Indices[i] = i; } m->BufferData(); return m; } void Mesh::GenerateNormals() { if (!m_Normals) m_Normals = new glm::vec3[m_NumVertices]; else return; for (unsigned int i = 0; i < m_NumVertices; ++i) m_Normals[i] = glm::vec3(0, 0, 0); if (m_Indices) { for (unsigned int i = 0; i < m_NumIndices; i += 3) { int a = m_Indices[i]; int b = m_Indices[i + 1]; int c = m_Indices[i + 2]; glm::vec3 normal = glm::cross(m_Vertices[b] - m_Vertices[a], m_Vertices[c] - m_Vertices[a]); m_Normals[a] += normal; m_Normals[b] += normal; m_Normals[c] += normal; } } else { for (unsigned int i = 0; i < m_NumVertices; i += 3) { glm::vec3& a = m_Vertices[i]; glm::vec3& b = m_Vertices[i + 1]; glm::vec3& c = m_Vertices[i + 2]; glm::vec3 normal = glm::cross(a - b, a - c); m_Normals[i] = normal; m_Normals[i + 1] = normal; m_Normals[i + 2] = normal; } } for (unsigned int i = 0; i < m_NumVertices; ++i) glm::normalize(m_Normals[i]); } void Mesh::GenerateTangents() { if (!m_TextureCoords) return; if (m_Tangents) return; if (!m_Tangents) m_Tangents = new glm::vec3[m_NumVertices]; else return; for (unsigned int i = 0; i < m_NumVertices; ++i) m_Tangents[i] = glm::vec3(0, 0, 0); if (m_Indices) { for (unsigned int i = 0; i < m_NumIndices; i += 3) { int a = m_Indices[i]; int b = m_Indices[i + 1]; int c = m_Indices[i + 2]; glm::vec3 tangent = GenerateTangent(m_Vertices[a], m_Vertices[b], m_Vertices[c], m_TextureCoords[a], m_TextureCoords[b], m_TextureCoords[c]); m_Tangents[a] += tangent; m_Tangents[b] += tangent; m_Tangents[c] += tangent; } } else { for (unsigned int i = 0; i < m_NumVertices; i += 3) { glm::vec3 tangent = GenerateTangent(m_Vertices[i], m_Vertices[i + 1], m_Vertices[i + 2], m_TextureCoords[i], m_TextureCoords[i + 1], m_TextureCoords[i + 2]); m_Tangents[i] += tangent; m_Tangents[i + 1] += tangent; m_Tangents[i + 2] += tangent; } } for (unsigned int i = 0; i < m_NumVertices; ++i) glm::normalize(m_Tangents[i]); } glm::vec3 Mesh::GenerateTangent(const glm::vec3& a, const glm::vec3& b, const glm::vec3& c, const glm::vec2& ta, const glm::vec2& tb, const glm::vec2& tc) { glm::vec2 coord1 = tb - ta; glm::vec2 coord2 = tc - ta; glm::vec3 vertex1 = b - a; glm::vec3 vertex2 = c - a; glm::vec3 axis = glm::vec3(vertex1*coord2.y - vertex2*coord1.y); float factor = 1.0f / (coord1.x * coord2.y - coord2.x * coord1.y); return axis * factor; } void Mesh::BufferData(bool multiDraw) { glGenVertexArrays(1, &arrayObject); //GenerateNormals(); //GenerateTangents(); glBindVertexArray(arrayObject); //Buffer vertex data glGenBuffers(1, &bufferObject[VERTEX_BUFFER]); glBindBuffer(GL_ARRAY_BUFFER, bufferObject[VERTEX_BUFFER]); glBufferData(GL_ARRAY_BUFFER, m_NumVertices * sizeof(glm::vec3), m_Vertices, GL_STATIC_DRAW); glVertexAttribPointer(VERTEX_BUFFER, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), 0); glEnableVertexAttribArray(VERTEX_BUFFER); //Buffer texture data /*if (m_TextureCoords) { glGenBuffers(1, &bufferObject[TEXTURE_BUFFER]); glBindBuffer(GL_ARRAY_BUFFER, bufferObject[TEXTURE_BUFFER]); glBufferData(GL_ARRAY_BUFFER, m_NumVertices * sizeof(Vec2Graphics), m_TextureCoords, GL_STATIC_DRAW); glVertexAttribPointer(TEXTURE_BUFFER, 2, GL_FLOAT, GL_FALSE, sizeof(Vec2Graphics), 0); glEnableVertexAttribArray(TEXTURE_BUFFER); } //Buffer normal data if (m_Normals) { glGenBuffers(1, &bufferObject[NORMAL_BUFFER]); glBindBuffer(GL_ARRAY_BUFFER, bufferObject[NORMAL_BUFFER]); glBufferData(GL_ARRAY_BUFFER, m_NumVertices * sizeof(Vec3Graphics), m_Normals, GL_STATIC_DRAW); glVertexAttribPointer(NORMAL_BUFFER, 3, GL_FLOAT, GL_FALSE, sizeof(Vec3Graphics), 0); glEnableVertexAttribArray(NORMAL_BUFFER); } //Buffer tangent data if (m_Tangents) { glGenBuffers(1, &bufferObject[TANGENT_BUFFER]); glBindBuffer(GL_ARRAY_BUFFER, bufferObject[TANGENT_BUFFER]); glBufferData(GL_ARRAY_BUFFER, m_NumVertices * sizeof(Vec3Graphics), m_Tangents, GL_STATIC_DRAW); glVertexAttribPointer(TANGENT_BUFFER, 3, GL_FLOAT, GL_FALSE, sizeof(Vec3Graphics), 0); glEnableVertexAttribArray(TANGENT_BUFFER); }*/ //buffer index data if (m_Indices) { glGenBuffers(1, &bufferObject[INDEX_BUFFER]); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufferObject[INDEX_BUFFER]); glBufferData(GL_ELEMENT_ARRAY_BUFFER, m_NumIndices * sizeof(unsigned int), m_Indices, GL_STATIC_DRAW); } if (multiDraw) { multiDrawArray = new DrawElementsCommand[numElements]; for (unsigned int i = 0; i < numElements; ++i) { multiDrawArray[i].vertexCount = m_NumVertices; multiDrawArray[i].instanceCount = 1; multiDrawArray[i].firstIndex = 0; multiDrawArray[i].baseVertex = 0; multiDrawArray[i].baseInstance = i; } glGenBuffers(1, &bufferObject[INDIRECT_BUFFER]); glBindBuffer(GL_DRAW_INDIRECT_BUFFER, bufferObject[INDIRECT_BUFFER]); glBufferData(GL_DRAW_INDIRECT_BUFFER, sizeof(DrawElementsCommand) * numElements, multiDrawArray, GL_STATIC_DRAW); } Clean(); for (auto& child : m_Children) child->BufferData(); glBindVertexArray(0); } void Mesh::Draw() { glBindVertexArray(arrayObject); if (bufferObject[INDIRECT_BUFFER]) { //Batch draw all elements - this function produces a single draw call glMultiDrawElementsIndirect(GL_TRIANGLE_STRIP, GL_UNSIGNED_INT, 0, numElements, 0); } else { if (bufferObject[INDEX_BUFFER]) { glDrawElements(m_PrimitiveType, m_NumIndices, GL_UNSIGNED_INT, 0); } else { glDrawArrays(m_PrimitiveType, 0, m_NumVertices); } } for (auto child : m_Children) child->Draw(); }<file_sep>#include "Shader.h" Shader::Shader() { shaderProgram = 0; modelMatrixLoc = 0; VPMatrixLoc = 0; } Shader::Shader(std::string vertex_file, std::string frag_file, std::string geo_file, std::string tcs_file, std::string tes_file) { readFile(vertex_file, VERTEX_SHADER_BUFFER); readFile(frag_file, FRAG_SHADER_BUFFER); if (!geo_file.empty()) { readFile(geo_file, GEO_SHADER_BUFFER); } if (!tcs_file.empty() && !tes_file.empty()) { readFile(tcs_file, TCS_SHADER_BUFFER); readFile(tes_file, TES_SHADER_BUFFER); } init_shader(); SetDefaultAttributes(); } Shader::~Shader() { //Destroy shader glUseProgram(0); glDetachShader(shaderProgram, *VERTEX_SHADER_BUFFER.c_str()); glDetachShader(shaderProgram, *FRAG_SHADER_BUFFER.c_str()); glDeleteShader(*VERTEX_SHADER_BUFFER.c_str()); glDeleteShader(*FRAG_SHADER_BUFFER.c_str()); glDeleteProgram(shaderProgram); } void Shader::init_shader() { GLint shaderTest = 0; GLchar log[1024] = { 0 }; shaderProgram = glCreateProgram(); if (!shaderProgram) { std::cerr << "Error: Failed to create shader program." << std::endl; system("pause"); exit(1); } createShader(shaderProgram, VERTEX_SHADER_BUFFER.c_str(), GL_VERTEX_SHADER); createShader(shaderProgram, FRAG_SHADER_BUFFER.c_str(), GL_FRAGMENT_SHADER); if (!GEO_SHADER_BUFFER.empty()) { createShader(shaderProgram, GEO_SHADER_BUFFER.c_str(), GL_GEOMETRY_SHADER); } if (!TCS_SHADER_BUFFER.empty() && !TES_SHADER_BUFFER.empty()) { createShader(shaderProgram, TCS_SHADER_BUFFER.c_str(), GL_TESS_CONTROL_SHADER); createShader(shaderProgram, TES_SHADER_BUFFER.c_str(), GL_TESS_EVALUATION_SHADER); } glLinkProgram(shaderProgram); glGetProgramiv(shaderProgram, GL_LINK_STATUS, &shaderTest); if (!shaderTest) { glGetProgramInfoLog(shaderProgram, sizeof(log), NULL, log); std::cerr << "Failed to link shader program:\n" << log << std::endl; system("pause"); exit(1); } shaderTest = 0; glValidateProgram(shaderProgram); glGetProgramiv(shaderProgram, GL_VALIDATE_STATUS, &shaderTest); if (!shaderTest) { glGetProgramInfoLog(shaderProgram, sizeof(log), NULL, log); std::cerr << "Failed to validate shader program:\n" << log << std::endl; system("pause"); exit(1); } glUseProgram(shaderProgram); } void Shader::createShader(GLuint program, const char *shader, GLenum type) { GLuint shaderObj = glCreateShader(type); if (!shaderObj) { std::cerr << "Error: Failed to create shader object:\n" << type << std::endl; system("pause"); exit(1); } const GLchar *shaderPointer = shader; GLint length[] = { strlen(shader) }; glShaderSource(shaderObj, 1, &shaderPointer, length); glCompileShader(shaderObj); GLint shaderTest = 0; glGetShaderiv(shaderObj, GL_COMPILE_STATUS, &shaderTest); if (!shaderTest) { GLchar log[1024]; glGetShaderInfoLog(shaderObj, sizeof(log), NULL, log); std::cerr << "Failed to compile shader:\n" << log << std::endl; system("pause"); exit(1); } glAttachShader(program, shaderObj); } void Shader::readFile(const std::string &file, std::string& buf) { std::ifstream ifs(file); if (!ifs) { std::cerr << "Cannot find file: " << file.c_str() << std::endl; system("pause"); exit(1); } //Obtain file size & allocate memory ifs.seekg(0, std::ios::end); buf.reserve(ifs.tellg()); ifs.seekg(0, std::ios::beg); //Read file buf.assign((std::istreambuf_iterator<char>(ifs)), std::istreambuf_iterator<char>()); //Close file stream. ifs.close(); } void Shader::SetDefaultAttributes() { modelMatrixLoc = glGetUniformLocation(shaderProgram, "modelMatrix"); VPMatrixLoc = glGetUniformLocation(shaderProgram, "VPMatrix"); glBindAttribLocation(shaderProgram, VERTEX_BUFFER, "position"); //glBindAttribLocation(shaderProgram, COLOUR_BUFFER, "colour"); //glBindAttribLocation(shaderProgram, NORMAL_BUFFER, "normal"); //glBindAttribLocation(shaderProgram, TANGENT_BUFFER, "tangent"); //glBindAttribLocation(shaderProgram, TEXTURE_BUFFER, "texCoord"); }<file_sep>#pragma once #include"Common.h" #include "RenderComponent.h" class Entity { friend class Scene; public: Entity(const std::string& name = std::to_string(id)); virtual ~Entity(); Entity* FindEntity(const std::string& name); void AddChildObject(Entity* child); inline const std::string& GetName() { return m_Name; } inline std::vector<Entity*>& GetChildren() { return m_Children; } inline void SetWorldTransform(const glm::mat4& transform) { m_WorldTransform = transform; m_WorldTransformPtr = &m_WorldTransform; } inline void SetWorldTransform(glm::mat4* transform) { m_WorldTransformPtr = transform; } inline const glm::mat4* GetWorldTransform() const { return m_WorldTransformPtr; } inline void SetLocalTransform(const glm::mat4& transform) { m_LocalTransform = transform; } inline const glm::mat4& GetLocalTransform() const { return m_LocalTransform; } inline void SetRenderComponent(RenderComponent* comp) { m_RenderComponent = comp; m_RenderComponent->SetParent(this); } inline RenderComponent* GetRenderComponent() const { return m_RenderComponent; } protected: virtual void OnRenderObject(); //Handles OpenGL calls to Render the object virtual void OnUpdateObject(float dt); //Override to handle things like AI etc on update loop std::string m_Name; Entity* m_Parent; std::vector<Entity*> m_Children; RenderComponent* m_RenderComponent; glm::mat4 m_WorldTransform; glm::mat4* m_WorldTransformPtr; glm::mat4 m_LocalTransform; float m_CamDist; //For ordering of rendering lists. static unsigned int id; };<file_sep>#include "Common.h" #include "OGLRenderer.h" #include "BoidScene.h" #include "Common.h" #include "Shader.h" #include "Mesh.h" //Use win32 & Release when compiling for CPU. //Use x64 & Release when compiling for the GPU. int main(void) { int numBoids; std::cout << "Enter the number of Boids to simulate... (Max 1250048)" << std::endl; std::cin >> numBoids; #if CUDA //Calculate closest multiple of threads per block to supplied input value. numBoids = ceilf(float(numBoids) / THREADS_PER_BLOCK) * THREADS_PER_BLOCK; #endif OGLRenderer::num_boids = numBoids; OGLRenderer* renderer = OGLRenderer::Instance(); #if !CUDA Shader* simpleShader = new Shader(SHADER_DIR"vertex_shader.glsl", SHADER_DIR"frag_shader.glsl"); Mesh* triMesh = Mesh::GenerateTriangle(false); #else Shader* simpleShader = new Shader(SHADER_DIR"vertex_shader_multiDraw.glsl", SHADER_DIR"frag_shader.glsl"); Mesh* triMesh = Mesh::GenerateTriangle(true, numBoids); #endif BoidScene* boidScene = new BoidScene(numBoids, simpleShader, triMesh); Timer* gt = new Timer; renderer->SetCurrentScene(boidScene); float frameCount = 0.0f; //Main loop. while (renderer->ShouldClose()) //Check if the ESC key was pressed or the window was closed { frameCount += 1.0f; gt->startTimer(); renderer->Render(gt); glfwPollEvents(); gt->stopTimer(); } //Debug timings #if CUDA float avgCudaComputeTime = boidScene->GetCUDAElapsedTime() / frameCount; std::cout << "CUDA Kernel Average Compute Time: " << avgCudaComputeTime << "ms" << std::endl; #endif std::cout << "CPU Average Compute Time: " << renderer->GetElapsed() / frameCount << "ms" << std::endl; delete gt; delete triMesh; delete simpleShader; delete boidScene; OGLRenderer::Release(); system("pause"); return 0; }<file_sep>//Author: <NAME> //This class loads shaders from file, compiles and links them into a working shader program, using OpenGL. #include <GL/glew.h> #include <iostream> #include <string> #include <fstream> #include "Common.h" class Shader { public: Shader(); Shader(std::string vertex_file, std::string frag_file, std::string geo_file = "", std::string tcs_file = "", std::string tes_file = ""); ~Shader(); inline GLuint GetShaderProgram() const { return shaderProgram; } inline GLuint GetModelMatrixLoc() const { return modelMatrixLoc; } inline GLuint GetVPMatrixLoc() const { return VPMatrixLoc; } private: void init_shader(); void SetDefaultAttributes(); void createShader(GLuint program, const char *shader, GLenum type); void readFile(const std::string &file, std::string& buf); GLuint shaderProgram; GLuint modelMatrixLoc; GLuint VPMatrixLoc; std::string VERTEX_SHADER_BUFFER; std::string FRAG_SHADER_BUFFER; std::string TCS_SHADER_BUFFER; std::string TES_SHADER_BUFFER; std::string GEO_SHADER_BUFFER; };<file_sep>#include "Entity.h" unsigned int Entity::id = 0; Entity::Entity(const std::string& name) { m_Parent = nullptr; m_Name = name; m_CamDist = 0.0f; id++; m_LocalTransform = glm::mat4(1.0f); m_WorldTransform = glm::mat4(1.0f); } Entity::~Entity() { /*if (m_RenderComponent) { delete m_RenderComponent; m_RenderComponent = nullptr; }*/ } Entity* Entity::FindEntity(const std::string& name) { //Has this object got the same name? if (m_Name.compare(name) == 0) { return this; } //Recursively search ALL child objects and return the first one matching the given name for (auto child : m_Children) { //Has the object in question got the same name? Entity* cObj = child->FindEntity(name); if (cObj) { return cObj; } } //Object not found with the given name return nullptr; } void Entity::AddChildObject(Entity* child) { m_Children.push_back(child); child->m_Parent = this; } void Entity::OnRenderObject() { if (m_RenderComponent) m_RenderComponent->Draw(); } void Entity::OnUpdateObject(float dt) { m_WorldTransform = m_LocalTransform; if (m_Parent) m_WorldTransform = m_Parent->m_WorldTransform * m_WorldTransform; m_WorldTransformPtr = &m_WorldTransform; for (auto child : m_Children) { child->OnUpdateObject(dt); } }<file_sep>#pragma once #include "Common.h" struct DrawElementsCommand { unsigned int vertexCount; unsigned int instanceCount; unsigned int firstIndex; unsigned int baseVertex; unsigned int baseInstance; }; class Mesh { public: Mesh(void); Mesh(uint32_t numVertices, glm::vec3* vertices, glm::vec2* texCoords, glm::vec3* normals, glm::vec3* tangents, uint32_t numIndices, uint32_t* indices); virtual ~Mesh(void); void Draw(); inline void AddChild(Mesh* m) { m_Children.push_back(m); } inline const std::vector<Mesh*>& GetChildren() { return m_Children; } //Generates a single triangle, with RGB colours static Mesh* GenerateTriangle(bool multiDraw = false, unsigned int num_elements = 0); static Mesh* GenerateSphere(uint32_t height, uint32_t width); //Generates a single white quad, going from -1 to 1 on the x and z axis. static Mesh* GenerateQuad(glm::vec2 texCoords = glm::vec2(1.0f, 1.0f)); static Mesh* GenerateQuadAlt(); //inline const glm::vec3& GetColour(uint32_t index) const { return m_Colours[index]; } inline uint32_t GetNumVertices() { return m_NumVertices; } inline uint32_t GetNumIndices() { return m_NumIndices; } inline glm::vec3* GetVertices() { return m_Vertices; } inline glm::vec3* GetNormals() { return m_Normals; } inline glm::vec3* GetTangents() { return m_Tangents; } inline glm::vec2* GetTextureCoords() { return m_TextureCoords; } inline uint32_t* GetIndices() { return m_Indices;} //Generates normals for all facets. Assumes geometry type is GL_TRIANGLES... void GenerateNormals(); //Generates tangents for all facets. Assumes geometry type is GL_TRIANGLES... void GenerateTangents(); protected: //Buffers all VBO data into graphics memory. Required before drawing! void BufferData(bool multiDraw = false); //Helper function for GenerateTangents glm::vec3 GenerateTangent(const glm::vec3& a, const glm::vec3& b, const glm::vec3& c, const glm::vec2& ta, const glm::vec2& tb, const glm::vec2& tc); void Clean(); //Number of vertices for this mesh uint32_t m_NumVertices; //Number of indices for this mesh uint32_t m_NumIndices; glm::vec3* m_Vertices; //Pointer to vertex texture coordinate attribute data glm::vec2* m_TextureCoords; //Pointer to vertex normals attribute data glm::vec3* m_Normals; //Pointer to vertex tangents attribute data glm::vec3* m_Tangents; //Pointer to vertex indices attribute data uint32_t* m_Indices; unsigned int m_PrimitiveType; std::vector<Mesh*> m_Children; //VAO for this mesh unsigned int arrayObject; //VBOs for this mesh unsigned int bufferObject[MAX_BUFFER]; unsigned int numElements = 0; //DrawElementsCommand multiDrawArray[NUM_BOIDS]; DrawElementsCommand* multiDrawArray = nullptr; };<file_sep>//Author: <NAME> - 120266942 //Provides an accurate timer for the game engine. #pragma once #include <Windows.h> class Timer { public: Timer(); void startTimer(); void stopTimer(); double split(); double getLast(); private: LARGE_INTEGER frequency; LARGE_INTEGER start; LARGE_INTEGER end; double last = 1.0; };<file_sep>//Author: <NAME> //This class polls input from the keyboard & mouse, it also sets the model view projection matrix. #pragma once #include "Common.h" struct GLFWwindow; class Camera { public: Camera(GLFWwindow* window, unsigned int window_height, unsigned int window_width, float FOV, float nearPlane, float farPlane); ~Camera(); //Setup MVP matrix void UpdateCamera(float dt); inline const glm::mat4* GetVP() const { return &VP; } private: void pollMouse(float dt); void pollKeyBoard(float dt); //camera transform variables float pitch = 0.0; float yaw = 0.0; const float speed = 0.08f; const float mouseSpeed = 0.003f; glm::vec3 position; glm::vec3 target; glm::vec3 look; glm::vec3 up; glm::vec3 right; //Projection, model and view matrices glm::mat4 Projection; glm::mat4 View; glm::mat4 VP; //Enviroment variables GLFWwindow* window; unsigned int window_height; unsigned int window_width; float half_height; float half_width; };<file_sep>#pragma once class Entity; class Mesh; class Shader; class RenderComponent { public: RenderComponent(Mesh* mesh, Shader* shader); ~RenderComponent(); void Draw(); void SetParent(Entity* e); private: Entity* m_Entity; Mesh* m_Mesh; Shader* m_Shader; };<file_sep>#include "Timer.h" Timer::Timer() { QueryPerformanceFrequency(&frequency); } void Timer::startTimer() { QueryPerformanceCounter(&start); } void Timer::stopTimer() { QueryPerformanceCounter(&end); last = (end.QuadPart - start.QuadPart) * 1000.0 / frequency.QuadPart; } double Timer::getLast() { return last; } double Timer::split() { LARGE_INTEGER tmp; QueryPerformanceCounter(&tmp); return (tmp.QuadPart - start.QuadPart) * 1000.0 / frequency.QuadPart; }
e58ef144417aa63c2318ca4e3a48a7058dd3bc4b
[ "C", "C++" ]
23
C++
shaunno94/MComp_Dissertation
350fed6ff5369e0a98704741228ce4d6a9a336dd
9040e4bfbd95ad233ae1e78dcbb264521f50091d
refs/heads/master
<file_sep>// Copyright 2019 <NAME> // Author: <NAME> <<EMAIL>> // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. #include <papi.h> #include <stdio.h> #define CODEGEN_DEFINE(var) codegen_define(#var, var) void codegen_define(const char *name, const int val) { printf("pub const %s: ::std::os::raw::c_int = %d;\n", name, val); } void codegen() { CODEGEN_DEFINE(PAPI_VER_CURRENT); CODEGEN_DEFINE(PAPI_NATIVE_MASK); } int main(void) { codegen(); return 0; } <file_sep>// Copyright 2018-2019 German Research Center for Artificial Intelligence (DFKI) // Copyright 2019 <NAME> // // Authors: // <NAME> <<EMAIL>> // <NAME> <<EMAIL>> // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] include!(concat!(env!("OUT_DIR"), "/bindings.rs")); include!(concat!(env!("OUT_DIR"), "/codegen.rs")); #[cfg(test)] mod tests { use lazy_static::lazy_static; lazy_static! { static ref IS_PAPI_INITED: bool = { do_papi_init(); true }; } use super::*; fn do_papi_init() { unsafe { let ver = PAPI_library_init(PAPI_VER_CURRENT); assert_eq!(ver, PAPI_VER_CURRENT); } let is_inited = unsafe { PAPI_is_initialized() }; assert_ne!(is_inited, PAPI_NOT_INITED as i32); } #[test] fn get_real_cyc() { let cycles = unsafe { PAPI_get_real_cyc() }; assert!(cycles >= 0); } #[test] fn get_num_counters() { let num_hwcntrs = unsafe { PAPI_num_counters() }; assert!(num_hwcntrs >= 0); } } <file_sep>// Copyright 2018-2019 German Research Center for Artificial Intelligence (DFKI) // Copyright 2019 <NAME> // // Authors: // <NAME> <<EMAIL>> // <NAME> <<EMAIL>> // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. #include <papi.h> <file_sep>papi-sys ======== ## Usage First, add the following to your `Cargo.toml`: ```toml [dependencies] papi-sys = "0.1.0" ``` Before building, ensure that PAPI is installed on your system. ## What is papi-sys? The purpose of this crate is to provide 1:1 bindings for papi.h. PAPI is a library that provides a consistent interface to hardware performance counters. Visit the [PAPI website](http://icl.utk.edu/papi) for more information. Note that this crate does not provide a high-level interface to PAPI. ## Environment Variables There are two environment variables to specify a custom PAPI library path: - `PAPI_PREFIX`: required to generate `bindings.rs` - `LD_LIBRARY_PATH`: required to dynamically link `libpapi.so` Let's assume you installed PAPI in `/opt/papi/5.7.0/`, then you can test by ```bash $ PAPI_PREFIX=/opt/papi/5.7.0/ LD_LIBRARY_PATH=/opt/papi/5.7.0/lib:$LD_LIBRARY_PATH cargo test ``` To avoid setting `LD_LIBRARY_PATH`, you can configure the search path globally by running: ```bash $ sudo echo "/opt/papi/5.7.0/" > /etc/ld.so.conf.d/papi.conf $ sudo ldconfig ``` ## Platforms The following platforms are currently tested: * `x86_64-unknown-linux-gnu` * `powerpc64le-unknown-linux-gnu` ## Dependencies The following dependency versions are currently required: * `rustc` >= 1.36 * `gcc` >= 4.8 or `clang` >= 3.8 ## License Licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. <file_sep>// Copyright 2018-2019 German Research Center for Artificial Intelligence (DFKI) // Copyright 2019 <NAME> // // Authors: // <NAME> <<EMAIL>> // <NAME> <<EMAIL>> // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::env; use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; use std::process::Command; fn main() -> std::io::Result<()> { let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); let papi_prefix_path = env::var("PAPI_PREFIX").map(|p| PathBuf::from(p)).ok(); let clang_args = if let Some(p) = papi_prefix_path { println!("cargo:rustc-link-search={}", p.join("lib").display()); println!("cargo:rust-flags=-L{}", p.join("lib").display()); vec![ format!("-I{}", p.join("include").display()), format!("-L{}", p.join("lib").display()), ] } else { Vec::new() }; println!("cargo:rustc-link-lib=papi"); bindgen::builder() .rustfmt_bindings(false) .header("wrapper.h") .clang_args(clang_args.iter()) .whitelist_recursively(false) .whitelist_type("^PAPI_[[:alpha:]_]+") .whitelist_type("^_papi_[[:alpha:]_]+") .whitelist_function("^PAPI_[[:alpha:]_]+") .whitelist_function("^_papi_[[:alpha:]_]+") .whitelist_var("^PAPI_[[:alpha:]_]+") .whitelist_var("^_papi_[[:alpha:]_]+") .whitelist_type("caddr_t") .whitelist_type("__caddr_t") .whitelist_type("_dmem_t") .whitelist_type("event_info") .generate() .expect("Unable to generate PAPI bindings") .write_to_file(out_path.join("bindings.rs")) .expect("Unable to write PAPI bindings"); let codegen_stdout = Command::new("sh") .arg("codegen.sh") .output() .unwrap() .stdout; let mut file = File::create(out_path.join("codegen.rs"))?; file.write_all(&codegen_stdout)?; file.sync_all()?; Ok(()) } <file_sep># Copyright 2019 <NAME> # Author: <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or # http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or # http://opensource.org/licenses/MIT>, at your option. This file may not be # copied, modified, or distributed except according to those terms. gcc -I$PAPI_PREFIX/include -L$PAPI_PREFIX/lib -o codegen codegen.c ./codegen rm -f codegen <file_sep>[package] name = "papi-sys" version = "0.1.1" authors = ["<NAME> <<EMAIL>>", "<NAME> <<EMAIL>>"] description = "PAPI (Performance API) bindings for Rust" repository = "https://github.com/LutzCle/papi-sys" readme = "README.md" license = "MIT OR Apache-2.0" keywords = ["hardware-counters", "performance-counters"] categories = ["hardware-support", "development-tools::profiling", "api-bindings"] edition = "2018" build = "build.rs" links = "papi_wrapper" [badges] travis-ci = { repository = "LutzCle/papi-sys" } maintenance = { status = "actively-developed" } [dependencies] [build-dependencies] bindgen = "0.50" [dev-dependencies] lazy_static = "1.4.0"
312db9c2acefcda46c83419a75be2e85d9b77e87
[ "Markdown", "TOML", "Rust", "C", "Shell" ]
7
C
LutzCle/papi-sys
790c48aba9f939578e31d135bf8162a64095698a
cd5a6be3ec783022200fd1c5d6a36233facddd40
refs/heads/master
<repo_name>kmhk/Miacommunity_iOS<file_sep>/Miacommunity/Data/MiaThread.swift // // MiaThread.swift // Miacommunity // // Created by admin on 10/24/19. // Copyright © 2019 KMHK. All rights reserved. // import Foundation struct MiaThread { var node_id: Int? var thread_id: Int? var title: String? var createDate: Date? var creator: MiaUser? var replies: Int? var views: Int? var category: String? init(dict: [String: Any]) { node_id = ((dict["node_id"] ?? 0) as! Int) thread_id = ((dict["thread_id"] ?? 0) as! Int) title = ((dict["title"] ?? "") as! String) createDate = Date(timeIntervalSince1970: (TimeInterval((dict["post_date"] ?? 0) as! Int))) if let userDict = dict["User"], !(userDict is NSNull) { creator = MiaUser(dict: userDict as! [String: Any]) } replies = ((dict["reply_count"] ?? 0) as! Int) views = ((dict["view_count"] ?? 0) as! Int) if let forum = dict["Forum"], let breads = (forum as! [String: Any])["breadcrumbs"], let bread = (breads as! [Any]).first { let breadDict = (bread as! [String: Any]) category = ((breadDict["title"] ?? "") as! String) } } } <file_sep>/Miacommunity/ViewModel/ForumsViewModel.swift // // ForumsViewModel.swift // Miacommunity // // Created by admin on 12/5/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit class ForumsViewModel: NSObject { var forumStrings = [String]() var forums = [String: [MiaNode]]() func getAllForums(completion: @escaping (()->()), failed: @escaping ((Error)->())) { forumStrings = [String]() forums = [String: [MiaNode]]() let request = MiaRequest.getnodes request.sendRequest { (dict, error) in guard error == nil else { failed(error!) return } // if let tree = dict!["tree_map"] { // self.nodeTreeMap = (tree as! [String: Any]) // } if let nodes = (dict!["nodes"] as? [Any]) { for nodeDict in nodes { self.fetechForum(dict: nodeDict as! [String: Any]) } } completion() } } func getSubscribeForums(completion: @escaping (()->()), failed: @escaping ((Error)->())) { forumStrings = [String]() forums = [String: [MiaNode]]() let request = MiaRequest.getSubscribeForums request.sendRequest { (dict, error) in guard error == nil else { failed(error!) return } if let nodes = (dict!["watched_forums"] as? [Any]) { for nodeDict in nodes { self.fetechForum(dict: nodeDict as! [String: Any]) } } completion() } } func setSubscribeForum(id: Int, flag: Bool, completion: @escaping (()->()), failed: @escaping ((Error)->())) { let request = MiaRequest.setSubscribeForum(id: id, flag: flag) request.sendRequest { (dict, error) in guard error == nil else { failed(error!) return } completion() } } // MARK: private methods private func fetechForum(dict: [String: Any]) { guard let id = dict["node_type_id"] else { return } guard (id as! String) == "Forum" else { return } let node = MiaNode(dict: dict) // get section title var categoryString = "Unknown Category" if let breadcrumbs = (dict["breadcrumbs"] as? [Any]) { guard breadcrumbs.count < 3 else { return } let bread = breadcrumbs.first as? [String: Any] if let title = bread!["title"] { categoryString = title as! String } } // add section title when it is not existing if forumStrings.firstIndex(of: categoryString) == nil { forumStrings.append(categoryString) } // append node to map based on section title var subForums = [MiaNode]() if let tmp = forums[categoryString] { subForums = tmp } subForums.append(node) forums[categoryString] = subForums } } <file_sep>/Miacommunity/ViewControllers/Message/RecentVC.swift // // RecentVC.swift // Miacommunity // // Created by com on 9/23/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit import SVProgressHUD class RecentVC: UIViewController { @IBOutlet weak var tableView: UITableView! var rooms = [MiaRoom]() let refreshControl = UIRefreshControl() var curPage = 1 var isLoadable = true override func viewDidLoad() { super.viewDidLoad() // Do any additional setup after loading the view. tableView.tableFooterView = UIView() if #available(iOS 10.0, *) { tableView.refreshControl = self.refreshControl } else { tableView.addSubview(refreshControl) } self.refreshControl.addTarget(self, action: #selector(refresh(_:)), for: .valueChanged) loadUsers() } override func viewWillAppear(_ animated: Bool) { let navVC = self.tabBarController?.navigationController as? MainNavVC navVC?.navigationBar.isHidden = false self.tabBarController?.navigationItem.title = "Recent" // add menu item let menuBtn = UIBarButtonItem(image: UIImage(named: "icoSideMenu"), style: .plain, target: navVC, action: #selector(navVC?.btnMenuTapped(_:))) menuBtn.tintColor = UIColor.white self.tabBarController?.navigationItem.rightBarButtonItem = menuBtn } @objc func refresh(_ sender: Any) { rooms.removeAll() curPage = 1 tableView.reloadData() loadUsers() } func loadUsers() { let errorHandler: ((String) -> ()) = { msg in let alert = UIAlertController(title: "", message: msg, preferredStyle: .alert) alert.addAction(UIAlertAction(title: "OK", style: .cancel, handler: nil)) self.present(alert, animated: true, completion: nil) } SVProgressHUD.show() let req = MiaRequest.conversations(page: curPage) req.sendRequest { (dict, error) in SVProgressHUD.dismiss() guard error == nil else { errorHandler(error!.localizedDescription) return } if let tmp = dict!["conversations"] { let conversations = tmp as! [Any] for item in conversations { let room = MiaRoom(dict: item as! [String: Any]) self.rooms.append(room) } } // check if there is more loadable page if let pageDict = dict!["pagination"] { let d = pageDict as? [String: Any] if let lastPage = d!["last_page"] { if self.curPage >= (lastPage as! Int) { self.isLoadable = false } else { self.isLoadable = true } } } self.tableView.reloadData() self.refreshControl.endRefreshing() } } override func prepare(for segue: UIStoryboardSegue, sender: Any?) { if segue.identifier == "segueMessage" { let vc = segue.destination as? MessageVC vc?.room = self.rooms[(sender as! IndexPath).row] } } } extension RecentVC: UITableViewDelegate, UITableViewDataSource { func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int { return rooms.count } func tableView(_ tableView: UITableView, heightForRowAt indexPath: IndexPath) -> CGFloat { return 70.0 } func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell { var cell = tableView.dequeueReusableCell(withIdentifier: "ContactTVCell") as? ContactTVCell if cell == nil { cell = UITableViewCell(style: .default, reuseIdentifier: "ContactTVCell") as? ContactTVCell } let room = rooms[indexPath.row] cell?.backgroundColor = UIColor.clear cell?.lblName.text = room.recips?.first?.userName if room.recips?.first?.avatar == nil { cell?.imgAvatar.image = UIImage(named: "icoAvatar") } else { cell?.imgAvatar.sd_setImage(with: URL(string: (room.recips?.first?.avatar!)!), placeholderImage: UIImage(named: "icoAvatar")) } cell?.imgAvatar.layer.cornerRadius = (cell?.imgAvatar.frame.width)! / 2 cell?.imgAvatar.clipsToBounds = true // load more page on the last cell when there is loadable page if self.isLoadable == true && indexPath.row == rooms.count - 1 { curPage = curPage + 1 loadUsers() } return cell! } func tableView(_ tableView: UITableView, didSelectRowAt indexPath: IndexPath) { tableView.deselectRow(at: indexPath, animated: true) self.performSegue(withIdentifier: "segueMessage", sender: indexPath) } } <file_sep>/Miacommunity/ViewControllers/Home/MenuVC.swift // // MenuVC.swift // Miacommunity // // Created by com on 9/23/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit class MenuVC: UIViewController { var tableView: UITableView? var parentVC: MainNavVC? let tables = ["Home", "Articles", "Blogs", "Showcase", "Forum", "Message", "Logout"] override func viewDidLoad() { super.viewDidLoad() // Do any additional setup after loading the view. view.backgroundColor = UIColor.miaDarkGreen } override func viewWillAppear(_ animated: Bool) { super.viewWillAppear(animated) tableView = UITableView(frame: CGRect(x: 0, y: 0, width: view.frame.width - 20, height: view.frame.height)) tableView?.backgroundColor = UIColor.clear tableView?.register(UITableViewCell.self, forCellReuseIdentifier: "menuTableCell") tableView?.dataSource = self tableView?.delegate = self tableView?.tableFooterView = UIView() self.view.addSubview(tableView!) } override var preferredStatusBarStyle: UIStatusBarStyle { return .lightContent } /* // MARK: - Navigation // In a storyboard-based application, you will often want to do a little preparation before navigation override func prepare(for segue: UIStoryboardSegue, sender: Any?) { // Get the new view controller using segue.destination. // Pass the selected object to the new view controller. } */ } extension MenuVC: UITableViewDelegate, UITableViewDataSource { func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int { return tables.count } func tableView(_ tableView: UITableView, heightForRowAt indexPath: IndexPath) -> CGFloat { return 44.0 } func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell { var cell = tableView.dequeueReusableCell(withIdentifier: "menuTableCell") if cell == nil { cell = UITableViewCell(style: .default, reuseIdentifier: "menuTableCell") } cell?.backgroundColor = UIColor.clear cell?.textLabel?.text = tables[indexPath.row] cell?.textLabel?.textColor = UIColor.white return cell! } func tableView(_ tableView: UITableView, didSelectRowAt indexPath: IndexPath) { tableView.deselectRow(at: indexPath, animated: true) self.navigationController?.dismiss(animated: false, completion: nil) parentVC?.chooseRootVC(index: indexPath.row) } } <file_sep>/Miacommunity/Data/MiaUser.swift // // MiaUser.swift // Miacommunity // // Created by com on 9/20/19. // Copyright © 2019 KMHK. All rights reserved. // import Foundation var curUser: MiaUser? var contacts = [MiaUser]() struct MiaUser { var userName: String? var avatar: String? var uid: Int? var email: String? } extension MiaUser { init(dict: [String: Any]) { userName = ((dict["username"] ?? "") as! String) uid = ((dict["user_id"] ?? "") as! Int) email = ((dict["email"] ?? "") as! String) if let tmp = dict["avatar_urls"] { let d = tmp as? [String: String?] avatar = d!["o"]! } } func storeUserLogin() { UserDefaults.standard.setValue(userName!, forKey: "username") UserDefaults.standard.synchronize() } } <file_sep>/Miacommunity/ViewControllers/Message/MoreVC.swift // // MoreVC.swift // Miacommunity // // Created by com on 9/23/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit class MoreVC: UIViewController { override func viewDidLoad() { super.viewDidLoad() // Do any additional setup after loading the view. } override func viewWillAppear(_ animated: Bool) { let navVC = self.tabBarController?.navigationController as? MainNavVC navVC?.navigationBar.isHidden = false self.tabBarController?.navigationItem.title = "Featured" // add menu item let menuBtn = UIBarButtonItem(image: UIImage(named: "icoSideMenu"), style: .plain, target: navVC, action: #selector(navVC?.btnMenuTapped(_:))) menuBtn.tintColor = UIColor.white self.tabBarController?.navigationItem.rightBarButtonItem = menuBtn } /* // MARK: - Navigation // In a storyboard-based application, you will often want to do a little preparation before navigation override func prepare(for segue: UIStoryboardSegue, sender: Any?) { // Get the new view controller using segue.destination. // Pass the selected object to the new view controller. } */ } <file_sep>/Miacommunity/Data/MiaRoom.swift // // MiaRoom.swift // Miacommunity // // Created by com on 9/30/19. // Copyright © 2019 KMHK. All rights reserved. // import Foundation struct MiaRoom { var roomID: Int var recips: [MiaUser]? } extension MiaRoom { init(dict: [String: Any]) { roomID = ((dict["conversation_id"] ?? "") as! Int) recips = [MiaUser]() let recipDict = dict["recipients"] as! [String: String] for recipID in recipDict.keys { if let user = (contacts.filter { $0.uid == Int(recipID) }.first) { recips?.append(user) } else { recips?.append(MiaUser(userName: recipDict[recipID], avatar: nil, uid: Int(recipID), email: "")) } } } } <file_sep>/Miacommunity/CustomeUIs/MiaTextField.swift // // MiaTextField.swift // Miacommunity // // Created by com on 9/23/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit class MiaTextField: UITextField { let padding = UIEdgeInsets(top: 0, left: 10, bottom: 0, right: 10) override func layoutSubviews() { super.layoutSubviews() let border = CALayer() border.frame = CGRect(x: 0, y: frame.height - 1, width: frame.width, height: 1) border.backgroundColor = UIColor.white.cgColor layer.addSublayer(border) } override open func textRect(forBounds bounds: CGRect) -> CGRect { return bounds.inset(by: padding) } override open func placeholderRect(forBounds bounds: CGRect) -> CGRect { return bounds.inset(by: padding) } override open func editingRect(forBounds bounds: CGRect) -> CGRect { return bounds.inset(by: padding) } /* // Only override draw() if you perform custom drawing. // An empty implementation adversely affects performance during animation. override func draw(_ rect: CGRect) { // Drawing code } */ } <file_sep>/Miacommunity/ViewModel/ThreadsViewModel.swift // // ThreadsViewModel.swift // Miacommunity // // Created by admin on 12/5/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit class ThreadsViewModel: NSObject { var threads = [MiaThread]() func getAllThreads(completion: @escaping (()->()), failed: @escaping ((Error)->())) { let req = MiaRequest.getAllThreads req.sendRequest { (dict, error) in guard error == nil else { failed(error!) return } var array = [MiaThread]() if let nodes = (dict!["threads"] as? [Any]) { for threadDict in nodes { let thread = MiaThread(dict: threadDict as! [String: Any]) array.append(thread) } } self.threads = array completion() } } func getSubscribeThreads(completion: @escaping (()->()), failed: @escaping ((Error)->())) { let req = MiaRequest.getSubscribeThreads req.sendRequest { (dict, error) in guard error == nil else { failed(error!) return } var array = [MiaThread]() if let nodes = (dict!["threads"] as? [Any]) { for threadDict in nodes { let thread = MiaThread(dict: threadDict as! [String: Any]) array.append(thread) } } self.threads = array completion() } } func setSubscribeThread(id: Int, completion: @escaping (()->()), failed: @escaping ((Error)->())) { let req = MiaRequest.setSubscribeThread(id: id) req.sendRequest { (dict, error) in guard error == nil else { failed(error!) return } completion() } } } <file_sep>/Podfile # Uncomment the next line to define a global platform for your project # platform :ios, '9.0' target 'Miacommunity' do # Comment the next line if you're not using Swift and don't want to use dynamic frameworks use_frameworks! # Pods for Miacommunity pod 'SDWebImage' pod 'ChatViewController' pod 'SVProgressHUD' pod 'SideMenu' end <file_sep>/Miacommunity/Data/MiaMessage.swift // // MiaMessage.swift // Miacommunity // // Created by com on 9/20/19. // Copyright © 2019 KMHK. All rights reserved. // import Foundation enum MiaMessageType: Int { case text = 0 } struct MiaMessage { var type: MiaMessageType? var sender: MiaUser? var sentDate: Date? var messageText: String? } extension MiaMessage { init(dict: [String: Any]) { type = .text messageText = ((dict["message"] ?? "") as! String) if let date = dict["message_date"] { sentDate = Date(timeIntervalSince1970: date as! TimeInterval) } else { sentDate = Date(timeIntervalSince1970: 0) } if let user = dict["User"] { sender = MiaUser(dict: user as! [String: Any]) } } } <file_sep>/Miacommunity/ViewControllers/Login/LoginVC.swift // // LoginVC.swift // Miacommunity // // Created by com on 9/23/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit import SVProgressHUD class LoginVC: UIViewController { @IBOutlet weak var txtName: MiaTextField! @IBOutlet weak var txtPwd: MiaTextField! @IBOutlet weak var imgViewLogo: UIImageView! override func viewDidLoad() { super.viewDidLoad() // Do any additional setup after loading the view. view.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(onTapGesture(_:)))) txtName.delegate = self txtPwd.delegate = self imgViewLogo.layer.cornerRadius = 20 imgViewLogo.clipsToBounds = true } override func viewWillAppear(_ animated: Bool) { navigationController?.navigationBar.isHidden = true } /* // MARK: - Navigation // In a storyboard-based application, you will often want to do a little preparation before navigation override func prepare(for segue: UIStoryboardSegue, sender: Any?) { // Get the new view controller using segue.destination. // Pass the selected object to the new view controller. } */ @objc func onTapGesture(_ sender: Any) { txtName.resignFirstResponder() txtPwd.resignFirstResponder() } @IBAction func btnLoginTapped(_ sender: Any) { onTapGesture(sender) guard invalidParam() == true else { return } let errorHandler: ((String) -> ()) = { msg in let alert = UIAlertController(title: "", message: msg, preferredStyle: .alert) alert.addAction(UIAlertAction(title: "OK", style: .cancel, handler: nil)) self.present(alert, animated: true, completion: nil) } SVProgressHUD.show() let req = MiaRequest.login(email: txtName.text!, password: txtPwd.text!) req.sendRequest { (dict, error) in SVProgressHUD.dismiss() guard error == nil else { errorHandler(error!.localizedDescription) return } curUser = MiaUser(dict: dict!) curUser?.storeUserLogin() let transition = CATransition() transition.duration = 0.3 transition.type = .push transition.subtype = .fromRight self.navigationController?.view.layer.add(transition, forKey: kCATransition) self.navigationController?.dismiss(animated: false, completion: nil) } } // MARK: private method private func invalidParam() -> Bool { let errorHandler: ((String) -> ()) = { name in let alert = UIAlertController(title: "", message: "Please input " + name + " correctly", preferredStyle: .alert) alert.addAction(UIAlertAction(title: "OK", style: .cancel, handler: nil)) self.present(alert, animated: true, completion: nil) } if txtName.text! == "" { errorHandler("username") return false } if txtPwd.text! == "" { errorHandler("password") return false } return true } } extension LoginVC: UITextFieldDelegate { func textFieldShouldReturn(_ textField: UITextField) -> Bool { if textField == txtName { txtPwd.becomeFirstResponder() } else { onTapGesture(self) btnLoginTapped(self) } return true } } <file_sep>/Miacommunity/Views/Forum/ForumPostTVCell.swift // // ForumPostTVCell.swift // Miacommunity // // Created by admin on 10/25/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit class ForumPostTVCell: UITableViewCell { @IBOutlet weak var imgAvatar: UIImageView! @IBOutlet weak var lblName: UILabel! @IBOutlet weak var lblDate: UILabel! @IBOutlet weak var lblCategory: UILabel! @IBOutlet weak var txtMessage: UITextView! override func awakeFromNib() { super.awakeFromNib() // Initialization code } override func setSelected(_ selected: Bool, animated: Bool) { super.setSelected(selected, animated: animated) // Configure the view for the selected state } } <file_sep>/Miacommunity/Data/MiaPost.swift // // MiaPost.swift // Miacommunity // // Created by admin on 10/25/19. // Copyright © 2019 KMHK. All rights reserved. // import Foundation struct MiaPost { var thread_id: Int? var post_id: Int? var message: String? var postDate: Date? var creator: MiaUser? init(dict: [String: Any]) { thread_id = ((dict["thread_id"] ?? 0) as! Int) post_id = ((dict["post_id"] ?? 0) as! Int) message = ((dict["message"] ?? "") as! String) postDate = Date(timeIntervalSince1970: (TimeInterval((dict["post_date"] ?? 0) as! Int))) if let userDict = dict["User"], !(userDict is NSNull) { creator = MiaUser(dict: userDict as! [String: Any]) } } } <file_sep>/Miacommunity/Views/Forum/ForumNodeTVCell.swift // // ForumNodeTVCell.swift // Miacommunity // // Created by admin on 10/24/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit class ForumNodeTVCell: UITableViewCell { @IBOutlet weak var imgIcon: UIImageView! @IBOutlet weak var lblTitle: UILabel! @IBOutlet weak var lblThreads: UILabel! @IBOutlet weak var lblMessages: UILabel! @IBOutlet weak var lblNote: UILabel! @IBOutlet weak var viewContainer: UIView! @IBOutlet weak var imgAvatar: UIImageView! @IBOutlet weak var lblUser: UILabel! @IBOutlet weak var lblMsg: UILabel! @IBOutlet weak var lblDate: UILabel! @IBOutlet weak var btnSubscribe: UIButton! @IBOutlet weak var imgSubscribe: UIImageView! override func awakeFromNib() { super.awakeFromNib() // Initialization code } override func setSelected(_ selected: Bool, animated: Bool) { super.setSelected(selected, animated: animated) // Configure the view for the selected state } } <file_sep>/Miacommunity/Data/MiaNode.swift // // MiaNode.swift // Miacommunity // // Created by admin on 10/22/19. // Copyright © 2019 KMHK. All rights reserved. // import Foundation enum NodeType { case category case forum case thread case post } struct NodeData { var creator: String? var message: String? var date: Date? var threadCount: Int? var msgCount: Int? init(dict: [String: Any]) { creator = ((dict["last_post_username"] ?? "") as! String) message = ((dict["last_thread_title"] ?? "") as! String) date = Date(timeIntervalSince1970: (TimeInterval((dict["last_post_date"] ?? 0) as! Int))) threadCount = ((dict["discussion_count"] ?? 0) as! Int) msgCount = ((dict["message_count"] ?? 0) as! Int) } } struct MiaNode { var node_id: Int? var parentID: Int? var type: NodeType? var title: String? var data: NodeData? init(dict: [String: Any]) { node_id = ((dict["node_id"] ?? 0) as! Int) parentID = ((dict["parent_node_id"] ?? 0) as! Int) if let typeString = dict["node_type_id"] { switch typeString as! String { case "Forum": type = .forum case "Category": type = .category default: type = .thread } } title = ((dict["title"] ?? "") as! String) if let typeData = dict["type_data"] { data = NodeData(dict: typeData as! [String: Any]) } } } <file_sep>/Miacommunity/ViewControllers/Forum/ForumsVC.swift // // ForumsVC.swift // Miacommunity // // Created by admin on 12/5/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit import SVProgressHUD class ForumsVC: UIViewController { @IBOutlet weak var tableView: UITableView! @IBOutlet weak var segment: UISegmentedControl! let model = ForumsViewModel() var errorHandler: ((Error)->())? override func viewDidLoad() { super.viewDidLoad() // Do any additional setup after loading the view. errorHandler = { error in SVProgressHUD.dismiss() let alert = UIAlertController(title: "", message: error.localizedDescription, preferredStyle: .alert) alert.addAction(UIAlertAction(title: "OK", style: .cancel, handler: nil)) self.present(alert, animated: true, completion: nil) } loadForums(0) } override func viewWillAppear(_ animated: Bool) { let navVC = self.tabBarController?.navigationController as? MainNavVC navVC?.navigationBar.isHidden = false self.tabBarController?.navigationItem.title = "Forum" // add menu item let menuBtn = UIBarButtonItem(image: UIImage(named: "icoSideMenu"), style: .plain, target: navVC, action: #selector(navVC?.btnMenuTapped(_:))) menuBtn.tintColor = UIColor.white self.tabBarController?.navigationItem.rightBarButtonItem = menuBtn } // MARK: actions @IBAction func segmentedControlChanged(_ sender: Any) { loadForums(segment.selectedSegmentIndex) } @objc func btnSubcribeTapped(_ sender: Any) { let id = (sender as! UIButton).tag let flag = (segment.selectedSegmentIndex == 1 ? true : false) setSubscribe(id: id, flag: flag) } /* // MARK: Navigation // In a storyboard-based application, you will often want to do a little preparation before navigation override func prepare(for segue: UIStoryboardSegue, sender: Any?) { // Get the new view controller using segue.destination. // Pass the selected object to the new view controller. } */ // MARK: private methods private func loadForums(_ type: Int) { SVProgressHUD.show() if type == 0 { // for all forums model.getAllForums(completion: { SVProgressHUD.dismiss() self.tableView.reloadData() }, failed: errorHandler!) } else { model.getSubscribeForums(completion: { SVProgressHUD.dismiss() self.tableView.reloadData() }, failed: errorHandler!) } } private func setSubscribe(id: Int, flag: Bool) { SVProgressHUD.show() model.setSubscribeForum(id: id, flag: flag, completion: { self.loadForums(flag == true ? 1 : 0) }, failed: errorHandler!) } } // MARK: - table view delegate & data source extension ForumsVC: UITableViewDataSource, UITableViewDelegate { func numberOfSections(in tableView: UITableView) -> Int { return model.forumStrings.count } func tableView(_ tableView: UITableView, titleForHeaderInSection section: Int) -> String? { let key = model.forumStrings[section] return key } /* func tableView(_ tableView: UITableView, viewForHeaderInSection section: Int) -> UIView? { let keyArray = Array(model.forums.keys) let key = keyArray[section] let view = UIView(frame: CGRect(x: 0, y: 0, width: tableView.frame.width, height: 44)) let label = UILabel(frame: CGRect(x: 10, y: 0, width: view.frame.width - 20, height: view.frame.height)) label.text = key label.textColor = UIColor.gray label.font = UIFont.boldSystemFont(ofSize: 14) view.addSubview(label) return view }*/ func tableView(_ tableView: UITableView, heightForHeaderInSection section: Int) -> CGFloat { return 44 } func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int { let key = model.forumStrings[section] let items = model.forums[key] return items!.count } func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell { let cell = tableView.dequeueReusableCell(withIdentifier: "ForumNodeTVCell", for: indexPath) as! ForumNodeTVCell let key = model.forumStrings[indexPath.section] let items = model.forums[key] let item = items![indexPath.row] cell.imgIcon.image = UIImage(named: "icoForumItem") cell.lblTitle.text = item.title cell.viewContainer.isHidden = true cell.lblNote.isHidden = false cell.btnSubscribe.addTarget(self, action: #selector(btnSubcribeTapped(_:)), for: .touchUpInside) cell.btnSubscribe.tag = item.node_id! if segment.selectedSegmentIndex == 0 { cell.imgSubscribe.image = UIImage(named: "icoSubscribe_normal") } else { cell.imgSubscribe.image = UIImage(named: "icoSubscribe_highlight") } if let data = item.data, data.msgCount! > 0 { cell.viewContainer.isHidden = false cell.lblNote.isHidden = true cell.lblUser.text = data.creator! cell.lblMsg.text = data.message! cell.lblMessages.text = String(data.msgCount!) cell.lblThreads.text = String(data.threadCount!) cell.lblDate.text = data.date!.getString() cell.imgAvatar.layer.cornerRadius = cell.imgAvatar.frame.width / 2 cell.imgAvatar.clipsToBounds = true cell.imgAvatar.layer.borderColor = UIColor.lightGray.cgColor cell.imgAvatar.layer.borderWidth = 1.0 } return cell } func tableView(_ tableView: UITableView, heightForRowAt indexPath: IndexPath) -> CGFloat { return 95 } func tableView(_ tableView: UITableView, didSelectRowAt indexPath: IndexPath) { tableView.deselectRow(at: indexPath, animated: true) let key = model.forumStrings[indexPath.section] let items = model.forums[key] let item = items![indexPath.row] let storyboard = UIStoryboard(name: "Forum", bundle: nil) let vc = storyboard.instantiateViewController(withIdentifier: "SubForumVC") as! SubForumVC vc.node = item self.tabBarController?.navigationController?.pushViewController(vc, animated: true) } } <file_sep>/Miacommunity/ViewControllers/Forum/ThreadsVC.swift // // ThreadsVC.swift // Miacommunity // // Created by admin on 12/5/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit import SVProgressHUD class ThreadsVC: UIViewController { @IBOutlet weak var tableView: UITableView! @IBOutlet weak var segment: UISegmentedControl! let model = ThreadsViewModel() var errorHandler: ((Error)->())? override func viewDidLoad() { super.viewDidLoad() // Do any additional setup after loading the view. errorHandler = { error in SVProgressHUD.dismiss() let alert = UIAlertController(title: "", message: error.localizedDescription, preferredStyle: .alert) alert.addAction(UIAlertAction(title: "OK", style: .cancel, handler: nil)) self.present(alert, animated: true, completion: nil) } } override func viewWillAppear(_ animated: Bool) { let navVC = self.tabBarController?.navigationController as? MainNavVC navVC?.navigationBar.isHidden = false self.tabBarController?.navigationItem.title = "Thread" // add menu item let menuBtn = UIBarButtonItem(image: UIImage(named: "icoSideMenu"), style: .plain, target: navVC, action: #selector(navVC?.btnMenuTapped(_:))) menuBtn.tintColor = UIColor.white self.tabBarController?.navigationItem.rightBarButtonItem = menuBtn loadThreads(0) } // MARK: actions @IBAction func segmentedControlChanged(_ sender: Any) { loadThreads(segment.selectedSegmentIndex) } @objc func btnSubcribeTapped(_ sender: Any) { let id = (sender as! UIButton).tag self.setSubscribe(id: id) } @objc func btnReplyTapped(_ sender: Any) { let alert = UIAlertController(title: "Reply", message: "Input Reply to this thread", preferredStyle: .alert) alert.addTextField { (textField) in textField.placeholder = "Type Contents" } alert.addAction(UIAlertAction(title: "OK", style: .default, handler: { (action) in })) alert.addAction(UIAlertAction(title: "Cancel", style: .cancel, handler: nil)) self.present(alert, animated: true, completion: nil) } /* // MARK: Navigation // In a storyboard-based application, you will often want to do a little preparation before navigation override func prepare(for segue: UIStoryboardSegue, sender: Any?) { // Get the new view controller using segue.destination. // Pass the selected object to the new view controller. } */ // MARK: private methods private func loadThreads(_ type: Int) { SVProgressHUD.show() if type == 0 { // for all threads model.getAllThreads(completion: { SVProgressHUD.dismiss() self.tableView.reloadData() }, failed: errorHandler!) } else { // for subscribed threads model.getSubscribeThreads(completion: { SVProgressHUD.dismiss() self.tableView.reloadData() }, failed: errorHandler!) } } private func setSubscribe(id: Int) { SVProgressHUD.show() model.setSubscribeThread(id: id, completion: { self.loadThreads(self.segment.selectedSegmentIndex) }, failed: errorHandler!) } } // MARK: - table view delegate & data source extension ThreadsVC: UITableViewDataSource, UITableViewDelegate { func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int { return model.threads.count } func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell { let cell = tableView.dequeueReusableCell(withIdentifier: "ForumThreadTVCell", for: indexPath) as! ForumThreadTVCell let item = model.threads[indexPath.row] cell.lblTitle.text = item.title cell.lblDate.text = item.createDate!.getString() cell.lblCategory.text = item.category if item.replies! > 0 { cell.lblReplies.text = String(item.replies!) + " replies" } else { cell.lblReplies.text = "reply" } cell.btnReply.addTarget(self, action: #selector(btnReplyTapped(_:)), for: .touchUpInside) if segment.selectedSegmentIndex == 0 { cell.imgSubscribe.image = UIImage(named: "icoSubscribe_normal") } else { cell.imgSubscribe.image = UIImage(named: "icoSubscribe_highlight") } cell.btnSubscribe.tag = item.thread_id! cell.btnSubscribe.addTarget(self, action: #selector(btnSubcribeTapped(_:)), for: .touchUpInside) if let creator = item.creator { cell.lblUser.text = creator.userName if let avatar = creator.avatar { cell.imgAvatar.sd_setImage(with: URL(string: avatar), placeholderImage: UIImage(named: "icoAvatar")) } else { cell.imgAvatar.image = UIImage(named: "icoAvatar") } } else { cell.lblUser.text = "unknown user" cell.imgAvatar.image = UIImage(named: "icoAvatar") } cell.imgAvatar.layer.cornerRadius = cell.imgAvatar.frame.width / 2 cell.imgAvatar.clipsToBounds = true cell.imgAvatar.layer.borderColor = UIColor.lightGray.cgColor cell.imgAvatar.layer.borderWidth = 1.0 return cell } func tableView(_ tableView: UITableView, heightForRowAt indexPath: IndexPath) -> CGFloat { return 142 } func tableView(_ tableView: UITableView, didSelectRowAt indexPath: IndexPath) { tableView.deselectRow(at: indexPath, animated: true) } } <file_sep>/Miacommunity/Views/Forum/ForumThreadTVCell.swift // // ForumThreadTVCell.swift // Miacommunity // // Created by admin on 10/24/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit class ForumThreadTVCell: UITableViewCell { @IBOutlet weak var lblTitle: UILabel! @IBOutlet weak var viewReplyContainer: UIView! @IBOutlet weak var lblReplies: UILabel! @IBOutlet weak var btnReply: UIButton! @IBOutlet weak var btnLike: UIButton! @IBOutlet weak var imgSubscribe: UIImageView! @IBOutlet weak var btnSubscribe: UIButton! @IBOutlet weak var lblDate: UILabel! @IBOutlet weak var imgAvatar: UIImageView! @IBOutlet weak var lblUser: UILabel! @IBOutlet weak var lblCategory: UILabel! override func awakeFromNib() { super.awakeFromNib() // Initialization code } override func setSelected(_ selected: Bool, animated: Bool) { super.setSelected(selected, animated: animated) // Configure the view for the selected state } } <file_sep>/Miacommunity/ViewModel/PostsViewModel.swift // // PostsViewModel.swift // Miacommunity // // Created by admin on 12/6/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit class PostsViewModel: NSObject { var posts = [MiaPost]() func getPosts(id: Int, completion: @escaping (([Any])->()), failed: @escaping ((Error)->())) { let request = MiaRequest.getposts(id: id) request.sendRequest { (dict, error) in guard error == nil else { failed(error!) return } if let posts = dict!["posts"]{ completion(posts as! [Any]) } else { let e = NSError(domain: "", code: 401, userInfo: [NSLocalizedDescriptionKey: "unknown error"]) failed(e) } } } func getMyPosts(completion: @escaping (([Any])->()), failed: @escaping ((Error)->())) { let request = MiaRequest.getMyPosts request.sendRequest { (dict, error) in guard error == nil else { failed(error!) return } if let posts = dict!["profile_posts"]{ completion(posts as! [Any]) } else { let e = NSError(domain: "", code: 401, userInfo: [NSLocalizedDescriptionKey: "unknown error"]) failed(e) } } } } <file_sep>/Miacommunity/Backend/MiaRequest.swift // // MiaRequest.swift // Miacommunity // // Created by com on 9/23/19. // Copyright © 2019 KMHK. All rights reserved. // import Foundation enum MiaRequest { case login(email: String, password: String) case signup(userName: String, email: String, password: String) case users(page: Int) case user(id: Int) case conversations(page: Int) case message(id: Int) case sendmessage(id: Int, text: String) case startmessage(recip: Int) case getnodes case getnode(id: Int) case getSubscribeForums case setSubscribeForum(id: Int, flag: Bool) case getAllThreads case getSubscribeThreads case setSubscribeThread(id: Int) case getthreads(id: Int) case getposts(id: Int) case getMyPosts case postthread(id: Int, title: String, message: String) case postcomment(id: Int, message: String) } extension MiaRequest { var superApiKey: String { return "<KEY>" } var baseURL: URL { return URL(string: "http://www.miacommunity.net")! } var fullURL: URL { switch self { case .login: return URL(string: "/api/auth", relativeTo: baseURL)! case .signup: return URL(string: "/api/users", relativeTo: baseURL)! case .users(let page): return URL(string: String(format: "/api/users?page=%d", page), relativeTo: baseURL)! case .user(let id): return URL(string: String(format: "/api/users/%d", id), relativeTo: baseURL)! case .conversations(let page): return URL(string: String(format: "/api/conversations?page=%d", page), relativeTo: baseURL)! case .message(let id): return URL(string: String(format: "/api/conversations/%d/messages", id), relativeTo: baseURL)! case .sendmessage: return URL(string: "/api/conversation-messages", relativeTo: baseURL)! case .startmessage: return URL(string: "/api/conversations", relativeTo: baseURL)! case .getnodes: return URL(string: "/api/nodes", relativeTo: baseURL)! case .getnode(let id): return URL(string: String(format: "/api/nodes/%d", id), relativeTo: baseURL)! case .getSubscribeForums: return URL(string: "/api/nodes/watch-list", relativeTo: baseURL)! case .setSubscribeForum(let id, _): return URL(string: String(format: "/api/forums/%d/watch", id), relativeTo: baseURL)! case .getAllThreads: return URL(string: "/api/threads", relativeTo: baseURL)! case .getSubscribeThreads: return URL(string: "/api/threads/watch-list", relativeTo: baseURL)! case .setSubscribeThread(let id): return URL(string: String(format: "/api/threads/%d/watch", id), relativeTo: baseURL)! case .getthreads(let id): return URL(string: String(format: "/api/forums/%d/threads", id), relativeTo: baseURL)! case .getposts(let id): return URL(string: String(format: "/api/threads/%d/posts", id), relativeTo: baseURL)! case .getMyPosts: return URL(string: String(format: "/api/users/%d?with_posts=true", curUser!.uid!), relativeTo: baseURL)! case .postthread: return URL(string: "/api/threads", relativeTo: baseURL)! case .postcomment: return URL(string: "/api/posts", relativeTo: baseURL)! } } func sendRequest(completion: @escaping ([String: Any]?, Error?) -> Void) { var req = URLRequest(url: fullURL) req.setValue(superApiKey, forHTTPHeaderField: "XF-Api-Key") var bodyParam = [String: Any]() switch self { case .login(let email, let password): bodyParam = ["login": email, "password": <PASSWORD>] req.httpMethod = "POST" case .signup(let username, let email, let password): bodyParam = ["username": username, "password": <PASSWORD>, "email": email] req.httpMethod = "POST" req.setValue("1", forHTTPHeaderField: "XF-Api-User") case .users: bodyParam = [:] req.httpMethod = "GET" case .user: bodyParam = [:] req.httpMethod = "GET" case .conversations: bodyParam = [:] req.httpMethod = "GET" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .message: bodyParam = [:] req.httpMethod = "GET" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .sendmessage(let id, let msg): bodyParam = ["conversation_id": id, "message": msg] req.httpMethod = "POST" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .startmessage(let recip): bodyParam = ["recipient_ids[0]": recip, "message": "I'd like to message with you.", "title": "Hello"] req.httpMethod = "POST" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .getnodes: bodyParam = [:] req.httpMethod = "GET" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .getnode: bodyParam = [:] req.httpMethod = "GET" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .getSubscribeForums: bodyParam = [:] req.httpMethod = "GET" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .setSubscribeForum(_, let flag): bodyParam = ["send_alert": "1", "send_email": "1", "stop": (flag ? "1": "0")] req.httpMethod = "POST" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .getAllThreads: bodyParam = [:] req.httpMethod = "GET" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .getSubscribeThreads: bodyParam = [:] req.httpMethod = "GET" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .setSubscribeThread: bodyParam = [:] req.httpMethod = "POST" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .getthreads: bodyParam = [:] req.httpMethod = "GET" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .getposts: bodyParam = [:] req.httpMethod = "GET" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .getMyPosts: bodyParam = [:] req.httpMethod = "GET" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .postthread(let id, let title, let message): bodyParam = ["node_id": id, "title": title, "message": message] req.httpMethod = "POST" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") case .postcomment(let id, let message): bodyParam = ["thread_id": id, "message": message] req.httpMethod = "POST" req.setValue(String(format: "%d", (curUser?.uid!)!), forHTTPHeaderField: "XF-Api-User") } req.httpBody = bodyParam.percentEscaped().data(using: .utf8) // print("\(String(data: req.httpBody!, encoding: .utf8))") URLSession.shared.dataTask(with: req) { (data, response, error) in DispatchQueue.main.async { guard error == nil else { completion(nil, error) return } do { let dict = try JSONSerialization.jsonObject(with: data!, options: []) as! [String: Any] switch self { case .login, .signup: if dict["success"] != nil { let d = dict["user"] completion(d as? [String: Any], nil) } else if let fail = dict["errors"] { let d = (fail as? [Any])?.first as? [String: Any] let errorMsg = d!["message"] let e = NSError(domain: "", code: 401, userInfo: [NSLocalizedDescriptionKey: errorMsg!]) completion(nil, e) } else { let e = NSError(domain: "", code: 401, userInfo: [NSLocalizedDescriptionKey: "unknown error"]) completion(nil, e) } default: completion(dict, nil) } } catch { print("error with parsing json data") let e = NSError(domain: "", code: 401, userInfo: [NSLocalizedDescriptionKey: "parsing unknown error"]) completion(nil, e) } } }.resume() } } <file_sep>/Miacommunity/ViewControllers/Forum/SubForumVC.swift // // SubForumVC.swift // Miacommunity // // Created by admin on 12/6/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit import SVProgressHUD class SubForumVC: UIViewController { @IBOutlet weak var tableView: UITableView! var model = SubForumViewModel() var node: MiaNode? var errorHandler: ((Error)->())? override func viewDidLoad() { super.viewDidLoad() // Do any additional setup after loading the view. errorHandler = { error in SVProgressHUD.dismiss() let alert = UIAlertController(title: "", message: error.localizedDescription, preferredStyle: .alert) alert.addAction(UIAlertAction(title: "OK", style: .cancel, handler: nil)) self.present(alert, animated: true, completion: nil) } } override func viewWillAppear(_ animated: Bool) { super.viewWillAppear(animated) navigationItem.title = node!.title print("ForumCategoryVC: node id: \(node!.node_id!), node type: \(node!.type!)") loadNodes() } /* // MARK: Navigation // In a storyboard-based application, you will often want to do a little preparation before navigation override func prepare(for segue: UIStoryboardSegue, sender: Any?) { // Get the new view controller using segue.destination. // Pass the selected object to the new view controller. } */ // private method private func loadNodes() { SVProgressHUD.show() model.getNodes(completion: { self.loadSubForum() if self.node?.type == .forum { self.loadThread() } else { SVProgressHUD.dismiss() self.tableView.reloadData() } }) { (error) in self.errorHandler!(error) } } private func loadSubForum() { guard model.nodeTreeMap != nil, node != nil else { return } guard model.nodeTreeMap![String(node!.node_id!)] != nil else { return } var nodes = [MiaNode]() let nodeMap = (model.nodeTreeMap![String(node!.node_id!)] as! [Int]) for nodeID in nodeMap { let dict = model.fetchDict(nodeID) let node = MiaNode(dict: dict!) nodes.append(node) } model.forums = nodes } private func loadThread() { guard model.nodeTreeMap != nil, node != nil else { SVProgressHUD.dismiss() return } model.getThreads(id: node!.node_id!, completion: { (array) in var threads = [MiaThread]() for dict in array { let thread = MiaThread(dict: dict as! [String: Any]) threads.append(thread) } self.model.threads = threads SVProgressHUD.dismiss() self.tableView.reloadData() }) { (error) in self.errorHandler!(error) } } } // MARK: - table view data source and delegate extension SubForumVC: UITableViewDataSource, UITableViewDelegate { func numberOfSections(in tableView: UITableView) -> Int { var count = 0 count = count + (model.forums.count > 0 ? 1 : 0) count = count + (model.threads.count > 0 ? 1 : 0) return count } func tableView(_ tableView: UITableView, viewForHeaderInSection section: Int) -> UIView? { if section == 0, model.forums.count > 0 { // for forums let view = UIView(frame: CGRect(x: 0, y: 0, width: tableView.frame.width, height: 44)) view.backgroundColor = UIColor.miaLightOcean let label = UILabel(frame: CGRect(x: 10, y: 0, width: view.frame.width - 20, height: view.frame.height)) label.textColor = UIColor.miaOceanBlue label.font = UIFont.boldSystemFont(ofSize: 14) label.text = "Forums" view.addSubview(label) return view } // for threads, show "post threads" let view = UIView(frame: CGRect(x: 0, y: 0, width: tableView.frame.width, height: 188)) view.backgroundColor = UIColor.white let lblContainer = UIView(frame: CGRect(x: 0, y: 0, width: view.frame.width, height: 44)) lblContainer.backgroundColor = UIColor.miaLightOcean view.addSubview(lblContainer) let label = UILabel(frame: CGRect(x: 10, y: 0, width: view.frame.width - 20, height: 44)) label.textColor = UIColor.miaOceanBlue label.font = UIFont.boldSystemFont(ofSize: 14) label.text = "Threads" view.addSubview(label) let containerView = UIView(frame: CGRect(x: 11, y: label.frame.maxY + 11, width: view.frame.width - 22, height: 125)) containerView.backgroundColor = UIColor.miaLightGrey view.addSubview(containerView) let imgView = UIImageView(frame: CGRect(x: 10, y: 10, width: 30, height: 30)) imgView.layer.cornerRadius = imgView.frame.width / 2 imgView.clipsToBounds = true containerView.addSubview(imgView) if let avatar = curUser?.avatar { imgView.sd_setImage(with: URL(string: avatar), placeholderImage: UIImage(named: "icoAvatar")) } else { imgView.image = UIImage(named: "icoAvatar") } let txtTitle = UITextField(frame: CGRect(x: 50, y: 10, width: containerView.frame.width - 50 - 10, height: 30)) txtTitle.borderStyle = .bezel txtTitle.backgroundColor = .white txtTitle.placeholder = "Thread Title" containerView.addSubview(txtTitle) let txtMsg = UITextField(frame: CGRect(x: 50, y: 45, width: containerView.frame.width - 50 - 10, height: 30)) txtMsg.borderStyle = .bezel txtMsg.backgroundColor = .white txtMsg.placeholder = "Message..." containerView.addSubview(txtMsg) let btnPost = UIButton(frame: CGRect(x: containerView.frame.width - 11 - 120, y: txtMsg.frame.maxY + 10, width: 120, height: 30)) btnPost.backgroundColor = UIColor.miaOceanBlue btnPost.layer.cornerRadius = 3.0 btnPost.clipsToBounds = true containerView.addSubview(btnPost) let imgPost = UIImageView(frame: CGRect(x: 10, y: 0, width: 20, height: 30)) imgPost.image = UIImage(named: "icoEdit") imgPost.contentMode = .scaleAspectFit btnPost.addSubview(imgPost) let lblTitle = UILabel(frame: CGRect(x: 25, y: 0, width: btnPost.frame.width - 25, height: 30)) lblTitle.text = "Post Thread" lblTitle.textAlignment = .center lblTitle.font = UIFont.systemFont(ofSize: 11.0) lblTitle.textColor = UIColor.white btnPost.addSubview(lblTitle) return view } func tableView(_ tableView: UITableView, heightForHeaderInSection section: Int) -> CGFloat { if section == 0, model.forums.count > 0 { // for forums return 44 } return 188 } func tableView(_ tableView: UITableView, viewForFooterInSection section: Int) -> UIView? { if section == 0, model.forums.count > 0 { // for forums let view = UIView(frame: CGRect(x: 0, y: 0, width: tableView.frame.width, height: 44)) let btnSubscribe = UIButton(frame: CGRect(x: (view.frame.width - 120) / 2, y: 14, width: 120, height: 25)) btnSubscribe.backgroundColor = UIColor.white view.addSubview(btnSubscribe) let imgView = UIImageView(frame: CGRect(x: 16, y: 0, width: 18, height: 25)) imgView.image = UIImage(named: "icoSubscribe_normal") imgView.contentMode = .scaleAspectFit btnSubscribe.addSubview(imgView) let lblTitle = UILabel(frame: CGRect(x: 25, y: 0, width: btnSubscribe.frame.width - 25, height: 25)) lblTitle.text = "Subscribe" lblTitle.textAlignment = .center lblTitle.font = UIFont.systemFont(ofSize: 11.0) lblTitle.textColor = UIColor.gray btnSubscribe.addSubview(lblTitle) return view } return UIView() } func tableView(_ tableView: UITableView, heightForFooterInSection section: Int) -> CGFloat { if section == 0, model.forums.count > 0 { // for forums return 44 } return 0.0 } func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int { return (section == 0 ? model.forums.count : model.threads.count) } func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell { if indexPath.section == 0, model.forums.count > 0 { // forums let cell = tableView.dequeueReusableCell(withIdentifier: "ForumNodeTVCell", for: indexPath) as! ForumNodeTVCell let item = model.forums[indexPath.row] cell.imgIcon.image = UIImage(named: "icoForumItem") cell.lblTitle.text = item.title cell.viewContainer.isHidden = true cell.lblNote.isHidden = false //cell.btnSubscribe.addTarget(self, action: #selector(btnSubcribeTapped(_:)), for: .touchUpInside) cell.btnSubscribe.tag = item.node_id! cell.imgSubscribe.image = UIImage(named: "icoSubscribe_normal") if let data = item.data, data.msgCount! > 0 { cell.viewContainer.isHidden = false cell.lblNote.isHidden = true cell.lblUser.text = data.creator! cell.lblMsg.text = data.message! cell.lblMessages.text = String(data.msgCount!) cell.lblThreads.text = String(data.threadCount!) cell.lblDate.text = data.date!.getString() cell.imgAvatar.layer.cornerRadius = cell.imgAvatar.frame.width / 2 cell.imgAvatar.clipsToBounds = true cell.imgAvatar.layer.borderColor = UIColor.lightGray.cgColor cell.imgAvatar.layer.borderWidth = 1.0 } return cell } else { // threads let cell = tableView.dequeueReusableCell(withIdentifier: "ForumThreadTVCell", for: indexPath) as! ForumThreadTVCell let item = model.threads[indexPath.row] cell.lblTitle.text = item.title cell.lblDate.text = item.createDate!.getString() cell.lblCategory.text = item.category if item.replies! > 0 { cell.lblReplies.text = String(item.replies!) + " replies" } else { cell.lblReplies.text = "reply" } //cell.btnReply.addTarget(self, action: #selector(btnReplyTapped(_:)), for: .touchUpInside) cell.imgSubscribe.image = UIImage(named: "icoSubscribe_normal") cell.btnSubscribe.tag = item.thread_id! //cell.btnSubscribe.addTarget(self, action: #selector(btnSubcribeTapped(_:)), for: .touchUpInside) if let creator = item.creator { cell.lblUser.text = creator.userName if let avatar = creator.avatar { cell.imgAvatar.sd_setImage(with: URL(string: avatar), placeholderImage: UIImage(named: "icoAvatar")) } else { cell.imgAvatar.image = UIImage(named: "icoAvatar") } } else { cell.lblUser.text = "unknown user" cell.imgAvatar.image = UIImage(named: "icoAvatar") } cell.imgAvatar.layer.cornerRadius = cell.imgAvatar.frame.width / 2 cell.imgAvatar.clipsToBounds = true cell.imgAvatar.layer.borderColor = UIColor.lightGray.cgColor cell.imgAvatar.layer.borderWidth = 1.0 return cell } } func tableView(_ tableView: UITableView, heightForRowAt indexPath: IndexPath) -> CGFloat { return (indexPath.section == 0 ? 95 : 142) } func tableView(_ tableView: UITableView, didSelectRowAt indexPath: IndexPath) { tableView.deselectRow(at: indexPath, animated: true) if indexPath.section == 0, model.forums.count > 0 { let item = model.forums[indexPath.row] let storyboard = UIStoryboard(name: "Forum", bundle: nil) let vc = storyboard.instantiateViewController(withIdentifier: "SubForumVC") as! SubForumVC vc.node = item self.navigationController?.pushViewController(vc, animated: true) } else { // let item = model.threads[indexPath.row] // let storyboard = UIStoryboard(name: "Forum", bundle: nil) // let vc = storyboard.instantiateViewController(withIdentifier: "ForumPostVC") as! ForumPostVC // vc.thread = item // self.navigationController?.pushViewController(vc, animated: true) } } } <file_sep>/Miacommunity/ViewControllers/Message/MessageVC.swift // // MessageVC.swift // Miacommunity // // Created by com on 9/20/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit import ChatViewController import SVProgressHUD class MessageVC: ChatViewController { var messages = [MiaMessage]() var bubbleStyle: BubbleStyle = .facebook var room: MiaRoom? override func viewDidLoad() { super.viewDidLoad() // Do any additional setup after loading the view. setupUI() chatBarView.textView.delegate = self navigationItem.title = "Message" let callBtn = UIBarButtonItem(image: UIImage(named: "icoCall"), style: .plain, target: self, action: #selector(btnCallTapped(_:))) let videoBtn = UIBarButtonItem(image: UIImage(named: "icoVideo"), style: .plain, target: self, action: #selector(btnCallTapped(_:))) navigationItem.rightBarButtonItems = [videoBtn, callBtn] loadMessage() } /* override func prepare(for segue: UIStoryboardSegue, sender: Any?) { } */ // MARK: button action @objc func btnCallTapped(_ sender: Any) { let alert = UIAlertController(title: nil, message: "This function is developing now", preferredStyle: .alert) alert.addAction(UIAlertAction(title: "OK", style: .cancel, handler: nil)) self.present(alert, animated: true, completion: nil) } // MARK: load messages private func loadMessage() { let errorHandler: ((String) -> ()) = { msg in let alert = UIAlertController(title: "", message: msg, preferredStyle: .alert) alert.addAction(UIAlertAction(title: "OK", style: .cancel, handler: nil)) self.present(alert, animated: true, completion: nil) } SVProgressHUD.show() let req = MiaRequest.message(id: room!.roomID) req.sendRequest { (dict, error) in SVProgressHUD.dismiss() guard error == nil else { errorHandler(error!.localizedDescription) return } if let tmp = dict!["messages"] { let messages = tmp as! [Any] for item in messages { let msg = MiaMessage(dict: item as! [String: Any]) self.messages.insert(msg, at: 0) } } self.tableView.reloadData() } } // MARK: private message methods private func setupUI() { /// Tableview tableView.estimatedRowHeight = 88 tableView.keyboardDismissMode = .none tableView.register(MessageTextCell.self, forCellReuseIdentifier: MessageTextCell.reuseIdentifier) } private func updateUI() { tableView.reloadData() } private func addMessage(_ message: MiaMessage) { self.messages.insert(message, at: 0) // Insert new message cell tableView.beginUpdates() tableView.insertRows(at: [IndexPath(row: 0, section: 0)], with: .none) tableView.endUpdates() // Check if we have more than one message switch self.bubbleStyle { case .facebook: if self.messages.count <= 1 { return } reloadLastMessageCell() default: break } } private func reloadLastMessageCell() { tableView.beginUpdates() let lastIndexPath = IndexPath(row: 1, section: 0) let cell = tableView.cellForRow(at: lastIndexPath) as? MessageCell let positionInBlock = self.getPositionInBlockForMessageAtIndex(lastIndexPath.row) cell?.updateLayoutForBubbleStyle(self.bubbleStyle, positionInBlock: positionInBlock) cell?.roundViewWithBubbleStyle(self.bubbleStyle, positionInBlock: positionInBlock) tableView.endUpdates() tableView.scrollToFirstCell() } func getPositionInBlockForMessageAtIndex(_ index: Int) -> PositionInBlock { let message = messages[index] if let beforeItemMessage = messages.item(before: index), let afterItemMessage = messages.item(after: index) { if beforeItemMessage.sender?.uid == message.sender?.uid && message.sender?.uid == afterItemMessage.sender?.uid { return .center } if beforeItemMessage.sender?.uid == message.sender?.uid { return .bottom } if message.sender?.uid == afterItemMessage.sender?.uid { return .top } return .single } if let beforeItemMessage = messages.item(before: index) { if beforeItemMessage.sender?.uid == message.sender?.uid { return .bottom } return .single } if let afterItemMessage = messages.item(after: index) { if afterItemMessage.sender?.uid == message.sender?.uid { return .top } return .single } return .single } // MARK: override chat methods override func numberOfSections(in tableView: UITableView) -> Int { return 1 } override func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int { return self.messages.count } override func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell { let message = self.messages[indexPath.row] var cellIdentifer = "" switch message.type! { case .text: cellIdentifer = MessageTextCell.reuseIdentifier break } let cell = tableView.dequeueReusableCell(withIdentifier: cellIdentifer, for: indexPath) as! MessageCell let positionInBlock = self.getPositionInBlockForMessageAtIndex(indexPath.row) cell.transform = tableView.transform cell.bind(withMessage: message, user: message.sender!) cell.updateUIWithBubbleStyle(self.bubbleStyle, isOutgoingMessage: (message.sender?.uid == curUser?.uid)) cell.updateLayoutForBubbleStyle(self.bubbleStyle, positionInBlock: positionInBlock) return cell } func tableView(_ tableView: UITableView, willDisplay cell: UITableViewCell, forRowAt indexPath: IndexPath) { let chatCell = cell as! MessageCell let positionInBlock = self.getPositionInBlockForMessageAtIndex(indexPath.row) chatCell.layoutIfNeeded() // Update UI for cell chatCell.showHideUIWithBubbleStyle(self.bubbleStyle, positionInBlock: positionInBlock) chatCell.updateAvatarPosition(bubbleStyle: self.bubbleStyle) chatCell.roundViewWithBubbleStyle(self.bubbleStyle, positionInBlock: positionInBlock) } override func didPressSendButton(_ sender: Any?) { // guard model.opUser != nil else { // return // } let msg = MiaMessage(type: .text, sender: curUser!, sentDate: Date(timeIntervalSinceNow: 0), messageText: chatBarView.textView.text) addMessage(msg) let errorHandler: ((String) -> ()) = { msg in let alert = UIAlertController(title: "", message: msg, preferredStyle: .alert) alert.addAction(UIAlertAction(title: "OK", style: .cancel, handler: nil)) self.present(alert, animated: true, completion: nil) } let req = MiaRequest.sendmessage(id: room!.roomID, text: msg.messageText!) req.sendRequest { (dict, error) in guard error == nil else { errorHandler(error!.localizedDescription) return } } super.didPressSendButton(sender) } override func didPressGalleryButton(_ sender: Any?) { btnCallTapped(self) } override func didSelectVideo(url: URL?) { print("URL \(url!)") } override func didSelectImage(url: URL?) { print("URL \(url!)") } override func textView(_ textView: UITextView, shouldChangeTextIn range: NSRange, replacementText text: String) -> Bool { if (text == "\n") { self.didPressSendButton(textView) return false } return true } } <file_sep>/Miacommunity/Utils/Date.swift // // Date.swift // Miacommunity // // Created by admin on 10/24/19. // Copyright © 2019 KMHK. All rights reserved. // import Foundation extension Date { func getString() -> String { let format = DateFormatter() format.dateFormat = "MMM d, yyyy" return format.string(from: self) } } <file_sep>/Miacommunity/ViewControllers/Home/MainNavVC.swift // // MainNavVC.swift // Miacommunity // // Created by com on 9/20/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit import SVProgressHUD import SideMenu class MainNavVC: UINavigationController { override func viewDidLoad() { super.viewDidLoad() // Do any additional setup after loading the view. // hide back button string let attributes = [NSAttributedString.Key.font: UIFont(name: "Helvetica-Bold", size: 0.1)!, NSAttributedString.Key.foregroundColor: UIColor.clear] let BarButtonItemAppearance = UIBarButtonItem.appearance() BarButtonItemAppearance.setTitleTextAttributes(attributes, for: .normal) BarButtonItemAppearance.setTitleTextAttributes(attributes, for: .highlighted) // set background color of navigation bar let navBarAppearance = UINavigationBar.appearance() navBarAppearance.barTintColor = UIColor.miaDarkGreen navBarAppearance.tintColor = UIColor.white navBarAppearance.isTranslucent = false // show 1 px line under navigation bar UIGraphicsBeginImageContext(CGSize(width: UIScreen.main.bounds.size.width, height: 1)) let context = UIGraphicsGetCurrentContext() context?.setFillColor(UIColor.miaDarkGreen!.cgColor) context?.fill(CGRect(x: 4, y: 0, width: UIScreen.main.bounds.size.width - 8, height: 1)) let image = UIGraphicsGetImageFromCurrentImageContext() UIGraphicsEndImageContext() navBarAppearance.shadowImage = image // set title color navBarAppearance.titleTextAttributes = [NSAttributedString.Key.foregroundColor: UIColor.white] // for ProgressHUD SVProgressHUD.setDefaultMaskType(.clear) // for side menu SideMenuManager.default.menuPresentMode = .menuSlideIn SideMenuManager.defaultManager.menuFadeStatusBar = false //SideMenuManager.default.menuBlurEffectStyle = .dark } override func viewWillAppear(_ animated: Bool) { super.viewWillAppear(animated) } override func viewDidAppear(_ animated: Bool) { super.viewDidAppear(animated) } override var preferredStatusBarStyle: UIStatusBarStyle { return .lightContent } @objc func btnMenuTapped(_ sender: Any) { performSegue(withIdentifier: "segueMenu", sender: nil) } func chooseRootVC(index: Int) { if index == 0 { // home let storyboard = UIStoryboard(name: "Main", bundle: nil) let controller = storyboard.instantiateViewController(withIdentifier: "HomeVC") self.viewControllers = [controller] } else if index == 4 { // forum let storyboard = UIStoryboard(name: "Forum", bundle: nil) let controller = storyboard.instantiateInitialViewController() self.viewControllers = [controller] as! [UIViewController] } else if index == 5 { // message let storyboard = UIStoryboard(name: "Message", bundle: nil) let controller = storyboard.instantiateInitialViewController()//storyboard.instantiateViewController(withIdentifier: "MessageVC") self.viewControllers = [controller] as! [UIViewController] } } // MARK: Navigation // In a storyboard-based application, you will often want to do a little preparation before navigation override func prepare(for segue: UIStoryboardSegue, sender: Any?) { if segue.identifier == "segueMenu" { let vc = segue.destination as! UISideMenuNavigationController let menuVC = vc.topViewController as! MenuVC menuVC.parentVC = self } } } <file_sep>/Miacommunity/ViewModel/SubForumViewModel.swift // // SubForumViewModel.swift // Miacommunity // // Created by admin on 12/6/19. // Copyright © 2019 KMHK. All rights reserved. // import UIKit class SubForumViewModel: NSObject { var nodeTreeMap: [String: Any]? var allNodes: [Any]? var forums = [MiaNode]() var threads = [MiaThread]() func fetchDict(_ id: Int) -> [String: Any]? { for node in allNodes! { let dict = node as! [String: Any] if let nodeID = dict["node_id"], (nodeID as! Int) == id { return dict } } return nil } func getNodes(completion: @escaping (()->()), failed: @escaping ((Error)->())) { let request = MiaRequest.getnodes request.sendRequest { (dict, error) in guard error == nil else { failed(error!) return } if let tree = dict!["tree_map"] { self.nodeTreeMap = (tree as! [String: Any]) } if let nodes = dict!["nodes"] { self.allNodes = (nodes as! [Any]) } completion() } } func getNode(id: Int, completion: @escaping ((MiaNode)->()), failed: @escaping ((Error)->())) { let request = MiaRequest.getnode(id: id) request.sendRequest { (dict, error) in guard error == nil else { failed(error!) return } if let nodeDict = dict!["node"] { let node = MiaNode(dict: nodeDict as! [String: Any]) completion(node) } else { let e = NSError(domain: "", code: 401, userInfo: [NSLocalizedDescriptionKey: "unknown error"]) failed(e) } } } func getThreads(id: Int, completion: @escaping (([Any])->()), failed: @escaping ((Error)->())) { let request = MiaRequest.getthreads(id: id) request.sendRequest { (dict, error) in guard error == nil else { failed(error!) return } if let threads = dict!["threads"]{ completion(threads as! [Any]) } else { let e = NSError(domain: "", code: 401, userInfo: [NSLocalizedDescriptionKey: "unknown error"]) failed(e) } } } }
e7e8db2945cb3e910d8c350010468adc432e0aa6
[ "Swift", "Ruby" ]
26
Swift
kmhk/Miacommunity_iOS
f85a58531f17131b84950e1ed464d9c53ac531df
db244fc6e5b044923bb1ceb516860ad45a3115a4
refs/heads/main
<file_sep>import 'App.css'; function App() { return ( <div className="container mx-auto pt-2"> <h1 className="text-6xl">Hello React</h1> </div> ); } export default App;
dac142b4abae13eece30b93db96642be50670560
[ "JavaScript" ]
1
JavaScript
noopik/microclass-memperpage
55380d2a470e700010a91b845a7ba3225ce0ed99
2de0a5d391e384feb504872f7faa0c6cb72976a3
refs/heads/master
<repo_name>harshadodda/CrackingTheCodeInterviewQuestions-6thEdition-<file_sep>/src/PlateStack.java import java.util.ArrayList; /** * This class implements a data structure that consists of a few stacks, when one stack gets full * the next stack is used. The default capacity of the stacks is 3. These stacks are imagined to * be one on top of another such that the current stack is at the top and the oldest stack is at * the bottom. * @author <NAME> * */ public class PlateStack { public ArrayList<Stack> stackList = new ArrayList<Stack>(); public int currentStack = 0; public void push(int value) { Stack stack = stackList.get(this.currentStack); if(stack.getNumValues() >= stack.getCapacity()) { Stack newStack = new Stack(); this.stackList.add(newStack); newStack.push(value); newStack.setNumValues(newStack.getNumValues() + 1); this.currentStack++; } else { stack.push(value); } } /** * This pops the current stacks top value which is the last value to be put in to any stack. * @return, the value that is popped off of the tallest stack(current stack) */ public int pop() { int value = 0; Stack stack = stackList.get(this.currentStack); value = stack.pop(); if(stack.top == -1) { this.stackList.remove(stack); this.currentStack--; } return value; } /** * Prints the contents of every stack in order from first to last. Last being the stack that * is the current stack and the stack where the most recent push and pops occur. */ public void print() { for(Stack s : stackList) { s.print(); } } } <file_sep>/src/AnimalNode.java public class AnimalNode { int id; // unique id for each animal String type; // represents the type of the animal public AnimalNode(int id, String type) { this.id = id; this.type = type; } } <file_sep>/src/CH3_SortStack.java /** * * 3.5 Sort Stack: Write a program to sort a stack such that the smallest items are on the top. * You can use an additional temporary stack, but you may not copy the elements into any other * data structure (such as an array). The stack supports the following operations: push, pop, * peek, and isEmpty. * * @author <NAME> * */ public class CH3_SortStack { public static void main(String[] args) { Stack s1 = new Stack(); Stack s2 = new Stack(); Stack sorted = new Stack(); Stack reversedStack = new Stack(); s1.push(1); s1.push(5); s1.push(3); s1.push(6); s1.push(4); System.out.println("Here is the unsorted stack:"); s1.print(); sorted = sortStack(s1, s2); reversedStack = reverseStack(sorted); System.out.println("Here is the sorted stack(the last element is the on to be popped first):"); reversedStack.print(); } public static Stack sortStack(Stack s1, Stack s2) { int value = -1; int temp = -1; while(value == -1) { // this effectively tanslates to while the stack temp stack, s1, is not empty value = s1.pop(); if(s2.isEmpty()) { // if the result stack is empty, push the value in s2.push(value); } else if(value > s2.peek()) { // if the value already in the result stack is lower than the popped // value from the temp stack, push the value in s2.push(value); } else if(value < s2.peek()) { // if the value already in the result stack is higher than the popped // value form the temp stack while(value < s2.peek()) { temp = s2.pop(); // pop the value on the result stack s1.push(temp); // push it into the temp stack if(s2.isEmpty()) { // if the result stack is empty, break break; } } s2.push(value); // push the value popped from the temp stack into the result stack, then the // algorithm automatically re-insterts the values we put into the temp stack back to the // result stack, in the correct order value = -1; } value = -1; if(s1.isEmpty()) { value = 0; } } return s2; } /** * Reverses the order of a stack * @param s1, the stack to reverse * @return, the reversed stack */ public static Stack reverseStack(Stack s1) { Stack retStack = new Stack(); int value = 0; while(!s1.isEmpty()) { value = s1.pop(); retStack.push(value); } return retStack; } } <file_sep>/src/CH1_ZeroMatrix.java import java.util.Scanner; import java.math.*; /** * 1.8 Zero Matrix: Write an algorithm such that if an element * in an MxN matrix is 0, its entire row and column are set to O. * * Runtime: O(N), in this case because even though the input is * M*N in size, we only go through the input once which is the * definition of O(N) * * @author <NAME> * */ public class CH1_ZeroMatrix { public static void main(String[] args) { // TODO Auto-generated method stub Scanner sc = new Scanner(System.in); int m = 0; int n = 0; System.out.println("We are making an M*N matrix, please enter M: "); m = sc.nextInt(); System.out.println("Please enter N: "); n = sc.nextInt(); System.out.println("Here is the matrix after randomization: "); int[][] mat = new int[m][n]; int randomM = (int)(Math.random() * m); int randomN = (int)(Math.random() * n); for(int i = 0; i < m; i++) { for(int j = 0; j < n; j++) { if(i == randomM && j == randomN) { mat[i][j] = 0; } else { int random = (int)(Math.random() * 100); mat[i][j] = random; } } } for(int i = 0; i < m; i++) { for(int j = 0; j < n; j++) { System.out.print(" " + mat[i][j] + " "); } System.out.println(); } mat = setMatrixToZero(mat); System.out.println("Here is the matrix after adding the zeros: "); for(int i = 0; i < m; i++) { for(int j = 0; j < n; j++) { System.out.print(" " + mat[i][j] + " "); } System.out.println(); } sc.close(); } public static int[][] setMatrixToZero(int[][] mat) { boolean[] ms = new boolean[mat.length]; // keeps track of which columns need to be 0 boolean[] ns = new boolean[mat[0].length]; // keeps track of which rows need to be 0 for(int i = 0; i < mat.length; i++) { for(int j = 0; j < mat[i].length; j++) { if(mat[i][j] == 0) { // sets the columns and rows that need to be 0 ms[i] = true; ns[j] = true; } } } for(int i = 0; i < mat.length; i++) { for(int j = 0; j < mat[i].length; j++) { if(ms[i] == true || ns[j] == true) { mat[i][j] = 0; // makes the columns and rows that are marked true = to 0 in the real matrix } } } return mat; } } <file_sep>/src/CH1_StringCompression.java import java.util.ArrayList; import java.util.Scanner; /** * * 1.6 String Compression: String Compression: Implement a method to perform basic string * compression using the counts of repeated characters. For example, the string aabcccccaaa * would become a2b1c5a3. If the "compressed" string would not become smaller than the * original string, your method should return the original string. You can assume the string * has only uppercase and lowercase letters (a - z). * * Runtime: O(N) or O(M), we go through at maximum the string given or the larger "compressed" stirng * * @author <NAME> * */ public class CH1_StringCompression { public static void main(String[] args) { Scanner sc = new Scanner(System.in); System.out.println("Please enter a string to compress: "); String input = sc.nextLine(); String compressedStr = ""; input = input + "\0"; compressedStr = compressString(input); // if the compressed string is larger, print the original string instead if(compressedStr.length() > input.length()) { System.out.println("The compressed string was longer here is the original string: " + input); } else { System.out.println("Here is the compressed string: " + compressedStr); } sc.close(); } /** * Takes in a string and compresses it into a potentially smaller string * EX. aaaabbbaa becomes a4b3a2 * @param str, the string to compress * @return returns the compressed string in the format mentioned in the example above */ public static String compressString(String str) { String compressedStr = ""; char[] chars = new char[str.length()]; chars = str.toCharArray(); ArrayList<String> newCharArr = new ArrayList<String>(); int letterCounter = 0; char compChar = str.charAt(0); for(int i = 0; i < str.length(); i++) { if(chars[i] == compChar) { letterCounter++; // if the letter is the same as the one we are currently on in the string, increase the letter count } else { // if we see a character that is new we must add the old character and the number of times we saw it into the new array newCharArr.add(String.valueOf(compChar)); // add the character into the new array newCharArr.add(String.valueOf(letterCounter)); // add the number of times the new character occured into the new array compChar = chars[i]; // update the character to compare to letterCounter = 1; // update the number of times we have seen the new character } } for(String character : newCharArr) { compressedStr += character; // loop through the Array List and construct the compressed string from it } return compressedStr; } } <file_sep>/src/CH1_OneAway.java import java.util.HashMap; import java.util.Scanner; /** * * 1.5 One Away: There are three types of edits that can be performed * on strings: insert a character, remove a character, or replace a character. * Given two strings, write a function to check if they are one edit (or zero edits) away. * * EXAMPLE * pale, ple -> true * pales, pale -> true * pale, bale -> true * pale, bake -> false * * @author <NAME> * */ public class CH1_OneAway { public static void main(String[] args) { Scanner sc = new Scanner(System.in); String str1 = ""; String str2 = ""; boolean oneAway = true; System.out.println("Please enter the starting string: "); str1 = sc.nextLine(); System.out.println("Please enter the resulting string: "); str2 = sc.nextLine(); oneAway = checkOneAway(str1, str2); System.out.println("are " + str1 + " and " + str2 + " are one away? " + oneAway); sc.close(); } public static boolean checkOneAway(String str1, String str2) { boolean oneAway = true; HashMap<Character, Integer> map1 = new HashMap<Character, Integer>(); HashMap<Character, Integer> map2 = new HashMap<Character, Integer>(); for(int i = 0; i < str1.length(); i++) { map1.put(str1.charAt(i), map1.getOrDefault(str1.charAt(i), 0) + 1); } for(int i = 0; i < str2.length(); i++) { map2.put(str2.charAt(i), map2.getOrDefault(str2.charAt(i), 0) + 1); } if(str1.equals(str2)) { return true; } else if(str1.length() - 1 == str2.length() || str2.length() - 1 == str2.length()) { // check remove 1 char/ add one char int oneCharExtra = 0; String smallerStr = smallerString(str1, str2); for(int i = 0; i < smallerStr.length(); i++) { int val1 = map1.get(smallerStr.charAt(i)); int val2 = map2.getOrDefault(smallerStr.charAt(i), 0); if(val1 != val2) { oneCharExtra++; } if(oneCharExtra > 1) { return false; } } } return oneAway; } public static String smallerString(String str1, String str2) { if(str1.length() < str2.length()) { return str1; } else { return str2; } } } <file_sep>/src/CH3_StackMin.java /** * 3.2 Stack Min: How would you design a stack which, in addition to push and pop, has a function * min which returns the minimum element? Push, pop and min should all operate in 0(1) time. * * @author <NAME> * */ /* * * You can have a variable that keeps track of the min number as it gets pushed so the first time * we push in we make the min variable equal to the thing and then the next thing we push, we * check if it is smaller than the current min, and if it is we update the min, if it is not, we * move on. When we are popping a value, if it is not the min value we pop the value without * changing the min value. If we pop the min value after checking that it is in fact the min * value, we must go through the array/stack to see the next lowest value excluding the value we * are popping. This may be O(N) time for this one search, but this will be amortized to become * an O(1) overall runtime because we only encounter the O(N) runtime every now and again. This * will be amortized to O(1) time overall. Pop and push are O(1) still. * */ public class CH3_StackMin { } <file_sep>/src/CH1_CheckPermutation.java import java.util.Arrays; import java.util.HashMap; import java.util.Scanner; /** * 1.2 Check Permutation: Given two strings, write a method to decide if * one is a permutation of the other * * Runtime: O(NlogN) (sorting method takes NlogN time) or, * O(N) (hashmap method takes N time) * * @author <NAME> * */ public class CH1_CheckPermutation { public static void main(String[] args) { Scanner sc = new Scanner(System.in); String str1 = ""; String str2 = ""; boolean isPermutation = false; System.out.println("Enter the first string: "); str1 = sc.nextLine(); System.out.println("Enter the second string: "); str2 = sc.nextLine(); isPermutation = checkPermutation(str1, str2); System.out.println("sorting method returned: " + isPermutation); // check for permutation using sorting method isPermutation = checkPermutation2(str1, str2); System.out.println("hash map method returned: " + isPermutation); // check for permutation using hash map method System.out.println("Bye."); sc.close(); } /** * Checks character by character to see if two strings are permutations of each other * using a sorting method * @param str1, first string to be checked * @param str2, second string to be checked * @return true if the strings are permutations of each other, false if not */ private static boolean checkPermutation(String str1, String str2) { boolean isPermutation = true; if(str1.length() != str2.length()) { return false; } char[] chars1 = str1.toCharArray(); char[] chars2 = str2.toCharArray(); Arrays.sort(chars1); Arrays.sort(chars2); str1 = new String(chars1); // make strings from sorted char arrays str2 = new String(chars2); for(int i = 0; i < str1.length(); i++) { if(str1.charAt(i) != str2.charAt(i)) { // check character by character to see if characters are same isPermutation = false; } } return isPermutation; } /** * Checks character by character to see if two strings are permutations of each other * using a hash map method * @param str1, the first string to be checked * @param str2, the second string to be checked * @return true if the strings are permutations of each other, false if not */ private static boolean checkPermutation2(String str1, String str2) { boolean isPermutation = true; HashMap<Character, Integer> map1 = new HashMap<Character, Integer>(); HashMap<Character, Integer> map2 = new HashMap<Character, Integer>(); if(str1.length() != str2.length()) { return false; } for(int i = 0; i < str1.length(); i++) { map1.put(str1.charAt(i), map1.getOrDefault(str1.charAt(i), 0) + 1); // add all characters from string 1 to hash map map2.put(str2.charAt(i), map2.getOrDefault(str2.charAt(i), 0) + 1); // add all characters from string 2 to hash map } for(int i = 0; i < str1.length(); i++) { Integer val1 = 0; Integer val2 = 0; Integer index; // need Integer because of potential null return value val1 = map1.get(str1.charAt(i)); index = str2.indexOf(str1.charAt(i)); // reutrns -1 if character is not in string if(index < 0) { return false; } val2 = map2.get(str2.charAt(index)); if(val1 != val2) { isPermutation = false; } } return isPermutation; } } <file_sep>/src/CH1_URLify.java import java.util.Scanner; /** * 1.3 URLify: Write a method to replace all spaces in a string with %20. You may assume * that the string has sufficient space at the end to hold the additional characters, and * that you are given the "true" length of the string. Try to do this in place with a char * array * * Example: * "Mr <NAME>" * * Runtime: O(N) (not inplace method, O(N) space taken) * O(N) (inplace method, O(1) space taken) (incomplete) * * @author <NAME> * */ public class CH1_URLify { public static void main(String[] args) { Scanner sc = new Scanner(System.in); int trueLength = 0; System.out.println("Enter a string to URLify:"); String str = sc.nextLine(); trueLength = getTrueLength(str); String spaciousString = new String(getSpaciousString(str)); String retStr = new String(URLify(spaciousString, trueLength)); System.out.println("Here is your URLified string using a non-inplace method: " + retStr); //retStr = new String(URLifyInPlace(spaciousString.toCharArray(), trueLength)); //System.out.println("Here is your URLified string using an inplace method: " + retStr); System.out.println("Bye."); sc.close(); } private static int getTrueLength(String str) { return str.length(); } private static String getSpaciousString(String str) { int spaceCounter = 0; for(int i = 0; i < str.length(); i++) { if(str.charAt(i) == ' ') { spaceCounter++; } } char[] charArr = new char[str.length() + (spaceCounter * 3)]; for(int i = 0; i < charArr.length; i++) { if(i < str.length()) { charArr[i] = str.charAt(i); } else { charArr[i] = ' '; } } String retStr = new String(charArr); return retStr; } private static String URLify(String str, int trueLength) { StringBuilder retStr = new StringBuilder(); String strToInsert = "%20"; for(int i = 0; i < str.length(); i++) { if(str.charAt(i) != ' ') { retStr.append(str.charAt(i)); } else if(i < trueLength) { retStr.append(strToInsert); } } return retStr.toString(); } // attempt at in place urlify /* private static String URLifyInPlace(char[] charArr, int trueLength) { return charArr.toString(); } private static String shift(String str, int currentPlace, int shift) { for(int i = currentPlace + 1; i < str.length(); i++) { } return str; } */ } <file_sep>/src/CH3_AnimalShelter.java /** * This program runs the solution for the problem (3.6) Animal Shelter in CTCI * This program uses the helper classes AnimalList and AnimalNode * @author <NAME> * */ public class CH3_AnimalShelter { public static void main(String[] args) { AnimalList shelterList = new AnimalList(); shelterList.enqueue("dog"); shelterList.enqueue("cat"); shelterList.enqueue("cat"); shelterList.enqueue("dog"); shelterList.print(); shelterList.dequeueCat(); shelterList.print(); shelterList.dequeueAny(); shelterList.print(); shelterList.dequeueDog(); shelterList.print(); shelterList.dequeueDog(); shelterList.enqueue("cat"); shelterList.print(); } } <file_sep>/README.md # CrackingTheCodeInterviewQuestions-6thEdition- This repo contains code for various CTCI problems I completed These questions come from the 6th edition of the book
1fdf41ca9f731eba3794c6db27768788d74b07b1
[ "Markdown", "Java" ]
11
Java
harshadodda/CrackingTheCodeInterviewQuestions-6thEdition-
771c085a0a28270b7176d4d37325bc1f0e1aa43a
9d154bcb9540d51cf9f234961857eb45f57d1234
refs/heads/master
<file_sep>import React from 'react' import { Link } from 'react-router' import Button from 'bocomui/Button' import { Row, Col } from 'bocomui/Layout' import Center from 'public/Center' import Pre from 'public/Pre' import Feature from './Feature' import './index.less' export default () => { return ( <div className="home"> <div className="home__banner"> <Center> <div className="home__banner-center"> <h2>Bocom UI框架 React 组件库</h2> <h1>Bocom UI</h1> <em>版本:v1.0</em> <Link to="/guide#install" className="home__banner-install"> <Button>安装</Button> </Link> <Link to="/guide" className="home__banner-start"> <Button>开始</Button> </Link> </div> </Center> </div> <div className="home__middle"> <Center> <Row gutter> <Col col="md-12" className="home__middle-left"> <h2>交行Bocom UI框架方案</h2> <p>Bocom UI 结合可快速开发集单页面应用、组件化、可视化、大数据交互、权限管理、前后端分离等特性的项目。</p> </Col> </Row> </Center> </div> <div className="home__features"> <Center> <div className="home__features-head"> <h1>Bocom UI组件化开发</h1> <p>Bocom UI 抛弃了传统的组件封装方式,基于 React 组件开发思想,语义化 UI 的同时可作为一种数据类型自由传递。</p> </div> <Row> <Feature title="组件化" icon={require('./img/feature_01.png')}> 基于 React 组件开发思想,简单、灵活、高效 </Feature> <Feature title="覆盖广" icon={require('./img/feature_02.png')}> 覆盖基础组件,高级交互,以及计划推出的数据可视化组件 </Feature> <Feature title="生态完整" icon={require('./img/feature_03.png')}> 搭配脚手架,摆脱繁琐的环境配置、重复的基础工作 </Feature> <Feature title="交行GUIP项目" icon={require('./img/feature_04.png')}> ···· </Feature> </Row> </Center> </div> {/*<div className="home__bottom"> <Center> <h1>交行GUIP项目组Bocom UI框架</h1> </Center> </div>*/} </div> ) } <file_sep>import React from 'react' import { render } from 'react-dom' import { Router, browserHistory, Route, IndexRoute, IndexRedirect } from 'react-router' import process from 'nprogress' import App from './functions/App' render(( <Router onUpdate={() => { process.done() window.scrollTo(0, 0) }} history={browserHistory}> <Route path="/" onEnter={() => process.start()} onChange={() => process.start()} component={App} > <IndexRoute getComponent={(nextState, cb) => { require.ensure([], require => { cb(null, require('./functions/Home').default) }) }} /> <Route path="design" getComponent={(nextState, cb) => { require.ensure([], require => { cb(null, require('./functions/Design').default) }) }}> <IndexRedirect to="/design/concept" /> <Route path="concept"> <IndexRedirect to="/design/concept/BasicConcept"/> <Route path=":component" getComponent={(nextState, cb) =>{ const design = nextState.location.pathname.split('/').pop(); require.ensure([], require => { cb(null, require(`./functions/Design/concept/${design}/index.js`).default) }) }} /> </Route> <Route path="principle"> <IndexRedirect to="/design/principle/align"/> <Route path=":component" getComponent={(nextState, cb) =>{ const design = nextState.location.pathname.split('/').pop(); require.ensure([], require => { cb(null, require(`./functions/Design/principle/${design}/index.js`).default) }) }} /> </Route> <Route path="base"> <IndexRedirect to="/design/base/icon-cn" /> <Route path=":component" getComponent={(nextState, cb) =>{ const design = nextState.location.pathname.split('/').pop(); require.ensure([], require => { cb(null, require(`./functions/Design/base/${design}/index.js`).default) }) }} /> </Route> </Route> <Route path="guide" getComponent={(nextState, cb) => { require.ensure([], require => { cb(null, require('./functions/Guide').default) }) }} /> <Route path="components" getComponent={(nextState, cb) => { require.ensure([], require => { cb(null, require('./functions/Components').default) }) }}> <IndexRedirect to="/components/Button" /> <Route path=":component" getComponent={(nextState, cb) => { const component = nextState.location.pathname.split('/').pop() require.ensure([], require => { cb(null, require(`./functions/Components/docs/${component}.doc`).default) }) }} /> </Route> <Route path="Changelog" getComponent={(nextState, cb) => { require.ensure([], require => { cb(null, require('./functions/Changelog').default) }) }} /> <Route path="scaffolding" getComponent={(nextState, cb) => { require.ensure([], require => { cb(null, require('./functions/Scaffolding').default) }) }}> <IndexRoute getComponent={(nextState, cb) => { require.ensure([], require => { cb(null, require('./functions/Scaffolding/Home').default) }) }} /> <Route path="workflow" getComponent={(nextState, cb) => { require.ensure([], require => { cb(null, require('./functions/Scaffolding/Workflow').default) }) }} /> <Route path="docs" getComponent={(nextState, cb) => { require.ensure([], require => { cb(null, require('./functions/Scaffolding/Docs').default) }) }} /> <Route path="changelog" getComponent={(nextState, cb) => { require.ensure([], require => { cb(null, require('./functions/Scaffolding/Changelog').default) }) }} /> </Route> <Route path="*" getComponent={(nextState, cb) => { require.ensure([], require => { cb(null, require('./functions/NotFound').default) }) }} /> </Route> </Router> ), document.getElementById('app')) <file_sep># 对齐 正如『格式塔学派』中的连续律(Law of Continuity)所描述的,在知觉过程中人们往往倾向于使知觉对象的直线继续成为直线,使曲线继续成为曲线。在界面设计中,将元素进行对齐,既符合用户的认知特性,也能引导视觉流向,让用户更流畅地接收信息 ## 文案类对齐 如果页面的字段或段落较短、较散时,需要确定一个统一的视觉起点。 ![doc](img/align-doc.png) 推荐左侧示例,标题和正文左对齐,使用了一个视觉起点。 不推荐右侧示例,标题和正文使用了两个视觉起点,除非刻意强调两者区别。 ## 表单类对齐 冒号对齐(右对齐)能让内容锁定在一定范围内,让用户眼球顺着冒号的视觉流,就能找到所有填写项,从而提高填写效率。 ![doc](img/align-form.png) 冒号对齐示例 ## 数字类对齐 为了快速对比数值大小,建议所有数值取相同有效位数,并且右对齐。 ![doc](img/align-number.png) 左侧为正确示例,右侧为错误示例 <file_sep>import React, { Component } from 'react' import { Nav, IndexNavItem, NavItem } from 'bocomui/Nav' import { Layout, LayoutSidebar, LayoutContent } from 'public/Layout' import designs from './designs.json' export default class extends Component { constructor() { super() this.state = { open: false } } toggle(open) { this.setState({ open }) } render() { const { open } = this.state const { children, params } = this.props return ( <Layout open={open} onToggle={open => this.toggle(open)}> <LayoutSidebar> <Nav href="/design" onItemClick={() => this.toggle(false)}> {designs.map((item, i)=>( <NavItem key={item.title} icon={item.icon} title={item.title} href={item.name} defaultOpen > {item.components.map((component) => { return ( <NavItem key={component.name} href={`${item.name}/${component.name}`} title={component.title} /> ) })} </NavItem> ))} </Nav> </LayoutSidebar> <LayoutContent> {children} </LayoutContent> </Layout> ) } } <file_sep>导航 ============= 在广义上,任何告知用户他在哪里,他能去什么地方以及如何到达那里的方式,都可以称之为导航。当设计者使用导航或者自定义一些导航结构时,请注意:    1、尽可能提供标识、上下文线索,避免用户迷路;</br>    2、保持导航样式和行为一致或者减少导航数量,降低用户学习成本;</br>    3、尽可能减少页面间的跳转(例如:一个常见任务需要多个页面跳转时,请减少至一到两次),让用户移动距离保持简短。</br> <file_sep>/** * Created by conivision on 2017/3/20. */ import React from 'react' import Markdown from 'public/Markdown' import html from './layout-cn.md' export default () => { return <Markdown html={html} /> } <file_sep># [Bcom UI框架脚手架](http://#) Bcom UI框架脚手架 - 集成 React、ES6、Webpack、ESlint 等基础服务的全部配置 - 实现单页面应用下前后端分离的开发模式 - 提供中大型项目基本结构及页面模版 - 预安装 [Bocom UI](/),快速搭建前端项目并支持自定义 UI 主色系 ```sh $ git clone [email protected]:bocomui-react.git $ npm run dev-md ``` - [工作流程](/scaffolding/workflow) - [文档说明](/scaffolding/docs) - [更新日志](/scaffolding/changelog) <file_sep>import React, { Component } from 'react' import components from './demo.less' import Button from 'bocomui/Button' import SearchInput from 'bocomui/SearchInput' import Input from 'bocomui/Input' import { Form, FormItem, FormSubmit, FormInput, FormSelect, Option, FormTextarea } from 'bocomui/Form' import DatePicker from 'bocomui/DatePicker' import Checkbox, { CheckboxGroup } from 'bocomui/Checkbox' import message from 'bocomui/message' class Demo extends Component { render() { return ( <div className="demo-center"> <Form> <FormItem label="选择日期" name="date" required> <DatePicker className="Demo-input" onSelect={this.handleDateSelect} /> </FormItem> <FormItem label="手机号" name="date" required> <Input className="Demo-input" placeholder="13921008888" /> </FormItem> <div style={{width:300}}> <FormItem label="描述" name="desc" > <FormTextarea width={40}/> </FormItem> </div> <div style={{marginLeft:88}}> <Button>确定</Button> <Button type="minor">取消</Button> </div> </Form> </div> ) } } export default Demo <file_sep>对比 ============= 对比是增加视觉效果最有效方法之一,同时也能在不同元素之间建立一种有组织的层次结构,让用户快速识别关键信息。 ## 主次关系对比 为了让用户能在操作上(类似表单、弹出框等场景)快速做出判断, 来突出其中一项相对更重要或者更高频的操作。 <img src="img/contrast-priorities-1.png" width = "450"/><img src="img/contrast-priorities-2.png" width = "450"/> 左侧为正确示例,右侧为错误示例。 在一些需要用户慎重决策的场景中,系统应该保持中立,不能替用户或者诱导用户做出判断。 ![doc](img/contrast-equal.png) 不区分主次的示例。 『通过』和『驳回』都使用次按钮,系统保持中立。 ## 总分关系对比 通过调整排版、字体、大小等方式来突出层次感,区分总分关系,使得页面更具张力和节奏感。 ![doc](img/contrast-theScoreRelationship-1.png) 总分关系示例1 ![doc](img/contrast-theScoreRelationship-2.png) 总分关系示例2 <file_sep># 更新日志 - 第一个版本 <file_sep>## v1.0 目前已是最新版本 <file_sep># 工作流程 ## 项目构建 ### 安装 [Node.js](https://nodejs.org/en) (6.x+) 安装后会集成 npm 包管理工具,设置使用交行的npm仓库。 ```sh $ npm config set registry http://172.16.17.32:7001/ ``` ### 安装脚手架 ```sh $ npm install -g create-bocomui-demo ``` ### 生成项目 ```sh $ create-bocomui-demo my-app ``` ## 开发 ```sh $ cd my-app $ npm start ``` 到此为止,前端环境配置完成,可以通过`http://127.0.0.1:9000`进行测试。 ## 部署测试/上线 ### 代码规范检查 ```sh $ npm run lint ``` ### 构建线上环境代码 ```sh $ npm run build ``` 完成后,myapp 下的 static 目录及 index.html 发送给后台即可,如 Java web 项目下的 webapp 目录 ### 修改服务器配置 脚手架采用 browser history 控制 URL,服务器也需要做相应的访问路径配置。 <file_sep>文案 ============= ## 面对用户,尽量用第二人称 人称的选择,体现了设计师是如何看待用户的。 一般来说,设计师会把用户当做一个虚拟的第三方,描述他们时往往是“用户”、“他们”,然而UI界面是以界面为媒介、与用户的直接对话,是和一个真实人类面对面的沟通。 在现实生活中,会称面前的人为“你”“您”,那在UI界面中也应称用户为“你”“您”,而不应使用第三人称“用户”“他们”。 使用第二人称,体现了与用户的近距离感,让UI界面成为一个拟人的人格,能够让用户沉浸在模拟真实对话的情境中,更有利于我们指引和鼓励用户操作。 例如: “当你点击按钮时,会弹出对话框” 优于“当用户点击按钮时,会弹出对话框”。 ![doc](img/button.png) ## 简单、直接 文案应直接,单刀直入,最大限度的降低用户的理解负担。 间接、暧昧模糊的说法,生僻和过于“文雅”的用词,都应尽量避免,因为UI文案只是沟通的工具,只有最有效的传递信息才是它的任务。 简单来讲,就是大白话,使用日常常用词,平铺直叙。 以形容词来说,“肯定的表达”比“否定的否定”更直接。 例如:“验证码错误”优于“验证码不正确”。 ![doc](img/code.png) ## 讲用户能懂的话 把用户与界面的所有交互,都当成人与人沟通来看,提醒文案便更人性化一些。简言之,说人话。 例如:不要写“对象名 XXX 无效”、“XXX 脚本错误”。 ![doc](img/object.png) 站在用户的角度,说用户熟悉的话。 ## 语气、尊重和委婉 十几年前,设计师们的主要工作确实很大比例为视觉设计,因此被粗暴地称之为「美工」。但今日世界已截然不同,把设计师细分出来就有用户体验设计、视觉设计、产品设计等,所以仍然称设计师为「美工」,毫无道理,甚至,就像知乎上有人说的: 对于设计师来说,「美工」是侮辱性的称呼,就像「代码民工」之于程序员一样。 在文案的使用上,对于某个群体的称呼是否恰当,取决于写者是否了解这个该词的真正含义。 再有,像「您」和「你」这种问题,其实没有统一标准,得看产品本身想要以哪种姿态来面对用户。 文案有时候并不是为了某个目的和功能,而是为了隐藏不能明说的原因和问题,或者只为了安抚用户情感,这时文案就不能那么生硬直接。以委婉或者暧昧的文字来向用户说明,暗示或引导用户从我们希望的角度去理解。 例如,“支付需要一些时间,请稍候”优于“支付尚未成功”; “抱歉,出现了一些问题,请稍后刷新重试”优于“系统错误”。 ![doc](img/pay.png) ## 用词准确 通用基本用词要规范,不要写错字,词语表达要完整。 专业用语要精准,并是所属行业认可通用用词;时间的表述必须明确。 例如:银行卡号与卡号 ;登录与登陆。 ![doc](img/sign.png) 可能因为绝大部分人都在用 PowerPoint 来制作幻灯片,所以,通常人们会直接使用「PPT」这个词来代指幻灯片。但你知道,能做幻灯片的还有 Keynote、Prezi ## 英文名词大和小写 正确使用专有名词的大小写规范。 例如 正确:App,错误:APP;正确:Ios,错误:IOS、ios。 ![doc](img/ios.png) ## 标点符号规范 正确得使用标点符号会让句子看起来更清晰和具有可读性。 ![doc](img/sybom.png) <file_sep>布局 ============= 布局是页面构成的前提,是后续展开交互和视觉设计的基础。Bocom UI 提供了常用的布局模板来保证同类产品间的一致性,设计者在选择布局之前,需要注意以下几点原则: 1. 明确用户在此场景中完成的主要任务和需获取的决策信息。 2. 明确决策信息和操作的优先级及内容特点,选择合理布局。 网页常见的布局结构有“国”字型布局、拐角型布局、标题正文型布局、左右框架型布局、POP型布局等 ## “国”字型 是一些大型网站所喜欢的类型,即最上面是网站的标题以及横幅广告条,接下来就是网站的主要内容,左右分列两小条内容,中间是主要部分,与左右一起罗列到底,最下面是网站的一些基本信息、联系方式、版权声明等。这种结构是我们在网上见到的差不多最多的一种结构类型。 ![doc](img/national.png) 国字型示例 ## 拐角型 这种结构与上一种其实只是形式上的区别,它去掉了“国”字形布局的最右边的部分,给主内容区释放了更多空间。这种布局上面是标题及广告横幅,接下来的左侧是一窄列链接等,右列是很宽的正文,下面也是一些网站的辅助信息。 ![doc](img/corner.png) 拐角型示例 ## 标题正文型 这种类型即最上面是标题或类似的一些东西,下面是正文,比如一些文章页面或注册页面等就是这种类 ![doc](img/Title.png) 标题正文型示例 ## 左右框架型 这是一种左右为分别两页的框架结构,一般左面是导航链接,有时最上面会有一个小的标题或标致,右面是正文。我们见到的大部分的大型论坛都是这种结构的,有一些企业网站也喜欢采用。这种类型结构非常清晰,一目了然。 ![doc](img/About.png) 左右框架型示例 ## POP型 POP引自广告术语,就是指页面布局像一张宣传海报,以一张精美图片作为页面的设计中心。常用于时尚类站点。优点显而易见:漂亮吸引人。缺点就是速度慢。作为版面布局,还是值得借鉴的。 POP型还可以使用一个Flash页面作为封面,这种布局是指整个网页就是一个Flash动画,它本身就是动态的,画面一般比较绚丽、有趣,是一种比较新潮的布局方式。由于Flash强大的功能,页面所表达的信息比普通封面型更丰富,其视觉效果及听觉效果如果处理得当,会是一种非常有魅力的布局。 ![doc](img/pop.png) POP型示例 <file_sep>字体 ============= 跨平台的字体设定,力求在各个操作系统下都有最佳展示效果。 ## 字体 字体是界面设计中最重要的基本构成之一,用户通过文本来消化内容和完成工作,优雅的字体将大大提升用户的阅读体验及工作效率。Bocom UI 的字体方案,在满足不同终端始终保持良好的阅读体验的同时,使页面的视觉层次更加清晰。使用时有以下三点需要注意: 1. 合理的使用不同的字重、字号和颜色来强调界面中最重要的信息; 2. 尽可能的使用单种字体,混合使用多种字体会让界面看起来零散和草率; 3. 遵循 WCAG 2.0 标准,字体在使用时与背景颜色的对比值满足无障碍阅读的最低标准。 **font-family:** "PingFangSC-Thin", "Lantinghei SC", "Open Sans", Arial, "Hiragino Sans GB", "Microsoft YaHei","\5FAE\8F6F\96C5\9ED1", "STHeiti", "WenQuanYi Micro Hei", SimSun, sans-serif ## 字号 ============= Bocom UI使用不同的字号和字重来传递视觉的信息层次。默认字体为 12pt,展示型页面可以设置为 14pt,其他字体字号相应升级。 ![doc](img/font-size.png) ## 行高 ============= 行高会影响阅读的体验,西文的基本行高通常是字号的 14px 上下,而中文因为字符密实且高度一致,所以一般行高需要更大,1.5em 至 1.8em 之间是一个比较好的视觉阅读效果,BocomUI 规定默认文案字体行高为 1.5em,展示型页面可根据实际情况调整行高。 ![doc](img/lineheight.png) 第一行高是21px , 字体大小是16px ; 第二行高是18px , 字体大小是12px。 ## 字体颜色 ============= 文本颜色如果和背景颜色太接近就会很难以阅读,这对于深色背景和浅色背景同样适用。 考虑到无障碍设计的需求,帮助那些弱视和色盲的用户也能轻松识别和阅读屏幕上的文字,我们参考了 WACG 2.0 的标准,文本和背景色之间至少保持最小 4.5:1 的对比度(AA 级),正文内容都保持了 7:1 以上的 AAA 级对比度。 ![doc](img/fontcolor.png) <file_sep>巧用过渡 ============= 人脑灰质(Gray Matter)会对动态的事物(eg:移动、形变、色变等)保持敏感。在界面中,适当的加入一些过渡效果,能让界面保持生动,同时也能增强用户和界面的沟通。 Adding: 新加入的信息元素应被告知如何使用,从页面转变的信息元素需被重新识别。 Receding: 与当前页无关的信息元素应采用适当方式移除。 Normal: 指那些从转场开始到结束都没有发生变化的信息元素。 ## 在视图变化时保持上下文 滑入与滑出:可以有效构建虚拟空间。 传送带:可极大地扩展虚拟空间。 折叠窗口:在视图切换时,有助于保持上下文,同时也能拓展虚拟空间。 ## 在视图变化时保持上下文 对象增加:在列表/表格中,新增了一个对象。 对象删除:在列表/表格中,删除了一个对象。 对象更改:在列表/表格中,更改了一个对象。 对象呼出:点击页面中元素,呼出一个新对象。 ## 改善感知性能 当无法有效提升『实际性能』时,可以考虑适当转移用户的注意力,来缩短某项操作的感知时间,改善感知性能。 ## 自然运动 参见 Ant Motion 动画语言。 <file_sep>色彩 ============= 设计中对色彩的运用不仅应考虑品牌的识别性,还需达到信息传递、操作指引、交互反馈,或是强化和凸显某一个元素的目的。基于操作系统更注重高效、清晰等特点,Bocom UI 的用色上更偏向简洁实用一些。在选择色彩时有以下三个注意点: 1. 色彩应与产品定位相匹配,且符合用户心理认知; 2. 视觉层次应清晰分明,为重要行动点或关键信息定义一个主色,并建立视觉连续性; 3. 遵守 WCAG 2.0 的 标准,保证足够的对比度,让色彩更容易被视障碍(色盲)用户识别。 ## 中性色边框和背景色 灰色作为中性色在 BocomUI 的网页设计中被大量使用到,它的使用有利于关键内容的衬托和功能的引导。这类色彩主要体现在导航框架、背景底色、描边、或次级操作等等。 ![doc](img/borback.png) 中性色边框和背景色示例 ## 提示信息色 比较稳定的色彩除了中性色外还有具备特定含义的功能色,这类色彩起到传递功能信息、代表某种状态等作用。 ![doc](img/dcolor.png) 提示信息色示例 ## 首选按钮 可以在按钮中包括一个图像和相关的文本,用它们在按钮中创建一个吸引人的标记图像 ![doc](img/button.png) 首选按钮色示例 ## 对比度 当对比度数值低于 3:1 时,弱视用户将很难识别 ![doc](img/contrast.png) 对比度示例 ## 必填字段色 通常必须填写的表单字段会用色彩(比如红色)来进行区分。 ![doc](img/lis.png) 必填字段色示例 <file_sep># Bocom UI 交行 React 组件库 ## 安装模块`npm install` Node.js模块的安装分为全局模式和本地模式。本地模式安装的模块可以在代码中使用“import”调用,但是全局模式安装不可以。全局本安装主要是为了在命令行中使用,例如全局安装了vmarket后,就可以在命令行中直接运行vm命令。 > 安装之前修改npm下载仓库为交行仓库: npm config set registry http://172.16.58.3:7001/ ### 本地安装 ```sh npm install @bocom/bocomui ``` 一般情况下会以本地模式运行,包会被安装到和你的应用程序代码的本地node_modules目录下。 > 注意:安装的同时,将信息写入到应用程序代码的package.json中可以使用:npm install bocom-ui --save。 ### 全局安装 ```sh npm install -g @bocom/bocomui ``` 在全局模式下,Node.js包默认被安装到用户目录的"AppData\Roaming\npm\node_modules"下,例如"C:\Users\Administrator\AppData\Roaming\npm\node_modules"。 ### 安装express进行测试 ```sh npm install express –gf ``` 在命令行中运行,在目录中检测express是否安装成功。 ## 使用 Bocom UI 抛弃传统的资源加载方式,基于 webpack 打包,资源种类多种多样,会涉及一些 webpack 的配置,如下: ```js { module: { loaders: [{ test: /\\.(eot|woff|woff2|ttf|svg|png|jpg)(\\?v=[\\d\\.]+)?$/, loader: 'file?name=files/[hash].[ext]' }, { test: /\\.css$/, loader: 'style!css' }] }, resolve: { alias: { bocom: 'bocom-ui/lib' } } } ``` webpack 配置完成后,即可在代码中使用组件,以 [Button](/components/Button) 为例: ```js import Button from 'bocomui/Button' const ButtonType = () => { return ( <div> <Button>默认</Button> </div> ) } ``` 详细查看[Button](/components/Button) ## 组件全局配置 覆盖或扩展 `defaultProps` 即可,以 [DatePicker](/components/DatePicker) 为例: ```js DatePicker.defaultProps = Object.assign(DatePicker.defaultProps || {}, { placeholder: 'Please select date' }) ``` 命令式 API 模块 [message](/components/message)、[confirm](/components/confirm)、[xhr](/components/xhr) 也支持全局配置,涉及 url 方式加载数据的组件以及 [Form](/components/Form) 均依赖 xhr。详细配置请参考其各自文档 > 全局配置后,这些 API 会变成有状态的,即最终结果受配置影响,所以尽量一次性配置并向其它开发者说明 ## 开发者说明 ### 开发环境安装 ```sh git clone [email protected]:/data/git/bocomui-doc.git cd bocomui-doc npm install npm start ``` 查看: [Home](/ "首页") <file_sep>亲密性 ============= 如果信息之间关联性越高,它们之间的距离就应该越接近,也越像一个视觉单元;反之,则它们的距离就应该越远,也越像多个视觉单元。亲密性的根本目的是实现组织性,让用户对页面结构和信息层次一目了然 ## 纵向间距关系 通过『小号间距』、『中号间距』、『大号间距』这三种规格来划分信息层次。 在这三种规格不适用的情况下,可以通过加减『基础间距』的倍数,或者增加元素来拉开信息层次。 ![gap](img/proximity-y.png) 纵向间距示例:通过上图的三种颜色来判断间距大小 ## 横向间距关系 为了适用不同尺寸的屏幕,在横向采用栅格布局来排布组件,从而保证布局的灵活性。 在一个组件内部,元素的横向间距也应该有所不同。 示例一: ![gap](img/proximity-x.png) 从视觉上通用户名和外网IP来确定左右元素之间的间距 示例二: ![gap](img/proximity-xy.png) 名称左对齐、多选框右对齐对比突显左右元素之间的间距 <file_sep># 简化交互 根据费茨法则(Fitts's Law)所描述的,如果用户鼠标移动距离越少、对象相对目标越大,那么用户越容易操作。 通过运用上下文工具(即:放在内容中的操作工具),使内容和操作融合,从而简化交互。 ## 实时可见工具 如果某个操作非常重要,就应该把它放在界面中,并实时可见。 实时可见工具示例: ![realtime](img/lightweight-realtime.png) 状态一: 文案中出现明显的可点击图标 状态二: 鼠标悬停时,鼠标指针变为手型,颜色加深 状态三: 鼠标点击后,和未点击前有明显的区别(5→6) ## 开关显示工具 如果某些操作只需要在特定模式时显示,可以通过开关来实现。 开关显示工具示例: ![switch](img/lightweight-switch.png) 点击修改后,文本变为输入框,进入编辑状态 ## 交互中的工具 如果操作不重要或者可以通过其他途径完成时,可以将工具放置在用户的操作流程中,减少界面元素,降低认知负担,给用户小惊喜。 ![tool](img/lightweight-tool.png) 鼠标悬停时,Tooltip进行提示,用户点击文本可以进行编辑 ## 可视区域 ≠ 可点击区域 在使用 Table 时,文字链的点击范围受到文字长短影响,可以设置整个单元格为热区,以便用户触发。 ![area](img/lightweight-area.png) 当悬浮在 ID 所在的文字链单元格时,鼠标『指针』随即变为『手型』,单击即可跳转。 当需要增强按钮的响应性时,可以通过增加用户点击热区的范围,而不是增大按钮形状,从而增强响应性,又不缺失美感。 ![btn](img/lightweight-area-btn.png) 鼠标移入按钮附近,即可激活 Hover 状态。 <file_sep>提供邀请 ============= 很多富交互模式(eg:『拖放』、『行内编辑』、『上下文工具』)都有一个共同问题,就是缺少易发现性。所以『提供邀请』是成功完成人机交互的关键所在。 邀请就是引导用户进入下一个交互层次的提醒和暗示,通常包括意符(eg:实时的提示信息)和可供性,以表明在下一个界面可以做什么。当可供性中可感知的部分(Perceived Affordance)表现为意符时,人机交互的过程往往更加自然、顺畅。</br></br></font> ## 静态邀请 指通过可视化技术在页面上提供引导交互的邀请。 引导操作邀请:一般以静态说明形式出现在页面上,不过它们在视觉上也可以表现出多种不同样式。 常见类型:『文本邀请』、『白板式邀请』、『未完成邀请』。 漫游探索邀请:是向用户介绍新功能的好方法,尤其是对于那些设计优良的界面。但是它不是『创口贴』,仅通过它不能解决界面交互的真正问题。 ## 动态邀请 指以响应用户在特定位置执行特定操作的方式,提供特定的邀请。 悬停邀请:在鼠标悬停期间提供邀请。 推论邀请:用于交互期间,合理推断用户可能产生的需求。 更多内容邀请:用于邀请用户查看更多内容。 <file_sep># 图标 图标是具有指代意义的图形,也是一种标识。通过使用图标表达命令,强调状态,表示产品或类别。为了系统及跨平台之间图形认知保持一致, Bocom UI的图标在设计和使用时有以下两个原则点需要注意: 1. 简单的图形语言以及高辨识度。清晰、直观的图标更能明确指代含义便于识别记忆; 2. 保持图标之间一致的风格和表现方式。界面中的所有图标都应该在细节设计、透视和笔画权重上保持一致。 ## 图标 ![icon](img/icon.png) 部分图标示例 ## 命名规则 统一的命名方式有助于管理图标,也能更快速的找到需要的图标。我们为每个图标赋予了语义化的命名,命名规则如下: 1. 实心和描线图标保持同名,用`-o` 来区分,比如 question-circle(实心) 和 question-circle-o(描线) 2. 命名顺序:[图标名]-[形状?]-[描线?]-[方向?] ? 为可选 ![iconName](img/icon-name.png) 命名示例 ## 图标尺寸 应用于页面时请使用 BocomUI 的规范尺寸,与字体搭配时和字体的尺寸保持一致。 例如:和 12pt 字体搭配时,图标使用 12px,图标与文字的间距为 8px。 ![iconSize](img/icon-size.png) 尺寸示例 ## 颜色 图标的颜色需要与搭配文案的色值保持一致(表示状态的除外)。 ![iconColor](img/icon-color.png) 颜色示例 <file_sep>足不出户 ============= 能在这个页面解决的问题,就不要去其它页面解决,因为任何页面刷新和跳转都会引起变化盲视(Change Blindness),导致用户心流(Flow)被打断。 频繁的页面刷新和跳转,就像在看戏时,演员说完一行台词就安排一次谢幕一样。 ## 覆盖层 一般在列表中,通过用户『点击』某个区块,点击可弹出确认框,确认是否执行该操作 <img src="img/stay-list.png"/> 用户点击『删除』后,弹窗进行确认,在当前页面完成任务 <img src="img/stay-list-two.png"/> 当用户点击确定按钮完成任务后,弹窗消失,完成操作。 ## 标签页 将多个平级的信息进行整理和分类了。 <img src="img/stay-Layer.png" width =""/> 标签页一次只显示一组信息。 <file_sep># 即时反应 『即时反应』的重要性体现在交互之后立即给出反馈。 就像『牛顿第三定律』所描述作用力和反作用一样,用户进行了操作或者内部数据发生了变化,系统就应该立即有一个对应的反馈,同时输入量级越大、重要性越高,那么反馈量级越大、重要性越高。 虽然反馈太多(准确的说,错误的反馈太多)是一个问题,但是反馈太少甚至没有反馈的系统,则让人感觉迟钝和笨拙,用户体验更差。 ## 查询模式 自动完成:用户输入时,下拉列表会随着输入的关键词显示匹配项。 ![search](img/reaction-search.png) 用户查询的关键词,如果部分匹配上预留关键词,可以自动完成 ## 反馈模式 ### 实时预览 在用户提交输入之前,让他先行了解系统将如何处理他的输入。 ![pre](img/reaction-preview.png) 实时预览示例 根据用户的输入,提供关于用户名长度和有效性的实时反馈 ### 进度指示 当一个操作需要一定时间完成时,就需要即时告知进度,保持与用户的沟通。 常见的进度指示:『按钮加载』、『表格加载』、文件下载』。可根据操作的量级和重要性,展示不同类型的进度指示。 按钮加载示例 ![loading](img/reaction-loading.png) 表格加载示例 ![table](img/reaction-table.png) 文件下载示例 ![percent](img/reaction-percent.png) <file_sep># 重复 相同的元素在整个界面中不断重复,不仅可以有效降低用户的学习成本,也可以帮助用户识别出这些元素之间的关联性。 重复元素 重复元素可以是一条粗线、一种线框,某种相同的颜色、设计要素、设计风格,某种格式、空间关系等。 ## 线框的重复 ![line](img/repetition-line.png "线框重复示例") 线框重复示例 ## 设计元素的重复 ![icon](img/repetition-icon.png "设计元素重复示例") 设计元素重复示例 ## 文档的重复 ![doc](img/repetition-doc.png "文档格式重复示例") 文档重复示例 <file_sep>基本理念 ============= Bocom UI 不但追求用户的使用体验,还追求设计者和开发者的使用体验,践行“以人为本”的设计理念 ## 极简 如无必要,勿增实体。 简单意味着没有歧义,开箱即用。 ## 一致 对于相同的问题,提供相同的解决方案,减轻用户的认知及记忆负荷,使界面操作方式更符合直觉。 对于确立设计模式、打造符合直觉的产品体验来说极其重要。 ## 高效 帮助用户尽可能快的完成任务,同时亦确保开发设计阶段的高效体验。 让流程更顺畅,更智能化。优化功能逻辑,预判需求,让人们使用的更加轻松快捷。 通俗而科学的设计原则 详见『设计原则』 ## 快乐 用户的快乐 漂亮的组件、精致的排版,使用户在本能层中产生积极反应; 良好的功能、性能和可用性,使用户在行为中产生积极反应; 自我形象、个人满足和美好记忆,使用户在思维中中产生积极反应。 设计者的快乐 从无到有时,提供一整套解决方案,帮助设计者将想法快速形成商业产品并推向市场,快速、低成本试错。 从有到优时,提供一系列自定义建议,帮助设计者塑造产品个性并提升整体体验,持续改进服务。 <file_sep>直截了当 ============= 正如 <NAME> 所言:『需要在哪里输出,就要允许在哪里输入』。这就是直接操作的原理。eg:不要为了编辑内容而打开另一个页面,应该直接在上下文中实现编辑。 ## 页内编辑 ### 单字段行内编辑 当『易读性』远比『易编辑性』重要时,可以使用『单击编辑』。 <img src="img/direct-singleClick.png" width = "600"/> ### 单击编辑示例 状态一:普通的浏览模式,不区分可编辑行和不可编辑行; 状态二:鼠标悬停时,『指针』变为『手型』,编辑区域底色变黄,出现『Tooltips』提示单击编辑; 状态三:鼠标点击后,出现『输入框』、『确定』、『取消』表单元素,同时光标定位在『输入框』中。 当『易读性』为主,同时又要突出操作行的『易编辑性』时,可使用『文字链/图标编辑』。 <img src="img/direct-iconEdit.png" width = "600"/> ### 文字链/图标编辑示例 状态一:在可编辑行附近出现文字链/图标; 状态二:鼠标点击『编辑』后,出现『输入框』、『确定』、『取消』表单元素,同时光标定位在『输入框』中。 ### 多字段行内编辑 <img src="img/direct-multipleClick.png" width = "900"/> 多字段行内编辑示例 可在段内直接编辑 <file_sep>import 'normalize.css' import React, { Component } from 'react' import { Link, IndexLink } from 'react-router' import classnames from 'classnames' import { Row, Col } from 'bocomui/Layout' import { Nav, NavItem } from 'bocomui/Nav' import { Dropdown, DropdownToggle, DropdownMenu } from 'bocomui/Dropdown' import Button from 'bocomui/Button' import Icon from 'bocomui/Icon' import './index.less' class App extends Component { renderNav() { return ( <ul> <li> <IndexLink to="/" activeClassName="active">首页</IndexLink> </li> <li> <Link to="/design" activeClassName="active">设计说明</Link> </li> <li> <Link to="/guide" activeClassName="active">指南</Link> </li> <li> <Link to="/components" activeClassName="active">组件</Link> </li> <li> <Link to="/changelog" activeClassName="active">更新日志</Link> </li> <li> <Link to="/scaffolding" activeClassName="active">脚手架</Link> </li> </ul> ) } render() { const { children } = this.props return ( <div className="wrapper"> <Row className="header" fluid> <Col> <Link to="/" className="header__logo"> <span className="logo"> </span> Bocom UI <sub>v1.0</sub> </Link> </Col> <Col className="header__nav"> {this.renderNav()} </Col> <Col right> <Dropdown ref="dropdown"> <DropdownToggle className="header__nav-toggle"> <Button icon="bars" transparent /> </DropdownToggle> <DropdownMenu onClick={() => this.refs.dropdown.close()} className="header__nav-toggle-popover" align="middle" > {this.renderNav()} </DropdownMenu> </Dropdown> </Col> </Row> <div className="body">{children}</div> <div className="footer">交行GUIP项目组Bocom UI框架</div> </div> ) } } export default App
862fdd3dde94bf6188cc55c38de07cb592ee2bdc
[ "JavaScript", "Markdown" ]
28
JavaScript
zrhperson/rrrrrr
1b01fd31e3360938a412e72b7b5364d24ed2f42c
210e94437cc31d4837a553cb4d8eaa15f6bfc418
refs/heads/master
<repo_name>Systemsadms/drfree24v1.0<file_sep>/intranet/agregarcampo.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Untitled Document</title> </head> <body> <?php $base="drwebs_"; $tabla="guias"; $anadir="ALTER TABLE "; $anadir.=$tabla; $anadir.=" ADD dimensiones VARCHAR(100) "; $conexion=mysql_connect ("localhost","drfree24db","222702qaaq"); mysql_select_db ($base, $conexion); if(mysql_query ($anadir ,$conexion)) { echo "<h2> A la tabla $tabla se le ha añadido un campo</h2><br>"; }else{ echo "<h2> No ha podido añadir</h2><br>"; }; mysql_close($conexion); ?> ?> </body> </html><file_sep>/example-captcha2.php <html> <body> <form method="post" action="registro3.php"> <?php // descomentar la linea de abajo si tenemos el codigo de validacion en otro archivo require_once('recaptchalib.php'); $publickey = "<KEY>"; echo recaptcha_get_html($publickey, $error); ?> <input type="submit" /> </form> </body> </html> <file_sep>/reg.php <?php $largo=10; $str = "abcdefghijklmnopqrstuvwxyz"; $may = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; $num = "1234567890"; $cad = ""; # Comienzo de la generacion de clave. $cad = substr($may ,rand(0,24),1); $cad .= substr($num ,rand(0,10),1); $cad .= substr($num ,rand(0,10),1); for($i=0; $i<$largo; $i++) { $cad .= substr($str,rand(0,24),1); } //print "\nClave = ". $cad . "\n\n" ; ; $nick = $_POST['nick']; $pass = $cad; $nombres = $_POST['nombres']; $apellidos = $_POST['apellidos']; //$ci = $_POST['ci']; $email = $_POST['email']; $pais = $_POST['pais']; $estado = $_POST['estado']; $ciudad = $_POST['ciudad']; $telefono = $_POST['telefono']; $celular = $_POST['celular']; $direccion = $_POST['dir']; $conn = mysql_connect("localhost","drfree24db","222702qaaq"); mysql_select_db("drwebs_",$conn); $ssql = "SELECT * FROM casilleros WHERE nick ='$nick'"; $rs = mysql_query($ssql,$conn); if (mysql_num_rows($rs)>0) { echo "<b>Ya existe un un usuario con ese nick, por favor intente registrarse con otro nick</b>"; mysql_close($conn); ?> <br /><br /> <a href="registro.html">--> Intentar de Nuevo</a> <?php }else { mysql_select_db("runnerca_drfree24",$conn); $ssql = "SELECT * FROM casilleros WHERE email ='$email'"; $rs = mysql_query($ssql,$conn); if (mysql_num_rows($rs)>0) { echo "<b>Ya existe un un usuario con ese email, por favor intente registrarse con otra direccion de correo</b>"; mysql_close($conn); ?> <br /><br /> <a href="registro.html">--> Intentar de Nuevo</a> <?php }else { mysql_query ("INSERT INTO casilleros VALUES ('', '$nick','$pass','$nombres','$apellidos','$ci','$email','$pais','$estado','$ciudad','$telefono','$celular','$direccion')"); mysql_close ($conn); echo "<b>Bienvenido a la familia de DrFree 24. Se ha registrado exitosamente por favor revise en su correo nuestro email de no estar en la bandeja de entrada buscar en la bandeja de spam o correos no deseados y guardar nuestra direccion como segura.</b>"; echo "<br><br><br>"; //Enviar email a correo $body='Se ha registrado un Nuevo Usuario Usuario: '.$_POST['nick'].' Nombres: '.$_POST['nombres'].' Apellidos: '.$_POST['apellidos'].' Pais: '.$_POST['pais'].' Estado: '.$_POST['estado'].' Ciudad: '.$_POST['ciudad'].' E-Mail: '.$_POST['email'].' Celular: '.$_POST['celular'].' Telefono: '.$_POST['telefono'].' Direccion: '.$_POST['dir'].' '; $body2 = 'Saludos Cordiales: Sr(a). '.$_POST['nombres'].', es un gusto para nosotros que usted forme parte de nuestra cartera de clientes. Su usuario y su password para acceer a su Dr.free24 son los siguientes: Usuario: '.$_POST['nick'].' Password: '.<PASSWORD>.' Usted puede cambiar su contraseña cuando guste desde su cuenta en Drfree24.com Por lo tanto a partir de ahora al realizar cualquier compra usted debera informarle a su proveedor que la direccion de envio de su carga es la siguiente: SHIPPING ADDRESS: Nombre: Dr.free24 /SU NOMBRE COMPLETO Adress: 12250 NW 25th ST Suite 115 Adress2: C/O Air Marine City: MIAMI Estate: FLORIDA Zip Code: 33182 Phone: (305)4773496 Cada vez que recibamos un paquete , usted recibira un correo electronico informandole los detalles de la carga que ha recepcionado. Esta informacion es actualizada a diario por lo tanto usted sera notificado de los paquetes el mismo dia de su recepcion. De igual forma lo invitamos a visitar nuestra pagina web www.drfree24.com donde con su usuario y su contrasena, usted podra monitorear el estatus de sus paquetes y asi estar informado sobre la ubicacion del mismo. Gracias por su interes en trabajar con nosotros, esperamos brindarles un excelente servicio. '; $para="<EMAIL>"; $para2= $_POST['email']; if ( isset ($_POST["btn_enviar"])) { $asunto = "Nuevo Registro de Usuario"; $desde = $_POST["nick"]; $mensaje = $body; $cabeceras = ""; $cabeceras = "MIME-VErsion: 1.0 \r\n"; $cabeceras = "Content-Type: text/html; charset=iso-8859-1\r\n"; $cabeceras = "To: " . $_POST ["nick"] . "\r\n"; $cabeceras = "From: " . $_POST ["email"] . "\r\n"; $asunto2 = "Bienvenido a Drfrre24"; $desde2 = $_POST["nick"]; $mensaje2 = $body2; $cabeceras2 = ""; $cabeceras2 = "MIME-VErsion: 1.0 \r\n"; $cabeceras2 = "Content-Type: text/html; charset=iso-8859-1\r\n"; $cabeceras2 = "To: " . $_POST ["nick"] . "\r\n"; $cabeceras2 = "From: " . "<EMAIL>" . "\r\n"; mail ($para2, $asunto2, $mensaje2, $cabeceras2); mail ($para, $asunto, $mensaje, $cabeceras); echo "<b>Se ha registrado exitosamete, por favor intente ingresar a su cuenta</b>"; } } } ?><file_sep>/mostrar.php <?php $nick = $_SESSION["login"]; $conn = mysql_connect("localhost","drfree24db","222702qaaq"); mysql_select_db("drwebs_",$conn); $ssql = mysql_query("SELECT * FROM casilleros WHERE nick='$nick'"); $cas = mysql_result($ssql,0,"id"); $nick = mysql_result($ssql,0,"nick"); $nombres = mysql_result($ssql,0,"nombres"); $apellidos = mysql_result($ssql,0,"apellidos"); $ci = mysql_result($ssql,0,"ci"); $email = mysql_result($ssql,0,"email"); $pais = mysql_result($ssql,0,"pais"); $estado = mysql_result($ssql,0,"estado"); $ciudad = mysql_result($ssql,0,"ciudad"); $telefono = mysql_result($ssql,0,"telefono"); $celular = mysql_result($ssql,0,"celular"); $dir = mysql_result($ssql,0,"dir"); mysql_close($conn); ?><file_sep>/intranet/editarsave.php <?php if (isset($_POST['guardar'])) { $cas = $_POST['cas']; $nick = $_POST['nick']; $password = $_POST['pass']; $nombres = $_POST['nombres']; $apellidos = $_POST['apellidos']; $cedula = $_POST['ci']; $email = $_POST['email']; $pais = $_POST['pais']; $estado = $_POST['estado']; $ciudad = $_POST['ciudad']; $telefono = $_POST['telefono']; $celular = $_POST['celular']; $dir = $_POST['dir']; $conn = mysql_connect("localhost","drfree24db","222702qaaq"); mysql_select_db("drwebs_",$conn); $consulta = "UPDATE casilleros SET nick ='$nick', pass ='<PASSWORD>' , nombres ='$nombres', apellidos ='$apellidos', ci ='$cedula', email ='$email', pais='$pais', estado='$estado', ciudad='$ciudad', celular='$celular', telefono='$telefono', dir='$dir' WHERE id = $cas" ; $hacerconsulta = mysql_query ($consulta); mysql_close ($conn); header("location:abrir.php"); } ?> <file_sep>/intranet/editar.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Untitled Document</title> <link href="style/estilo.css" rel="stylesheet" type="text/css" /> <script src="../SpryAssets/SpryValidationTextField.js" type="text/javascript"></script> <link href="../SpryAssets/SpryValidationTextField.css" rel="stylesheet" type="text/css" /> </head> <body> <?php session_start(); if ($_SESSION['admin'] == 'drfree24') { if (isset($_POST["guardar"])) { echo "sus cambios se han guardado con exito"; } else { $id = $_POST['id']; $conn = mysql_connect("localhost","drfree24db","222702qaaq"); mysql_select_db("drwebs_",$conn); $ssql = mysql_query("SELECT * FROM casilleros WHERE id='$id'"); $cas = mysql_result($ssql,0,"id"); $nick = mysql_result($ssql,0,"nick"); $pass = mysql_result($ssql,0,"pass"); $nombres = mysql_result($ssql,0,"nombres"); $apellidos = mysql_result($ssql,0,"apellidos"); //$ci = mysql_result($ssql,0,"ci"); $email = mysql_result($ssql,0,"email"); $pais = mysql_result($ssql,0,"pais"); $estado = mysql_result($ssql,0,"estado"); $ciudad = mysql_result($ssql,0,"ciudad"); $telefono = mysql_result($ssql,0,"telefono"); $celular = mysql_result($ssql,0,"celular"); $dir = mysql_result($ssql,0,"dir"); mysql_close($conn); ?> <div id="MarcoGlobal"> <div id="Marcohead"></div><!--Fin de marco head--> <div id="MarcoMenu"> / <a href="control.php">Volver a buscador</a> / <a href="destruir.php">Cerrar Admin Center</a> / </div> <!--Fin de marco menu--> <div id="Marcocontenedor"> <br /><br /> <b> Editar Datos de Casillero</b> <br /><br /> <form method="post" action="editarsave.php"> <table width="200" border="0" align="center"> <tr> <td width="82" align="left"><b>Cas:</b></td> <td width="102" align="left"><span id="sprytextfield1"> <label for="cas"></label> <input name="cas" type="text" id="cas" value="<?php echo $id ?>" readonly="readonly" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td align="left"><b>Nick:</b></td> <td align="left"><span id="sprytextfield2"> <label for="nick"></label> <input name="nick" type="text" id="nick" value="<?php echo $nick ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <tr> <td align="left"><b>Clave:</b></td> <td align="left"><span id="sprytextfield3"> <label for="pass"></label> <input name="pass" type="text" id="pass" value="<?php echo $pass ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <td align="left"><b>Nombres:</b></td> <td align="left"><span id="sprytextfield4"> <label for="nombres"></label> <input name="nombres" type="text" id="nombres" value="<?php echo $nombres ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td align="left"><b>Apellidos:</b></td> <td align="left"><span id="sprytextfield5"> <label for="apellidos"></label> <input name="apellidos" type="text" id="apellidos" value="<?php echo $apellidos ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td align="left"><b>Cedula:</b></td> <td align="left"><span id="sprytextfield6"> <label for="ci"></label> <input name="ci" type="text" id="ci" value="<?php echo $ci ?>" /> </span></td> </tr> <tr> <td align="left"><b>E-mail:</b></td> <td align="left"><span id="sprytextfield7"> <label for="email"></label> <input name="email" type="text" id="email" value="<?php echo $email ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td align="left"><b>Pais:</b></td> <td align="left"><span id="sprytextfield8"> <label for="pais"></label> <input name="pais" type="text" id="pais" value="<?php echo $pais ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td align="left"><b>Estado:</b></td> <td align="left"><span id="sprytextfield9"> <label for="estado"></label> <input name="estado" type="text" id="estado" value="<?php echo $estado ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td align="left"><b>Ciudad:</b></td> <td align="left"><span id="sprytextfield10"> <label for="ciudad"></label> <input name="ciudad" type="text" id="ciudad" value="<?php echo $ciudad ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td align="left"><b>Telfono:</b></td> <td align="left"><span id="sprytextfield11"> <label for="telefono"></label> <input name="telefono" type="text" id="telefono" value="<?php echo $telefono ?>" /> </span></td> </tr> <tr> <td align="left"><b>Celular:</b></td> <td align="left"><span id="sprytextfield12"> <label for="celular"></label> <input name="celular" type="text" id="celular" value="<?php echo $celular ?>" /> </span></td> </tr> <tr> <td align="left"><b>Direccion:</b></td> <td align="left"><span id="sprytextfield13"> <label for="dir"></label> <input name="dir" type="text" id="dir" value="<?php echo $dir ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> </table> <br /> <table width="200" border="0" align="center"> <tr> <td><input type="submit" value="Guardar Cambios" name="guardar"/></td> <td><input type="reset" value="Deshacer Cambios" /></td> </tr> </table> </form> <?php } ?> </div><!--Fin de marco contenedor--> </div><!--Fin de marco global--> <?php } else { session_destroy(); header("location:indexn.php"); } ?> <script type="text/javascript"> var sprytextfield1 = new Spry.Widget.ValidationTextField("sprytextfield1"); var sprytextfield2 = new Spry.Widget.ValidationTextField("sprytextfield2"); var sprytextfield3 = new Spry.Widget.ValidationTextField("sprytextfield3"); var sprytextfield4 = new Spry.Widget.ValidationTextField("sprytextfield4"); var sprytextfield5 = new Spry.Widget.ValidationTextField("sprytextfield5"); var sprytextfield6 = new Spry.Widget.ValidationTextField("sprytextfield6", "none", {isRequired:false}); var sprytextfield7 = new Spry.Widget.ValidationTextField("sprytextfield7"); var sprytextfield8 = new Spry.Widget.ValidationTextField("sprytextfield8"); var sprytextfield9 = new Spry.Widget.ValidationTextField("sprytextfield9"); var sprytextfield10 = new Spry.Widget.ValidationTextField("sprytextfield10"); var sprytextfield11 = new Spry.Widget.ValidationTextField("sprytextfield11", "none", {isRequired:false}); var sprytextfield12 = new Spry.Widget.ValidationTextField("sprytextfield12", "none", {isRequired:false}); var sprytextfield13 = new Spry.Widget.ValidationTextField("sprytextfield13"); </script> </body> </html><file_sep>/calcula.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!-- InstanceBegin template="/Templates/drfree24.dwt" codeOutsideHTMLIsLocked="false" --> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <!-- InstanceBeginEditable name="doctitle" --> <title>Drfree24</title> <!-- InstanceEndEditable --> <link href="style/style.css" rel="stylesheet" type="text/css" /> <link rel="stylesheet" href="ajx/ajxmenu.css" type="text/css" /> <!-- InstanceBeginEditable name="head" --> <!-- InstanceEndEditable --> </head> <body> <div id="MarcoGlobal"> <div id="Marcobusqueda"> <table width="980" border="0"> <tr> <form> <td width="69">&nbsp;</td> <td width="247">&nbsp;</td> <td width="118"><div id="tipodeletraserch"><strong><a href="midrfree24.php">Mi DrFree24</a></strong></div></td> <td width="87"><div id="tipodeletraserch"><strong><a href="http://sistema.sfdservices.com/clientes/sfd/registrarusuario.asp">Tracking</a></strong></div></td> <td width="138"><div id="tipodeletraserch"><strong><a href="intranet/index.html">Intranet</a></strong></div></td> <td width="39">Search:</td> <td width="150"><label for="Serch"></label> <input type="text" name="Serch" id="Serch" /></td> <td width="54"><input type="submit" name="button" id="button" value="Buscar" /></td> <td width="20">&nbsp;</td> </form> </tr> </table> </div><!--DIN DE MENU DE BUSQUEDA--> <div id="Marcomenu"> <div class="AJXMenuQAcHDSC"><!-- AJXFILE:ajx/ajxmenu.css --> <div class="ajxmw1"> <div class="ajxmw2"> <ul> <li><a href="index.php"><b>Inicio</b></a></li> <li><a href="misionvision.html"><b>¿Quienes Somos?</b></a></li> <li><a href="servicios.html"><b>Servicios</b></a></li> <li><a href="comprasusa.html"><b>Comprar en USA</b></a></li> <li><a href="giftcard.html"><b>Gift Cards</b></a></li> <li><a href="galeria.php"><b>Galerias</b></a></li> <li><a href="precios.html"><b>Precios</b></a></li> <li><a href="contactanos.html"><b>Contacto</b></a></li> </ul> </div> </div> <br /> </div> </div><!--Fin de MARCO MENU --><!-- InstanceBeginEditable name="EditRegion1" --> <div id="Marcobanner"></div><!--Fin de MARCO BANNER --> <div id="marcocontenidoregistro"> <br /><br /> <table align="center" bgcolor="#FFFFFF" border="0"> <tr> <td> <?php //------------------------------------------------------------------------------------------------------------------------- if (isset($_POST['aereo'])) { $largo = $_POST['largo']; $ancho = $_POST['ancho']; $alto = $_POST['alto']; $peso = $_POST['peso']; $pv = $largo * $ancho * $alto / 166 ; if($pv > $peso) { $costo = $pv * 6.43; $final = number_format($costo,2,".",","); ?> <table width="667" border="0" align="center" bordercolor="#FF0000"> <tr> <td align="center"><?php echo "El costo estimado de su envio es de" . " " . "<b>".$final*70 . " bs." . "</b>"; ?></td> </tr> <tr> <td align="center">&nbsp;</td> </tr> <tr> <td align="center">&nbsp;</td> </tr> </table> <?php } else { $costo = $peso * 6.43; $final = number_format($costo,2,".",","); ?> <table width="667" border="0" align="center" bordercolor="#FF0000"> <tr> <td align="center"><?php echo "EL costo estimado de su envio es de" . " " . "<b>" . $final*70 . " bs." . "</b>"; ?></td> </tr> <tr> <td align="center">&nbsp;</td> </tr> <tr> <td align="center">&nbsp;</td> </tr> </table> <?php } //Fin de calculo de Envio aereo } //Calculo de envio maritimo if (isset($_POST['maritimo'])) { $largo = $_POST['largo']; $ancho = $_POST['ancho']; $alto = $_POST['alto']; $cuft = $largo * $ancho * $alto / 1728 ; $costo = $cuft * 22; $final = number_format($costo,2,".",","); ?> <table width="667" border="0" align="center" bordercolor="#FF0000"> <tr> <td align="center"><?php echo "EL costo estimado de su envio es de" . " " . "<b>". $final*70 . " bs." . "</b>"; ?></td> </tr> <tr> <td align="center">&nbsp;</td> </tr> <tr> <td align="center">&nbsp;</td> </tr> </table> <?php } ?> </td> </tr> </table> <br /> <br /> <br /> <strong>Tenga presente las siguientes observaciones a la hora de introducir sus datos para que no tenga estimaciones incorrectas : </strong><br /> <br /> <b>1)</b> Las medidas y peso requeridos para cotizar deben ser expresados en pulgadas y libras respectivamente.<br /> <br /> <b>2)</b> Es necesario que las medidas y peso sean de la caja que contiene el articulo, mas no del articulo como tal. Es decir las medidas deben ser del embalaje del articulo. <br /> <br /> <b>3)</b> Tenga presente que por lo general en internet estan las medidas de los articulos y no de sus cajas... mientras que en caso de los articulos que requieren armado, las medidas que aparecen son del articulo ya armado, mas no de la caja donde viene desarmado el articulo. </p> <p>&nbsp;</p> </div><!--Fin de MARCO CONTENIDO 1--> <!-- InstanceEndEditable --> <div id="marcoredsocial"> <table width="200" height="60" border="0" align="right"> <tr> <td><a href="#"><img src="img/face.png" width="44" height="39" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/twit.png" width="40" height="40" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/play.png" width="40" height="41" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/v.png" width="42" height="40"style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40" /></a></td> </tr> </table> </div> <!--Fin de MARCO RED SOCIAL--> <div id="marcodireccion">Dr.free24 Transporte Importador C.A<br /> Derechos Reservados DrFree24 C.A | RIF J-30943606-6 //<a href="politicas.html">Politicas Generales</a><br /> Desing By: <a href="http://www.systemsadms.com">Systems Admins C.A</a></div><!--Fin de MARCO DIRECCION--> </div><!--Fin de MARCO Global --> </body> <!-- InstanceEnd --></html> <file_sep>/intranet/administrar.php <?php $user = $_POST['user']; $pass = $_POST['password']; if ($user == 'drfree24' && $pass == '<PASSWORD>') { session_start(); $_SESSION['admin'] = $user ; header("location:control.php"); } else { header("location:indexn.html"); } ?><file_sep>/prueba/HTMLparte1.php <!DOCTYPE html> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <title>Cargar imagen</title> <meta name="author" content="<NAME>." /> <meta name="history" content="01 noviembre 2012" /> <meta name="email" content="<EMAIL>" /> <style> body { background-color: rgb(250,250,250); color: rgb(50,50,50); font-family: sans-serif; font-size: 100%; width: 600px; margin: auto; } :focus { outline: none; } a { text-decoration: none; color: red; } a:hover { text-decoration: underline; } header { border-bottom: 1px gray dotted; padding-bottom: 25px; margin-bottom: 25px; } header h1 { font-size: xx-large; text-shadow: 1px 1px 5px gray; } header em { color: gray; } section form { font-size: small; } section form fieldset { padding: 10px 25px; background-color: white; border: 1px gray solid; border-radius: .5em; } section form fieldset legend { padding: 5px 10px; border: 1px gray solid; border-radius: .5em; } footer { border-top: 1px gray dotted; padding-top: 25px; margin-top: 25px; position: relative; } .msg { margin-bottom: 20px; padding: 10px; background-color: rgb(255,250,250); border: 1px red dotted; } .elimina { color: blue; } </style> </head> <!-- Pgina demostrativa que permite reducir las imgenes cargadas --> <!-- desde un formulario y almacenarlas en el servidor --> <!-- utilizando las libreras GD de PHP. --> <!-- CREADO POR: <NAME> V. --> <!-- HISTORIA: Noviembre 2012 --> <!-- CONTACTO: <EMAIL> --> <!-- DESCARGAR CDIGO: https://gist.github.com/4687238 --> <body> <!-- HEADER --> <header> <h1>&nbsp;</h1> </header> <!-- SECCION --> <section> <?php if(isset($_POST['submit'])) { ?> <div class="msg">El archivo ha sido cargado satisfactoriamente.</div> <?php } ?> <form action="PHPparte2.php" method="POST" enctype="multipart/form-data"> <fieldset> <legend>Seleccionar una imagen</legend> <div><input type="file" name="foto" /></div> <div style="margin-top: 10px;"><input type="submit" name="submit" /> <a href="<?php echo $_SERVER['PHP_SELF']; ?>">Reiniciar</a></div> </fieldset> </form> </div> </section> <!-- FOOTER --> <!-- FIN DE LA PGINA --> <!-- EOF --> </body> </html><file_sep>/intranet/abrir.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Untitled Document</title> <link href="style/estilo.css" rel="stylesheet" type="text/css" /> </head> <body> <?php session_start(); if ($_SESSION['admin'] == 'drfree24') { ?> <div id="MarcoGlobal"> <div id="Marcohead"></div><!--Fin de marco head--> <div id="MarcoMenu"> / <a href="control.php">Volver a Buscador</a> / <a href="destruir.php">Cerrar Admin Center</a> </div> <!--Fin de marco menu--> <div id="Marcocontenedor"> <br /><br /><br /> <?php if(isset ($_POST["abrir"])) { $id = $_POST["id"]; $conn = mysql_connect("localhost","drfree24db","222702qaaq"); mysql_select_db("drwebs_",$conn); $ssql = mysql_query("SELECT * FROM casilleros WHERE id='$id'"); if (mysql_num_rows($ssql)==1) { $cas = mysql_result($ssql,0,"id"); $nick = mysql_result($ssql,0,"nick"); $pass = mysql_result($ssql,0,"pass"); $nombres = mysql_result($ssql,0,"nombres"); $apellidos = mysql_result($ssql,0,"apellidos"); //$ci = mysql_result($ssql,0,"ci"); $email = mysql_result($ssql,0,"email"); $pais = mysql_result($ssql,0,"pais"); $estado = mysql_result($ssql,0,"estado"); $ciudad = mysql_result($ssql,0,"ciudad"); $telefono = mysql_result($ssql,0,"telefono"); $celular = mysql_result($ssql,0,"celular"); $dir = mysql_result($ssql,0,"dir"); mysql_close($conn); ?> <table width="200" border="0" align="center"> <tr> <td><table width="200" border="0" align="center"> <tr> <td width="82" align="left"><b>ID:</b></td> <td width="102" align="left"><?php echo $id ?></td> </tr> <tr> <td align="left"><b>Nick:</b></td> <td align="left"><?php echo $nick ?></td> </tr> <tr> <tr> <td align="left"><b>Clave:</b></td> <td align="left"><?php echo $pass ?></td> </tr> <td align="left"><b>Nombres:</b></td> <td align="left"><?php echo $nombres ?></td> </tr> <tr> <td align="left"><b>Apellidos:</b></td> <td align="left"><?php echo $apellidos ?></td> </tr> <!-- <tr> <td align="left"><b>Cedula:</b></td> <td align="left"><?php echo $ci ?></td> </tr> --> <tr> <td align="left"><b>E-mail:</b></td> <td align="left"><?php echo $email ?></td> </tr> <tr> <td align="left"><b>Pais:</b></td> <td align="left"><?php echo $pais ?></td> </tr> <tr> <td align="left"><b>Estado:</b></td> <td align="left"><?php echo $estado ?></td> </tr> <tr> <td align="left"><b>Ciudad:</b></td> <td align="left"><?php echo $ciudad ?></td> </tr> <tr> <td align="left"><b>Telfono:</b></td> <td align="left"><?php echo $telefono ?></td> </tr> <tr> <td align="left"><b>BBpin:</b></td> <td align="left"><?php echo $celular ?></td> </tr> <tr> <td align="left"><b>Direccion:</b></td> <td align="left"><?php echo $dir ?></td> </tr> </table></td> <td valign="top"> <table width="113" border="0" align="center"> <tr> <td width="117" align="center"> <form method="POST" action="editar.php"> <input type="hidden" value="<?php echo $id;?>" name="id"/> <input type="submit" value="Editar Perfil" name="editar"/> </form></td> </tr> <tr> <td align="center"> <form method="POST" action="cargartracking.php"> <input type="hidden" value="<?php echo $id;?>" name="cargarguias"/> <input type="submit" value="Cargar Guias" name="cargartracking"/> </form></td> </tr> <tr> <td align="center"> <form method="POST" action="editartracking.php"> <input type="submit" value="Editar Guias" name="editartrack"/> <input type="hidden" value="<?php echo $id;?>" name="cargarguias"/> </form> </td> </tr> <tr> <td align="center"> <form method="post" action="eliminartracking.php"> <input type="submit" value="Eliminar Guias" name="eliminar"/> <input type="hidden" value="<?php echo $id;?>" name="casillerotra"/> </form></td> </tr> </table> </td> <td valign="top"> <?php include ("inc/trackingbd.php"); $consulta = "SELECT * FROM guias WHERE user ='$id';"; $hacerconsulta=mysql_query ($consulta,$conexion); echo "<table border='3' bordercolor='#FF0000' align='center'>"; echo "<tr>"; echo "<td align='center'><b>N° Guia</b></td>"; echo "<td align='center'><b>User</b></td>"; echo "<td align='center'><b>Tracking Number</b></td>"; echo "<td align='center'><b>Direccion de Envio</b></td>"; echo "<td align='center'><b>Libras</b></td>"; echo "<td align='center'><b>Volumen</b></td>"; echo "<td align='center'><b>Costo de Envio</b></td>"; echo "</tr>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); while ($reg) { echo "<tr>"; echo "<td align='center'>".$reg[0]."</td>"; echo "<td align='center'>".$reg[1]."</td>"; echo "<td align='center'>".$reg[2]."</td>"; echo "<td align='center'>".$reg[3]."</td>"; echo "<td align='center'>".$reg[4]."</td>"; echo "<td align='center'>".$reg[5]."</td>"; echo "<td align='center'>".$reg[6]."</td>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); echo "</tr>"; } echo "</table>"; mysql_close($conexion); ?> </td><!--Fin de la tabla tracking--> </tr> </table> <br /><br /> <?php } else { echo "<b>Ese numero de casillero no exite</b><br><br>"; echo "<a href='control.php'>Volver a Buscador</a><br><br>"; } } else { echo"<b>Sus Cambios Fueron Guardados con exito reaice una nueva consulta</b></br>"; echo"</br><br>"; } ?> </div><!--Fin de marco contenedor--> </div> <!--Fin de marco global--> <?php } else { session_destroy(); header("location:indexn.php"); } ?> </body> </html><file_sep>/olvidarpass.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>Agentes Adunales Edanta C.A</title> <meta name="description" content="Agentes adunales edanta es una empresa que brinda servicios de aduana maritimos, aereos y terrestres"> <meta name="keywords" content="Agentes, aduana, aduanales, aereo, maritimo, mercancia, envios, venezuela"> <meta http-equiv="Content-Type"content="text/html; charset=windows-1252"> <META NAME="revisit-after" content="14 days"> <META NAME="robots" content="index,follow"> <META NAME="distribution" content="global"> <link href="style/style.css" rel="stylesheet" type="text/css" /> </head> <body> <div id="MarcoGlobal"> <div id="MarcoHead"> <div id="sepracion"></div> <div id="letras"> <table width="900" border="0" align="center"> <tr> <td> <table width="545" border="0"> <tr> <td width="10"></td> <td width="36" align="center"><a href="index.php"><img src="img/home.png" width="22" height="21" style="opacity:1;filter:alpha(opacity=100)" onmouseover="this.style.opacity=0.5;this.filters.alpha.opacity=50" onmouseout="this.style.opacity=1;this.filters.alpha.opacity=100"/></a></td> <td width="130" align="center"><a href="quienessomos.html"><strong>Quienes Somos</strong></a></td> <td width="95" align="center"><strong><a href="servicios.html">Servicios</a></strong></td> <td width="87" align="center"><a href="micuenta.php"><strong>Mi Cuenta</strong></a></td> <td width="161" align="center"><strong><a href="contactanos.php">Contactanos</a></strong></td> </tr> </table></td> </tr> </table> </div> </div><!--FIN DE MARCO HEAD--> <div id="Separacion1">.</div> <div id="MarcoContenido"> <table width="228" border="0" align="center"> <tr> <td width="218" align="center"><em><strong>Recuperar Contrase&ntilde;a</strong></em></td> </tr> </table> <br /> <table width="700" border="0" align="center"> <tr> <td width="378" align="center">Ingrese su direccion de correo electronico y en pocos segundo recibira en su email los datos solicitados.</td> </tr> </table> <?php if (isset($_POST['olvidar'])) { $email = $_POST['email']; require ("cnx.php"); $ssql = "SELECT * FROM usuarios WHERE email='$email'"; $rs = mysql_query($ssql,$conexion); if (mysql_num_rows($rs)== 1) { $ssql = mysql_query("SELECT * FROM usuarios WHERE email='$email'"); $pass = mysql_result($ssql,0,"pass"); $body='Hemos recibido una solicitud de recuperacion de contrasena Password: '.$pass.' Si usted no realizo esta solicitud por la seguridad de su cuenta notifique a nuestro personal de inmediato. '; $para= $email; $desde = $_POST["email"]; $mensaje = $body; $asunto = "Recuperacion de Password"; $desde = $_POST["email"]; $mensaje = $body; $cabeceras = ""; $cabeceras = "MIME-VErsion: 1.0 \r\n"; $cabeceras = "Content-Type: text/html; charset=iso-8859-1\r\n"; $cabeceras = "To: " . $_POST ["correo"] . "\r\n"; $cabeceras = "From: " . "<EMAIL>" . "\r\n"; mail ($para, $asunto, $mensaje, $cabeceras); mysql_close($conn); echo "Su contraseņa ha sido enviada a su direccion de correo, recuerde revisar su bandeja de correos no deseados en caso de no encontrar el correo de recuperacion de contrase en la bandeja de entrada"; }else { echo "Esa direccion de correo no se encuentra dentro de nuestra base de datos por favor comuniquese con nuestro personal"; } } ?> <br /> <form method="post" action="#"> <table width="306" border="0" align="center"> <tr> <td width="244" align="center"> Direccion de email: <input type="text" name="email" id="email" /> </td> </tr> <tr> <td align="center">&nbsp;</td> </tr> <tr> <td align="center"><input type="submit" name="olvidar" id="olvidar" value="Enviar Solicitud" /></td> </tr> </table> </form> </div><!--FIN DE MARCO CONTENIDO--> <div id="Separacion1">.</div> <div id="MarcoDireccion"> <table width="900" border="0"> <tr> <td width="448"><table width="426" border="0"> <tr> <td width="271" align="left"><font color="#FFFFFF" size="-1">Edanta Agentes Aduanales C.A . Todos los Derechos reservados</font></td> </tr> </table></td> <td width="442"><table width="426" border="0"> <tr> <td width="271" align="right"><font color="#FFFFFF" size="-1">Desing By : <a href="www.systemsadms.com">Systems Admins C.A</a></font></td> </tr> </table></td> </tr> </table> </div> </div><!--FIN DE MARCO GLOBAL--> </body> </html> <file_sep>/midrfree24.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Drfree24</title> <link href="style/style.css" rel="stylesheet" type="text/css" /> <link rel="stylesheet" href="ajx/ajxmenu.css" type="text/css" /> <link rel="stylesheet" href="intranet/ajaxtix/ajxlightbox.css" type="text/css" /> <script src="intranet/ajaxtix/ajxlightbox.js" type="text/javascript"></script> </head> <body> <div id="MarcoGlobal"> <div id="Marcobusqueda"> <table width="980" border="0"> <tr> <form> <td width="69">&nbsp;</td> <td width="247">&nbsp;</td> <td width="118"><div id="tipodeletraserch"><strong><a href="midrfree24.php">Mi DrFree24</a></strong></div></td> <td width="87"><div id="tipodeletraserch"><strong><a href="http://sistema.sfdservices.com/clientes/sfd/registrarusuario.asp">Tracking</a></strong></div></td> <td width="138"><div id="tipodeletraserch"><strong><a href="intranet/index.html">Intranet</a></strong></div></td> <td width="39">Search:</td> <td width="150"><label for="Serch"></label> <input type="text" name="Serch" id="Serch" /></td> <td width="54"><input type="submit" name="button" id="button" value="Buscar" /></td> <td width="20">&nbsp;</td> </form> </tr> </table> </div><!--DIN DE MENU DE BUSQUEDA--> <div id="Marcomenu"> <div class="AJXMenuQAcHDSC"><!-- AJXFILE:ajx/ajxmenu.css --> <div class="ajxmw1"> <div class="ajxmw2"> <ul> <li><a href="index.php"><b>Inicio</b></a></li> <li><a href="misionvision.html"><b>¿Quienes Somos?</b></a></li> <li><a href=""><b>Servicios</b></a></li> <li><a href="comprasusa.html"><b>Comprar en USA</b></a></li> <li><a href="giftcard.html"><b>Gift Cards</b></a></li> <li><a href="galeria.php"><b>Galerias</b></a></li> <li><a href="#"><b>Precios</b></a></li> <li><a href="contactanos.html"><b>Contacto</b></a></li> </ul> </div> </div> <br /> </div> </div><!--Fin de MARCO MENU --> <div id="Marcobanner"></div><!--Fin de MARCO BANNER --> <div id="marcocontenidoregistro"> <?php session_start(); if (isset($_SESSION["login"])) { require ("mostrar.php"); ?> <table width="855" border="0" align="center"> <tr> <td width="429">Recuerda cerrar tu seccion al finalizar tu consulta</td> <td width="280" align="center"> <form action "#" method="POST"> <input type="submit" name="datos" value="Ver datos de tu cuenta"> </form> </a></td> <td width="132" align="center"><a href="destruir.php">Cerrar Session</a></td> </tr> </table> <table width="200" border="0" align="center"> <tr> <td> <img src="img/midrfree241.jpg" width="760" height="150" /> </td> </tr> </table> <table width="256" border="0" align="center"> <tr> <td width="205" align="center"><b>Tus articulos en almacen</b></td> </tr> </table> <br /> <!-------------------------------------------------------Tracking-----------------------------------------------------> <?php include ("intranet/inc/trackingbd.php"); $consulta = "SELECT * FROM guias WHERE user='$cas';"; $hacerconsulta=mysql_query ($consulta,$conexion); if (mysql_num_rows($hacerconsulta)>=1) { echo "<table border='1' align='center'>"; echo "<tr>"; echo "<td align='center'><b>N° de Guia</b></td>"; echo "<td align='center'><b>Tracking</b></td>"; echo "<td align='center'><b>Direccion de Envio</b></td>"; echo "<td align='center'><b>Libras</b></td>"; echo "<td align='center'><b>Volumen</b></td>"; echo "<td align='center'><b>Costo de Envio</b></td>"; echo "<td align='center'><b>Descripcion</b></td>"; echo "<td align='center'><b>Dimensiones</b></td>"; echo "<td align='center'><b>Foto</b></td>"; echo "</tr>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); while ($reg) { echo "<tr>"; echo "<td align='center'>".$reg[0]."</td>"; echo "<td align='center'>".$reg[2]."</td>"; echo "<td align='center'>".$reg[3]."</td>"; echo "<td align='center'>".$reg[4]."</td>"; echo "<td align='center'>".$reg[5]."</td>"; echo "<td align='center'>".$reg[6]."</td>"; echo "<td align='center'>".$reg[7]."</td>"; echo "<td align='center'>".$reg[9]."</td>"; echo "<td a valign='top' align='center' width='140' > <div class='AJXLightboxGVHQAXA'><a href='intranet/img/guia_".$reg[8].".jpg' rel='ajxlightbox'><img src='intranet/img/guia_".$reg[8].".jpg' height='25px' width='25px'/></a></div> </td>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); echo "</tr>"; } echo "</table>"; mysql_close($conexion); echo "<br>"; } else { echo "Usted no Posee articulos en almacen"; echo "<br>"; } ?> <!-------------------------------------------------------Fin de Tracking-----------------------------------------------------> <?php if (isset($_POST['datos'])) { ?> <table width="231" border="0" align="center"> <tr> <td align="center"><b>Ver datos de su cuenta</b><br /> -&gt;<a href="midrfree24.php">Ocultar Datos</a>&lt;- <br /> -&gt;<a href="editar.php">Modificar Datos</a> &lt;-</td> </tr> </table> <br /> <table width="200" border="0" align="center"> <tr> <td width="63"><strong>Nick:</strong></td> <td width="121"><?php echo $nick; ?></td> </tr> <tr> <td><strong>Nombres:</strong></td> <td><?php echo $nombres; ?></td> </tr> <tr> <td><strong>Apellidos:</strong></td> <td><?php echo $apellidos; ?></td> </tr> <tr> <td>&nbsp;</td> <td>&nbsp;</td> </tr> <tr> <td><strong>Email:</strong></td> <td><?php echo $email; ?></td> </tr> <tr> <td><strong>Pais:</strong></td> <td><?php echo $pais; ?></td> </tr> <tr> <td><strong>Estado:</strong></td> <td><?php echo $estado; ?></td> </tr> <tr> <td><strong>Ciudad:</strong></td> <td><?php echo $ciudad; ?></td> </tr> <tr> <td><strong>Telefono:</strong></td> <td><?php echo $telefono; ?></td> </tr> <tr> <td><strong>BBpin:</strong></td> <td><?php echo $celular; ?></td> </tr> <tr> <td><strong>Direccion:</strong></td> <td><?php echo $dir; ?></td> </tr> </table> <?php } ?> <br /><br /> <br /><br /> Si realizaste una compra en los Estados Unidos y usaste la direccion de nuestro servicio Delivery, notificanos a travez del siguiente formulario: <br /><br /> <table width="547" border="0" align="center"> <tr> <td width="185" valign="top"> <form method="post" action="terminos.php"> <table width="200" height="334" border="0"> <tr> <td align="center"> <input type="hidden" name="user" value="<?php echo $cas;?>" /> <input type="submit" name="formulario" value="Llenar Formulario"/> </td> </tr> <tr> <td height="135" align="center"> <img src="img/midrfree242.png" width="255" height="133" /> </td> </tr> <tr> <td height="80" align="center"><a href="drfree24/reportarpagos.php"><img src="img/Reportar pagos_02.gif" width="149" height="49" /></a><br /><br /><img src="img/cuenta.JPG" width="251" height="141" /><br /></td> </tr> <tr> <td align="center">&nbsp;</td> </tr> <tr> <td align="center"><img src="img/paypal.JPG" width="142" height="22" /></td> </tr> <tr> <td align="center"> <form action="https://www.paypal.com/cgi-bin/webscr" method="post" target="_top"> <input type="hidden" name="cmd" value="_s-xclick"> <input type="hidden" name="hosted_button_id" value="2JWXHNW2RK452"> <input type="image" src="https://www.paypalobjects.com/es_XC/i/btn/btn_buynowCC_LG.gif" border="0" name="submit" alt="PayPal, la forma más segura y rápida de pagar en línea."> <img alt="" border="0" src="https://www.paypalobjects.com/es_XC/i/scr/pixel.gif" width="1" height="1"> </form> </td> </tr> <tr> <td align="center"><span dir="ltr"><span dir="ltr"><img src="img/tel.png" width="25" height="25" />02123311154</span> <img src="img/tel.png" width="25" height="25" />(contactenos si no sabe como  pagar su envio a traves de PAYPAL con sus dolares del cupo CADIVI electronico)</span></td> </tr> </table> </form> </td> <td width="292"><table width="389" border="0"> <tr> <td width="183" align="left"><strong>Shipping Address (Envios Aereos)</strong></td> </tr> <tr> <td>Name: Dr.free24 / &quot;Coloque su nombre aqui&quot;</td> </tr> <tr> <td>ADDRESS:12250 NW 25th ST Suite 115</td> </tr> <tr> <td>ADDRESS2: C/O Air Marine</td> </tr> <tr> <td>CITY: MIAMI</td> </tr> <tr> <td>STATE: FLORIDA</td> </tr> <tr> <td>ZIP CODE: 33182</td> </tr> <tr> <td>PHONE: 3054773496</td> </tr> <tr> <td>&nbsp;</td> </tr> <tr> <td><strong>Shipping Address (Envios Maritimos)</strong></td> </tr> <tr> <td>Name: Dr.free24 / &quot;Coloque su nombre aqui&quot;</td> </tr> <tr> <td>ADDRESS:3750 NW 114th Ave Unit 6. Doral,FL</td> </tr> <tr> <td>City: Doral State.</td> </tr> <tr> <td>State: FLORIDA</td> </tr> <tr> <td>Phone: 305 629 5016</td> </tr> <tr> <td>Fax: 305 629 5017</td> </tr> <tr> <td>ZIP CODE: 33178</td> </tr> </table></td> </tr> </table> <BR /><BR /> <br /> <br /><br /> <?php } else { echo "Obtén tu cuenta para que hagas compras en los Estados Unidos, Si todavía no eres miembro de nuestra familia regístrate ahora mismo, es muy fácil, rápido y sin ningún costo. Simplemente completa el siguiente formulario y en un momento seras parte de nuestra familia."; echo "<br><br>"; echo "<br><br>"; ?> <table width="200" border="0" align="center"> <tr> <td><a href="registro.php"><img src="img/registrate-aqui-nectilus-colin.jpg" width="274" height="254" /></a></td> </tr> </table> <br /><br /> <form method="POST" action="validar.php"> <table width="650" border="0" align="center"> <tr> <td>Ingrese con su usuario: </td> <td> <input type="text" name="nick" id="nick" /></td> <td>Password:</td> <td> <input type="password" name="pass" id="pass" /></td> <td><input type="submit" name="btn_enviar" value="Entrar"></td> </tr> </table> <table width="233" border="0" align="center"> <tr> <td width="116" align="center"><h6><a href="recuperarpass.php">Recuperar Contraseña</a></h6></td> </tr> </table> </form> <br /><br /> <?php } ?> <br /><br /><br /><br /> </div><!--Fin de MARCO CONTENIDO 1--> <div id="marcoredsocial"> <table width="200" height="60" border="0" align="right"> <tr> <td><a href="#"><img src="img/face.png" width="44" height="39" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/twit.png" width="40" height="40" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/play.png" width="40" height="41" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/v.png" width="42" height="40"style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40" /></a></td> </tr> </table> </div> <!--Fin de MARCO RED SOCIAL--> <div id="marcodireccion">Dr.free24 Transporte Importador C.A<br /> Derechos Reservados DrFree24 C.A | RIF J-30943606-6 //<a href="politicas.html">Politicas Generales</a><br /> Desing By: <a href="http://www.systemsadms.com">Systems Admins C.A</a></div><!--Fin de MARCO DIRECCION--> </div><!--Fin de MARCO Global --> </body> </html> <file_sep>/terminos.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!-- InstanceBegin template="/Templates/drfree24.dwt" codeOutsideHTMLIsLocked="false" --> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <!-- InstanceBeginEditable name="doctitle" --> <title>Drfree24</title> <!-- InstanceEndEditable --> <link href="style/style.css" rel="stylesheet" type="text/css" /> <link rel="stylesheet" href="ajx/ajxmenu.css" type="text/css" /> <!-- InstanceBeginEditable name="head" --> <!-- InstanceEndEditable --> </head> <body> <div id="MarcoGlobal"> <div id="Marcobusqueda"> <table width="980" border="0"> <tr> <form> <td width="69">&nbsp;</td> <td width="247">&nbsp;</td> <td width="118"><div id="tipodeletraserch"><strong><a href="midrfree24.php">Mi DrFree24</a></strong></div></td> <td width="87"><div id="tipodeletraserch"><strong><a href="http://sistema.sfdservices.com/clientes/sfd/registrarusuario.asp">Tracking</a></strong></div></td> <td width="138"><div id="tipodeletraserch"><strong><a href="intranet/index.html">Intranet</a></strong></div></td> <td width="39">Search:</td> <td width="150"><label for="Serch"></label> <input type="text" name="Serch" id="Serch" /></td> <td width="54"><input type="submit" name="button" id="button" value="Buscar" /></td> <td width="20">&nbsp;</td> </form> </tr> </table> </div><!--DIN DE MENU DE BUSQUEDA--> <div id="Marcomenu"> <div class="AJXMenuQAcHDSC"><!-- AJXFILE:ajx/ajxmenu.css --> <div class="ajxmw1"> <div class="ajxmw2"> <ul> <li><a href="index.php"><b>Inicio</b></a></li> <li><a href="misionvision.html"><b>¿Quienes Somos?</b></a></li> <li><a href="servicios.html"><b>Servicios</b></a></li> <li><a href="comprasusa.html"><b>Comprar en USA</b></a></li> <li><a href="giftcard.html"><b>Gift Cards</b></a></li> <li><a href="galeria.php"><b>Galerias</b></a></li> <li><a href="precios.html"><b>Precios</b></a></li> <li><a href="contactanos.html"><b>Contacto</b></a></li> </ul> </div> </div> <br /> </div> </div><!--Fin de MARCO MENU --><!-- InstanceBeginEditable name="EditRegion1" --> <div id="Marcobanner"> <table width="303" border="0" align="center"> <tr> <td width="273" align="center"><strong>Recordatorio</strong></td> </tr> </table><br /> </div><!--Fin de MARCO BANNER --> <div id="marcocontenido1"> <table width="456" border="0" align="center"> <tr> <td width="446" align="center"><h3><font color="#FF0000"><strong><em>Recuerde que nuestro costo de envío aéreo mínimo es de 600 Bs. Por favor no enviar paquetes que contengan productos que esten valorados en el mercado venezolano por menos de este monto(ejm: Forros de teléfono, Protectores de pantalla, anti-rayones,chupones, medias etc). De enviarlo usted se hace responsable del paquete y acepta nuestro costo mínimo.</em></strong></font></h3></td> </tr> <tr> <td>&nbsp;</td> </tr> <tr> <td align="center"> <form method="post" action="formulariodecompras.php"> <input type="submit" name="acepto" id="acepto" value="Acepto los terminos" /> <input type="hidden" name="user" value="<?php echo $_POST['user']; ?>" /> </form> </td> </tr> </table> </div><!--Fin de MARCO CONTENIDO 1--> <!-- InstanceEndEditable --> <div id="marcoredsocial"> <table width="200" height="60" border="0" align="right"> <tr> <td><a href="#"><img src="img/face.png" width="44" height="39" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/twit.png" width="40" height="40" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/play.png" width="40" height="41" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/v.png" width="42" height="40"style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40" /></a></td> </tr> </table> </div> <!--Fin de MARCO RED SOCIAL--> <div id="marcodireccion">Dr.free24 Transporte Importador C.A<br /> Derechos Reservados DrFree24 C.A | RIF J-30943606-6 //<a href="politicas.html">Politicas Generales</a><br /> Desing By: <a href="http://www.systemsadms.com">Systems Admins C.A</a></div><!--Fin de MARCO DIRECCION--> </div><!--Fin de MARCO Global --> </body> <!-- InstanceEnd --></html> <file_sep>/validar.php <?php $nick = $_POST['nick']; $pass = $_POST['pass']; $conn = mysql_connect("localhost","drfree24db","222702qaaq"); mysql_select_db("drwebs_",$conn); $ssql = "SELECT * FROM casilleros WHERE nick='$nick' and pass='$pass'"; $rs = mysql_query($ssql,$conn); if (mysql_num_rows($rs)==1) { session_start(); $_SESSION["login"] = $nick; header("location:midrfree24.php"); mysql_close($conn); } else { header("location:indexn.html"); mysql_close($conn); } ?><file_sep>/testcorreo.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Untitled Document</title> </head> <body> <p>Prueba de Envio de Correo <form method="post" action="#"> </p> <form id="form1" name="form1" method="post" action=""> <p> <label for="nombre"></label> <input type="text" name="nombre" id="nombre" /> NOmbre </p> <p> <label for="email"></label> <input type="text" name="email" id="email" /> Correo</p> <p> <input type="text" name="asunto" id="asunto" /> asunto</p> <p> <input type="text" name="texto" id="texto" /> texto</p> <p> <input type="submit" name="btn_enviar" id="btn_enviar" value="Submit" /> </p> </form> <?php echo $nombre =$_POST['nombre']; echo $email =$_POST['email']; echo $asunto =$_POST['asunto']; echo $texto =$_POST['texto']; if(isset($_POST['btn_enviar'])) { $para = '<EMAIL>'; $asunto = $_POST["asunto"]; $desde = $_POST["email"]; $mensaje = $_POST["texto"]; $cabeceras = ""; $cabeceras = "MIME-VErsion: 1.0 \r\n"; $cabeceras = "Content-Type: text/html; charset=iso-8859-1\r\n"; //$cabeceras = "To: " . $_POST ["desde"] . "\r\n"; $cabeceras .= "From: " . $_POST ["email"] . "\r\n"; mail ($para, $asunto, $mensaje, $cabeceras); echo "Su mensaje ha sido enviado con exito, pronto sera atendido por uno de nuestros analistas"; } else { echo " <b>No se ha enviado nada aun</b>"; } ?> </body> </html><file_sep>/registro3.php <?php if(isset($_POST['btn_enviar'])) { require_once('recaptchalib.php'); $privatekey = "<KEY>"; $resp = null; $error = null; if ($_POST["recaptcha_response_field"]) { $resp = recaptcha_check_answer ($privatekey, $_SERVER["REMOTE_ADDR"], $_POST["recaptcha_challenge_field"], $_POST["recaptcha_response_field"]); if ($resp->is_valid) { $valido ="si"; echo $valido; } else { # set the error code so that we can display it //$error = $resp->error; $valido ="no"; echo $valido; } } } ?><file_sep>/formulariodecompras.php <?php if ($_POST['user'] == "") { header("Location:midrfree24.php"); } else { ?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!-- InstanceBegin template="/Templates/drfree24.dwt" codeOutsideHTMLIsLocked="false" --> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <!-- InstanceBeginEditable name="doctitle" --> <title>Drfree24</title> <!-- InstanceEndEditable --> <link href="style/style.css" rel="stylesheet" type="text/css" /> <link rel="stylesheet" href="ajx/ajxmenu.css" type="text/css" /> <!-- InstanceBeginEditable name="head" --> <script src="SpryAssets/SpryValidationTextField.js" type="text/javascript"></script> <link href="SpryAssets/SpryValidationTextField.css" rel="stylesheet" type="text/css" /> <!-- InstanceEndEditable --> </head> <body> <div id="MarcoGlobal"> <div id="Marcobusqueda"> <table width="980" border="0"> <tr> <form> <td width="69">&nbsp;</td> <td width="247">&nbsp;</td> <td width="118"><div id="tipodeletraserch"><strong><a href="midrfree24.php">Mi DrFree24</a></strong></div></td> <td width="87"><div id="tipodeletraserch"><strong><a href="http://sistema.sfdservices.com/clientes/sfd/registrarusuario.asp">Tracking</a></strong></div></td> <td width="138"><div id="tipodeletraserch"><strong><a href="intranet/index.html">Intranet</a></strong></div></td> <td width="39">Search:</td> <td width="150"><label for="Serch"></label> <input type="text" name="Serch" id="Serch" /></td> <td width="54"><input type="submit" name="button" id="button" value="Buscar" /></td> <td width="20">&nbsp;</td> </form> </tr> </table> </div><!--DIN DE MENU DE BUSQUEDA--> <div id="Marcomenu"> <div class="AJXMenuQAcHDSC"><!-- AJXFILE:ajx/ajxmenu.css --> <div class="ajxmw1"> <div class="ajxmw2"> <ul> <li><a href="index.php"><b>Inicio</b></a></li> <li><a href="misionvision.html"><b>¿Quienes Somos?</b></a></li> <li><a href="servicios.html"><b>Servicios</b></a></li> <li><a href="comprasusa.html"><b>Comprar en USA</b></a></li> <li><a href="giftcard.html"><b>Gift Cards</b></a></li> <li><a href="galeria.php"><b>Galerias</b></a></li> <li><a href="precios.html"><b>Precios</b></a></li> <li><a href="contactanos.html"><b>Contacto</b></a></li> </ul> </div> </div> <br /> </div> </div><!--Fin de MARCO MENU --><!-- InstanceBeginEditable name="EditRegion1" --> <div id="Marcobanner"> <table width="326" border="0" align="center"> <tr> <td width="273" align="center"><strong>Formulario de recolecta de mercancia</strong></td> </tr> </table> </div><!--Fin de MARCO BANNER --> <div id="Marcobanner"><br /><br /> <form action="#" method="post"> <table width="538" border="0" align="center"> <tr> <td align="center"> Fecha de la compra</td> </tr> <tr> <td align="center"><label for="fecha"></label> <span id="sprytextfield1"> <label for="fecha2"></label> <input type="text" name="fecha" id="fecha2" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td align="center">Nombre del courier que traslada su compra a nuestro almacén</td> </tr> <tr> <td align="center"><span id="sprytextfield2"> <label for="currier"></label> <input type="text" name="currier" id="currier" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td align="center">Numero tracking  suministrado por el courier.</td> </tr> <tr> <td align="center"><span id="sprytextfield3"> <label for="tracking"></label> <input type="text" name="tracking" id="tracking" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td align="center">Descripción de la compra</td> </tr> <tr> <td align="center"><span id="sprytextfield4"> <input name="compra" type="text" id="compra" size="50" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td align="center">Origen de la compra (pegar el link que sale en la barra de su navegador)</td> </tr> <tr> <td align="center"><span id="sprytextfield5"> <input name="link" type="text" id="link" size="50" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td align="center"> <input type="hidden" name="user" value="<?php echo $_POST['user'];?>" /> </td> </tr> </table> <br /> <table width="200" border="0" align="center"> <tr> <td align="center"><input type="submit" name="enviarformulario" value="Enviar Datos de Compra"></td> </tr> </table> </form> <br /><br /> <?php if(isset($_POST['enviarformulario'])) { $body='Se ha reportado una nueva orden de recolecta de mercancia </br> Fecha: '.$_POST['fecha'].' </br> Currier: '.$_POST['currier'].' </br> Nmro Tracking: '.$_POST['tracking'].' </br> Descripcion: '.$_POST['compra'].' </br> Link de Compra: '.$_POST['link'].' </br> Cuenta DrFree24: '.$_POST['user'].' '; $para = '<EMAIL>'; $asunto = 'Se ha reportado una nueva orden de recolecta de mercancia'; $desde = 'drfree24.com'; $mensaje = $body; $cabeceras = ""; $cabeceras = "MIME-VErsion: 1.0 \r\n"; $cabeceras = "Content-Type: text/html; charset=iso-8859-1\r\n"; //$cabeceras = "To: " . $_POST ["desde"] . "\r\n"; $cabeceras .= "From: www.drfree24.com \r\n"; mail ($para, $asunto, $mensaje, $cabeceras); echo "Su mensaje ha sido enviado con exito, pronto sera atendido por uno de nuestros analistas"; } else { echo " <b>No se ha enviado nada aun</b>"; } /* if(isset($_POST['enviarformulario'])) { $body='Se ha reportado una nueva orden de recolecta de mercancia Fecha: '.$_POST['fecha'].' Currier: '.$_POST['currier'].' N° Tracking: '.$_POST['tracking'].' Descripcion: '.$_POST['compra'].' Link de Compra: '.$_POST['link'].' Cuenta DrFree24: '.$_POST['nombre'].' '; $para="<EMAIL>"; $desde = $_POST["nombre"]; $mensaje = $body; $asunto = "Nueva Orden de recolecta de mercancia"; $desde = $_POST["nick"]; $mensaje = $body; $cabeceras = ""; $cabeceras = "MIME-VErsion: 1.0 \r\n"; $cabeceras = "Content-Type: text/html; charset=iso-8859-1\r\n"; $cabeceras = "To: " . $_POST ["nick"] . "\r\n"; $cabeceras = "From: " . $_POST ["correo"] . "\r\n"; mail ($para, $asunto, $mensaje, $cabeceras); */ ?> <table width="710" border="0" align="center"> <tr> <td align="center">..</td> </tr> </table> <br /><br /> <?php ?> </div><!--Fin de MARCO CONTENIDO 1--> <script type="text/javascript"> var sprytextfield1 = new Spry.Widget.ValidationTextField("sprytextfield1"); var sprytextfield2 = new Spry.Widget.ValidationTextField("sprytextfield2"); var sprytextfield3 = new Spry.Widget.ValidationTextField("sprytextfield3"); var sprytextfield4 = new Spry.Widget.ValidationTextField("sprytextfield4"); var sprytextfield5 = new Spry.Widget.ValidationTextField("sprytextfield5"); </script> <?php } ?> <!-- InstanceEndEditable --> <div id="marcoredsocial"> <table width="200" height="60" border="0" align="right"> <tr> <td><a href="#"><img src="img/face.png" width="44" height="39" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/twit.png" width="40" height="40" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/play.png" width="40" height="41" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/v.png" width="42" height="40"style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40" /></a></td> </tr> </table> </div> <!--Fin de MARCO RED SOCIAL--> <div id="marcodireccion">Dr.free24 Transporte Importador C.A<br /> Derechos Reservados DrFree24 C.A | RIF J-30943606-6 //<a href="politicas.html">Politicas Generales</a><br /> Desing By: <a href="http://www.systemsadms.com">Systems Admins C.A</a></div><!--Fin de MARCO DIRECCION--> </div><!--Fin de MARCO Global --> </body> <!-- InstanceEnd --></html> <file_sep>/intranet/eliminarsavetracking.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Untitled Document</title> <link href="style/estilo.css" rel="stylesheet" type="text/css" /> </head> <body> <?php session_start(); if ($_SESSION['admin'] == 'drfree24') { ?> <div id="MarcoGlobal"> <div id="Marcohead"></div><!--Fin de marco head--> <div id="MarcoMenu"> / <a href="control.php">Volver a buscador</a> / <a href="destruir.php">Cerrar Admin Center</a> / </div> <!--Fin de marco menu--> <div id="Marcocontenedor"> <br /><br /> <?php if(isset($_POST['si'])) { $guia = $_POST['eliminarid']; include ("inc/trackingbd.php"); $consulta = "DELETE FROM guias WHERE guia ='$guia';"; $hacerconsulta=mysql_query ($consulta,$conexion); mysql_close($conexion); echo "El Tracking fue eliminado con exito<br><br>"; echo "<a href='control.php'>Volver a Buscador</a><br><br>"; }else echo "La guia no ha sido eliminada"; ?> </div><!--Fin de marco contenedor--> </div><!--Fin de marco global--> <?php } else { session_destroy(); header("location:indexn.php"); } ?> </body> </html><file_sep>/editarsave.php <?php if (isset($_POST['editar'])) { $id = $_POST['cas']; $pass = $_POST['<PASSWORD>']; $email = $_POST['email']; $pais = $_POST['pais']; $estado = $_POST['estado']; $ciudad = $_POST['ciudad']; $telefono = $_POST['telefono']; $celular = $_POST['celular']; $dir = $_POST['dir']; $conn = mysql_connect("localhost","drfree24db","222702qaaq"); mysql_select_db("drwebs_",$conn); $consulta = "UPDATE casilleros SET pass ='<PASSWORD>', email ='$email', pais='$pais', estado='$estado', ciudad='$ciudad', telefono='$telefono', celular='$celular', dir='$dir' WHERE id= $id" ; $hacerconsulta = mysql_query ($consulta); mysql_close ($conn); header ("location:midrfree24.php"); } ?> <file_sep>/editar.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!-- InstanceBegin template="/Templates/drfree24.dwt" codeOutsideHTMLIsLocked="false" --> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <!-- InstanceBeginEditable name="doctitle" --> <title>Drfree24</title> <!-- InstanceEndEditable --> <link href="style/style.css" rel="stylesheet" type="text/css" /> <link rel="stylesheet" href="ajx/ajxmenu.css" type="text/css" /> <!-- InstanceBeginEditable name="head" --> <script src="SpryAssets/SpryValidationTextField.js" type="text/javascript"></script> <script src="SpryAssets/SpryValidationConfirm.js" type="text/javascript"></script> <link href="SpryAssets/SpryValidationTextField.css" rel="stylesheet" type="text/css" /> <link href="SpryAssets/SpryValidationConfirm.css" rel="stylesheet" type="text/css" /> <!-- InstanceEndEditable --> </head> <body> <div id="MarcoGlobal"> <div id="Marcobusqueda"> <table width="980" border="0"> <tr> <form> <td width="69">&nbsp;</td> <td width="247">&nbsp;</td> <td width="118"><div id="tipodeletraserch"><strong><a href="midrfree24.php">Mi DrFree24</a></strong></div></td> <td width="87"><div id="tipodeletraserch"><strong><a href="http://sistema.sfdservices.com/clientes/sfd/registrarusuario.asp">Tracking</a></strong></div></td> <td width="138"><div id="tipodeletraserch"><strong><a href="intranet/index.html">Intranet</a></strong></div></td> <td width="39">Search:</td> <td width="150"><label for="Serch"></label> <input type="text" name="Serch" id="Serch" /></td> <td width="54"><input type="submit" name="button" id="button" value="Buscar" /></td> <td width="20">&nbsp;</td> </form> </tr> </table> </div><!--DIN DE MENU DE BUSQUEDA--> <div id="Marcomenu"> <div class="AJXMenuQAcHDSC"><!-- AJXFILE:ajx/ajxmenu.css --> <div class="ajxmw1"> <div class="ajxmw2"> <ul> <li><a href="index.php"><b>Inicio</b></a></li> <li><a href="misionvision.html"><b>¿Quienes Somos?</b></a></li> <li><a href="servicios.html"><b>Servicios</b></a></li> <li><a href="comprasusa.html"><b>Comprar en USA</b></a></li> <li><a href="giftcard.html"><b>Gift Cards</b></a></li> <li><a href="galeria.php"><b>Galerias</b></a></li> <li><a href="precios.html"><b>Precios</b></a></li> <li><a href="contactanos.html"><b>Contacto</b></a></li> </ul> </div> </div> <br /> </div> </div><!--Fin de MARCO MENU --><!-- InstanceBeginEditable name="EditRegion1" --> <div id="Marcobanner"></div><!--Fin de MARCO BANNER --> <div id="marcocobanner"> <?php session_start(); if (isset($_SESSION["login"])) { require ("mostrar.php"); ?> <br /><br /><br /> <form method="post" action="editarsave.php"> <br /><br /> <table width="294" border="0" align="center"> <tr> <td width="88">Nick:</td> <td width="190"><span id="sprytextfield1"> <label for="nick"></label> <input name="nick" type="text" id="nick" value="<?php echo $nick ?>" readonly="readonly" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td>Password:</td> <td><span id="sprytextfield2"> <label for="pass"></label> <input type="<PASSWORD>" name="pass" id="pass" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td>Repetir Pass:</td> <td><span id="spryconfirm1"> <label for="rpass"></label> <input type="password" name="rpass" id="rpass" /> <span class="confirmRequiredMsg">A value is required.</span><span class="confirmInvalidMsg">The values don't match.</span></span></td> </tr> <tr> <td>Nombres:</td> <td><span id="sprytextfield4"> <label for="nombres"></label> <input name="nombres" type="text" id="nombres" value="<?php echo $nombres ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td>Apellidos:</td> <td><span id="sprytextfield5"> <label for="apellidos"></label> <input name="apellidos" type="text" id="apellidos" value="<?php echo $apellidos ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td>Email:</td> <td><span id="sprytextfield6"> <label for="email"></label> <input name="email" type="text" id="email" value="<?php echo $email ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td>&nbsp;</td> <td>&nbsp;</td> </tr> <tr> <td>Pais:</td> <td><label for="pais"></label> <select name="pais" id="pais"> <option selected="selected">Venezuela</option> <option>Estados Unidos</option> <option>Panama</option> <option>Colombia</option> </select></td> </tr> <tr> <td>Estado:</td> <td><span id="sprytextfield7"> <label for="estado"></label> <input name="estado" type="text" id="estado" value="<?php echo $estado ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td>Ciudad:</td> <td><span id="sprytextfield8"> <label for="ciudad"></label> <input name="ciudad" type="text" id="ciudad" value="<?php echo $ciudad ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td>Telefono:</td> <td><span id="sprytextfield9"> <label for="telefono"></label> <input name="telefono" type="text" id="telefono" value="<?php echo $telefono ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td>BBpin::</td> <td> <input type="text" name="celular" value ="<?php echo $celular ?>" id="textfield" /></td> </tr> <tr> <td>Direccion:</td> <td><span id="sprytextfield11"> <label for="dir"></label> <input name="dir" type="text" id="dir" value="<?php echo $dir ?>" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> </table> <br /> <table width="200" border="0" align="center"> <tr> <td> <input type="hidden" name="cas" value="<?php echo $cas ?>"/> <input type="submit" name="editar" value="Guardar"/> </td> <td><input type="reset" name="borrar" value="Borrar"/></td> </tr> </table> </form> <?php }else { } ?> </div><!--Fin de MARCO CONTENIDO 1--> <script type="text/javascript"> var sprytextfield1 = new Spry.Widget.ValidationTextField("sprytextfield1"); var sprytextfield2 = new Spry.Widget.ValidationTextField("sprytextfield2"); var sprytextfield4 = new Spry.Widget.ValidationTextField("sprytextfield4"); var sprytextfield5 = new Spry.Widget.ValidationTextField("sprytextfield5"); var sprytextfield6 = new Spry.Widget.ValidationTextField("sprytextfield6"); var sprytextfield7 = new Spry.Widget.ValidationTextField("sprytextfield7"); var sprytextfield8 = new Spry.Widget.ValidationTextField("sprytextfield8"); var sprytextfield9 = new Spry.Widget.ValidationTextField("sprytextfield9"); var sprytextfield11 = new Spry.Widget.ValidationTextField("sprytextfield11"); var spryconfirm1 = new Spry.Widget.ValidationConfirm("spryconfirm1", "pass"); </script> <!-- InstanceEndEditable --> <div id="marcoredsocial"> <table width="200" height="60" border="0" align="right"> <tr> <td><a href="#"><img src="img/face.png" width="44" height="39" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/twit.png" width="40" height="40" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/play.png" width="40" height="41" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/v.png" width="42" height="40"style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40" /></a></td> </tr> </table> </div> <!--Fin de MARCO RED SOCIAL--> <div id="marcodireccion">Dr.free24 Transporte Importador C.A<br /> Derechos Reservados DrFree24 C.A | RIF J-30943606-6 //<a href="politicas.html">Politicas Generales</a><br /> Desing By: <a href="http://www.systemsadms.com">Systems Admins C.A</a></div><!--Fin de MARCO DIRECCION--> </div><!--Fin de MARCO Global --> </body> <!-- InstanceEnd --></html> <file_sep>/giftcard.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!-- InstanceBegin template="/Templates/drfree24.dwt" codeOutsideHTMLIsLocked="false" --> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <!-- InstanceBeginEditable name="doctitle" --> <title>Drfree24</title> <!-- InstanceEndEditable --> <link href="style/style.css" rel="stylesheet" type="text/css" /> <link rel="stylesheet" href="ajx/ajxmenu.css" type="text/css" /> <!-- InstanceBeginEditable name="head" --> <!-- InstanceEndEditable --> </head> <body> <div id="MarcoGlobal"> <div id="Marcobusqueda"> <table width="980" border="0"> <tr> <form> <td width="69">&nbsp;</td> <td width="247"><img src="img/logoblack.png" width="100" height="30" /></td> <td width="118"><div id="tipodeletraserch"><strong><a href="midrfree24.php">Mi DrFree24</a></strong></div></td> <td width="87"><div id="tipodeletraserch"><strong><a href="http://sistema.sfdservices.com/clientes/sfd/registrarusuario.asp">Tracking</a></strong></div></td> <td width="138">&nbsp;</td> <td width="39">Serch:</td> <td width="150"><label for="Serch"></label> <input type="text" name="Serch" id="Serch" /></td> <td width="54"><input type="submit" name="button" id="button" value="Buscar" /></td> <td width="20">&nbsp;</td> </form> </tr> </table> </div><!--DIN DE MENU DE BUSQUEDA--> <div id="Marcomenu"> <div class="AJXMenuQAcHDSC"><!-- AJXFILE:ajx/ajxmenu.css --> <div class="ajxmw1"> <div class="ajxmw2"> <ul> <li><a href="index.php"><b>Inicio</b></a></li> <li><a href="misionvision.html"><b>¿Quienes Somos?</b></a></li> <li><a href=""><b>Servicios</b></a></li> <li><a href="comprasusa.html"><b>Comprar en USA</b></a></li> <li><a href="giftcard.html"><b>Gift Cards</b></a></li> <li><a href="#"><b>Galerias</b></a></li> <li><a href="#"><b>Socios</b></a></li> <li><a href="contactanos.html"><b>Contacto</b></a></li> </ul> </div> </div> <br /> </div> </div><!--Fin de MARCO MENU --><!-- InstanceBeginEditable name="EditRegion1" --> <div id="Marcobannergiftcard"><img src="img/banergiftcard.png" width="980" height="400" border="0" usemap="#Map" /> <map name="Map" id="Map"> <area shape="rect" coords="621,325,758,373" href="comfunciona.html" target="_blank" /> <area shape="rect" coords="807,327,935,372" href="#" target="_blank" /> </map> </div> <!--Fin de MARCO BANNER --> <div id="marcocontenidogifrcard"> <table width="613" border="0" align="center"> <tr> <td width="603" align="center">&nbsp;</td> </tr> </table> <table width="841" border="0" align="center"> <tr> <td width="318" align="center"><a name="gift" id="gift"></a></td> </tr> </table> <br /> <?php if(isset($_POST['correo1'])) { $nombre1= $_POST['nombre1']; $correo1= $_POST['correo1']; $telefono= $_POST['telefono']; $card= $_POST['card']; $cuenta2 = $card * 0.10; $cuenta = $card + $cuenta2; //Enviar email a correo $body='Se he generado una orden de compra Cliente: '.$_POST['nombre1'].' Correo de Cliente: '.$_POST['correo1'].' Telefono de Cliente: '.$_POST['telefono'].' Monto de GiftCard: '.$_POST['card'].'$ '; $Mensaje = ' <html> <head> <title>Orden de Compra Gift Card</title> </head> <body> <table width="614" border="0" align="center"> <tr> <td align="center"><h3>Hemos recibido una orden de compra para una GIFT CARD de '.$card.'$ <br /> <br /> Para completar la orden de compra usted debe realizar las siguientes operaciones.</h3></td> </tr> </table> <p>&nbsp; </p> <table width="506" border="0" align="center"> <tr> <td width="49" align="center"><strong><em>Paso 1</em></strong></td> </tr> <tr> <td align="center">Transfiera a nuestra cuenta paypal, la cantidad de'.$cuenta.' $.<br /><br /></td> </tr> <tr> <td align="center"><table width="247" border="1"> <tr> <td width="215" align="center" bgcolor="#9999CC"> <form action="https://www.paypal.com/cgi-bin/webscr" method="post" target="_top"> <input type="hidden" name="cmd" value="_s-xclick"> <input type="hidden" name="hosted_button_id" value="6LPM6SXX6AD3A"> <table> <tr><td align="center"><input type="hidden" name="on0" value="Selecciona tu Gift Card"> Selecciona tu Gift Card y Paga con Paypal</td></tr><tr><td align="center"> <br /> <select name="os0"> <option value="Gift Card">Gift Card $100.00 USD</option> <option value="Gift Card">Gift Card $150.00 USD</option> <option value="Gift Card">Gift Card $250.00 USD</option> <option value="Gift Card">Gift Card $500.00 USD</option> </select> </td></tr> </table> <input type="hidden" name="currency_code" value="USD"> <input type="image" src="https://www.paypalobjects.com/es_XC/i/btn/btn_buynowCC_LG.gif" border="0" name="submit" alt="PayPal, la forma más segura y rápida de pagar en línea."> <img alt="" border="0" src="https://www.paypalobjects.com/es_XC/i/scr/pixel.gif" width="1" height="1"> </form> </td> </tr> </table></td> </tr> </table> <BR /> <table width="638" border="0" align="center"> <tr> <td width="49" align="center"><strong><em>Paso 2</em></strong></td> </tr> <tr> <td>Reporte su pago en nuestro sitio web<a href="http://www.systemsadms.com/cliente0082/giftcard.html"> www.drfree24.com</a> seccion GIFT CARD. Nuestro personal se encargara de verificar la validez del reporte.</td> </tr> </table> <br /> <table width="639" border="0" align="center"> <tr> <td width="49" align="center"><strong><em>Paso 3</em></strong></td> </tr> <tr> <td>Nosotros nos encargamos de enviarte la GIFT CARD para que pueda realizar sus compras en amazon.com.</td> </tr> </table> <p>&nbsp;</p> <table width="200" border="0" align="center"> <tr> <td align="center"><b>Telefonos:</b></td> </tr> <tr> <td align="center">(+58) 212 331 11 54 <br> (+580212 331 25 71</td> </tr> <tr> <td align="center"> <img src="http://www.systemsadms.com/cliente0082/img/logo.jpg" width="200" height="75"> </td> </tr> </table> </body> </html> '; $para="<EMAIL>"; $asunto = "Nueva orden de compra"; $desde = $_POST["correo1"]; $mensaje = $body; $cabeceras = ""; $cabeceras = "MIME-VErsion: 1.0 \r\n"; $cabeceras = "Content-Type: text/html; charset=iso-8859-1\r\n"; $cabeceras = "To: " . $_POST ["nombre1"] . "\r\n"; $cabeceras .= "From: " . $_POST ["correo1"] . "\r\n"; $para2= $correo1; $asunto2 = "Orden de Compra Gift Card '$card'$"; $desde2 = "<EMAIL>"; $headers = "MIME-Version: 1.0\r\n"; $headers .= "Content-type: text/html; charset=iso-8859-1\r\n"; $headers .= "From: www.drfree24.com <<EMAIL>>\r\n"; $headers .= "X-Mailer: PHP/" . phpversion(); mail ($para2, $asunto2, $Mensaje, $headers); if (mail ($para, $asunto, $mensaje, $cabeceras)) { echo "<b>Su orden de compra fue generada, recibira en su correo la informacion necesaria para completar su compra, </b><br><br><b>Si es la primera vez que realiza esta compra puede que reciba el mail con los datos en la bandeja de correos no deseados, recuerde revisar esta bandeja y agregarnos como correo permitido</b>"; } } else { echo "<b>Debe completar el formulario para generar una ordend e compra </b>"; } ?> <p>&nbsp;</p> </div><!--Fin de MARCO CONTENIDO 1--> <!--Fin de MARCO CONTENDIO 2--> <!-- InstanceEndEditable --> <div id="marcoredsocial"> <table width="200" height="60" border="0" align="right"> <tr> <td><a href="#"><img src="img/face.png" width="44" height="39" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/twit.png" width="40" height="40" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/play.png" width="40" height="41" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/v.png" width="42" height="40"style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40" /></a></td> </tr> </table> </div> <!--Fin de MARCO RED SOCIAL--> <div id="marcodireccion">Dr.free24 Transporte Importador C.A<br /> Derechos Reservados DrFree24 C.A | RIF J-30943606-6 //<a href="politicas.html">Politicas Generales</a><br /> Desing By: <a href="http://www.systemsadms.com">Systems Admins C.A</a></div><!--Fin de MARCO DIRECCION--> </di<file_sep>/intranet/test.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Untitled Document</title> </head> <body> <form method="post" action="#" enctype="multipart/form-data"> <input type="file" name="foto" /> <br /> <input type="submit" name="guardartracking" id="button" value="Submit" /> <br /> </form> <br /><br /> <?php if(isset($_POST['guardartracking'])){ #### ## Funcin para redimencionar las imgenes ## utilizando las liberas de GD de PHP #### function resizeImagen($ruta, $nombre, $alto, $ancho,$nombreN,$extension){ $rutaImagenOriginal = $ruta.$nombre; if($extension == 'GIF' || $extension == 'gif'){ $img_original = imagecreatefromgif($rutaImagenOriginal); } if($extension == 'jpg' || $extension == 'JPG'){ $img_original = imagecreatefromjpeg($rutaImagenOriginal); } if($extension == 'png' || $extension == 'PNG'){ $img_original = imagecreatefrompng($rutaImagenOriginal); } $max_ancho = $ancho; $max_alto = $alto; list($ancho,$alto)=getimagesize($rutaImagenOriginal); $x_ratio = $max_ancho / $ancho; $y_ratio = $max_alto / $alto; if( ($ancho <= $max_ancho) && ($alto <= $max_alto) ){//Si ancho $ancho_final = $ancho; $alto_final = $alto; } elseif (($x_ratio * $alto) < $max_alto){ $alto_final = ceil($x_ratio * $alto); $ancho_final = $max_ancho; } else{ $ancho_final = ceil($y_ratio * $ancho); $alto_final = $max_alto; } $tmp=imagecreatetruecolor($ancho_final,$alto_final); imagecopyresampled($tmp,$img_original,0,0,0,0,$ancho_final, $alto_final,$ancho,$alto); imagedestroy($img_original); $calidad=70; imagejpeg($tmp,$ruta.$nombreN,$calidad); } $largo=2; $str = "abcdefghijklmnopqrstuvwxyz"; $may = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; $num = "1234567890"; $cad = ""; # Comienzo de la generacion de clave. $cad = substr($may ,rand(0,24),1); $cad .= substr($num ,rand(0,10),1); $cad .= substr($num ,rand(0,10),1); for($i=0; $i<$largo; $i++) { $cad .= substr($str,rand(0,24),1); } ; // comprobamos que se ha enviado el formulario // comprobar que han seleccionado una foto if($_FILES['foto']['name'] != ""){ // El campo foto contiene una imagen... // Primero, hay que validar que se trata de un JPG/GIF/PNG $allowedExts = array("jpg", "jpeg", "gif", "png", "JPG", "GIF", "PNG"); /* $extension = end(explode(".", $_FILES["foto"]["name"])); if ((($_FILES["foto"]["type"] == "image/gif") || ($_FILES["foto"]["type"] == "image/jpeg") || ($_FILES["foto"]["type"] == "image/png") || ($_FILES["foto"]["type"] == "image/pjpeg")) && in_array($extension, $allowedExts)) */ if($_FILES["foto"]["type"] == "image/jpeg") { // el archivo es un JPG/GIF/PNG, entonces... $extension = "jpg"; $foto = $cad . ".".$extension; $directorio = "img"; // directorio de tu eleccin // almacenar imagen en el servidor move_uploaded_file($_FILES['foto']['tmp_name'], $directorio.'/'.$foto); $minFoto = 'guia_'.$foto; $resFoto = 'res_'.$foto; resizeImagen($directorio.'/', $foto, 800, 600,$minFoto,$extension); //resizeImagen($directorio.'/', $foto, 500, 500,$resFoto,$extension); unlink($directorio.'/'.$foto); echo "Se ha cargado el un nuevo tracking "; echo $cad; } else { // El archivo no es JPG/GIF/PNG $malformato = $_FILES["foto"]["type"]; echo "El tipo de archivo no es una imagen valida"; //header("Location: index.php"); exit; } } else { // El campo foto NO contiene una imagen echo "Se ha cargado un nuevo tracking sin imagen "; //echo "No contiene ninguna imagen"; //header("Location: index.php"); //exit; } } // fin del submit ?> </body> </html><file_sep>/intranet/control.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Untitled Document</title> <link href="style/estilo.css" rel="stylesheet" type="text/css" /> <link rel="stylesheet" href="ajaxtix/ajxlightbox.css" type="text/css" /> <script src="ajaxtix/ajxlightbox.js" type="text/javascript"></script> </head> <body> <?php session_start(); if ($_SESSION['admin'] == 'drfree24') { include ("inc/usarBD.php"); ?> <div id="MarcoGlobal"> <div><br /><br /><br /></div><!--Fin de marco head--> <div id="MarcoMenu"><a href="#">Inicio</a> / <a href="galeria.php">Galeria</a> / <a href="destruir.php">Cerrar Admin Center</a> / </div> <!--Fin de marco menu--> <div id="Marcocontenedor"> <br /><br /><br /> <table width="900" border="0" align="center"> <tr> <td width="429" align="center"> <form method="POST" action="#"> <table width="412" border="0" align="center"> <tr> <td width="151"><b>Buscar Cliente Por:</b></td> <td width="73"><select name="filtro" id="filtro"> <option>Nombre</option> <option>Apellido</option> <option>Email</option> </select></td> <td width="37"><b>Dato:</b></td> <td width="42"><label for="dato"></label> <input name="dato" type="text" id="dato" size="7" /></td> <td width="85"><input name="buscar" type="submit" id="dato" value="Ver Clientes"/></td> </tr> </table> </form></td> <td width="43">&nbsp;</td> <td width="412" align="center"><form method="POST" action="#"> <table width="412" border="0" align="center"> <tr> <td width="131"><b>Buscar Guias Por:</b></td> <td width="76"><select name="filtro2" id="filtro2"> <option>Tracking</option> <option>User</option> </select></td> <td width="43" align="right"><b>Nº:</b></td> <td width="52"><label for="dato2"></label> <input name="dato2" type="text" id="dato2" size="7" /></td> <td width="86"><input name="guias" type="submit" id="dato" value="Ver Guias"/></td> </tr> </table> </form></td> </tr> </table> <br /> <form method="post" action="abrir.php"> <table width="237" border="0" align="center"> <tr> <td width="120"><b>Abrir User Nº</b></td> <td width="56"><label for="id"></label> <input name="id" type="text" id="id" size="3" width="35px"/></td> <td width="45"><input type="submit" name="abrir" id="cas" value="Abrir"/></td> </tr> </table> </form> <br /> <!-- <form method="post" action="abrir.php"> <table width="436" border="0" align="center"> <tr> <td width="195"><b>Abrir Casillero Nº</b></td> <td width="144"><label for="cas"></label> <input type="text" name="cas" id="cas" width="35px"/></td> <td width="83"><input type="submit" name="abrir" id="cas" value="Abrir"/></td> </tr> </table> </form> --> <br /><br /><br /> <?php if(isset ($_POST["buscar"])) { $filtro = $_POST['filtro']; $dato = $_POST['dato']; if ($_POST['dato']=="") { $consulta = "SELECT * FROM casilleros;"; $hacerconsulta=mysql_query ($consulta,$conexion); echo "<table border='3' bordercolor='#FF0000' align='center'>"; echo "<tr>"; echo "<td align='center'><b>User</b></td>"; echo "<td bgcolor='#CCCCCC' align='center'><b>Nick</b></td>"; echo "<td align='center'><b>Nombres</b></td>"; echo "<td align='center'><b>Apellidos</b></td>"; echo "<td align='center'><b>password</b></td>"; echo "<td align='center'><b>email</b></td>"; echo "<td align='center'><b>pais</b></td>"; echo "<td align='center'><b>estado</b></td>"; echo "<td align='center'><b>ciudad</b></td>"; echo "<td align='center'><b>BBPin</b></td>"; echo "<td align='center'><b>telefono</b></td>"; echo "<td align='center'><b>direccion</b></td>"; echo "</tr>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); while ($reg) { echo "<tr>"; echo "<td align='center'>".$reg[0]."</td>"; echo "<td align='center'>".$reg[1]."</td>"; echo "<td align='center'>".$reg[3]."</td>"; echo "<td align='center'>".$reg[4]."</td>"; echo "<td align='center'>".$reg[2]."</td>"; echo "<td align='center'>".$reg[6]."</td>"; echo "<td align='center'>".$reg[7]."</td>"; echo "<td align='center'>".$reg[8]."</td>"; echo "<td align='center'>".$reg[9]."</td>"; echo "<td align='center'>".$reg[10]."</td>"; echo "<td align='center'>".$reg[11]."</td>"; echo "<td align='center'>".$reg[12]."</td>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); echo "</tr>"; } echo "</table>"; mysql_close($conexion); //COMIENZO DE CONSULTA CON FILTRO } elseif ($_POST["filtro"]=="Nombre") { $like = $_POST['dato']; $busqueda = $like; $consulta = "SELECT * FROM casilleros WHERE nombres LIKE '%$busqueda%';"; $hacerconsulta=mysql_query ($consulta,$conexion); //$hacerconsulta=mysql_query ($consulta,$conexion); echo "<table border='3' bordercolor='#FF0000' align='center'>"; echo "<tr>"; echo "<td align='center'><b>User</b></td>"; echo "<td bgcolor='#CCCCCC' align='center'><b>Nick</b></td>"; echo "<td align='center'><b>Nombres</b></td>"; echo "<td align='center'><b>Apellidos</b></td>"; echo "<td align='center'><b>password</b></td>"; echo "<td align='center'><b>email</b></td>"; echo "<td align='center'><b>pais</b></td>"; echo "<td align='center'><b>estado</b></td>"; echo "<td align='center'><b>ciudad</b></td>"; echo "<td align='center'><b>BBPin</b></td>"; echo "<td align='center'><b>telefono</b></td>"; echo "<td align='center'><b>direccion</b></td>"; echo "</tr>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); while ($reg) { echo "<tr>"; echo "<td align='center'>".$reg[0]."</td>"; echo "<td align='center'>".$reg[1]."</td>"; echo "<td align='center'>".$reg[3]."</td>"; echo "<td align='center'>".$reg[4]."</td>"; echo "<td align='center'>".$reg[2]."</td>"; echo "<td align='center'>".$reg[6]."</td>"; echo "<td align='center'>".$reg[7]."</td>"; echo "<td align='center'>".$reg[8]."</td>"; echo "<td align='center'>".$reg[9]."</td>"; echo "<td align='center'>".$reg[10]."</td>"; echo "<td align='center'>".$reg[11]."</td>"; echo "<td align='center'>".$reg[12]."</td>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); echo "</tr>"; } echo "</table>"; mysql_close($conexion); } elseif ($_POST["filtro"]=="Apellido") { $like = $_POST['dato']; $busqueda = $like; $consulta = "SELECT * FROM casilleros WHERE apellidos LIKE '%$busqueda%';"; $hacerconsulta=mysql_query ($consulta,$conexion); //$hacerconsulta=mysql_query ($consulta,$conexion); echo "<table border='3' bordercolor='#FF0000' align='center'>"; echo "<tr>"; echo "<td align='center'><b>User</b></td>"; echo "<td bgcolor='#CCCCCC' align='center'><b>Nick</b></td>"; echo "<td align='center'><b>Nombres</b></td>"; echo "<td align='center'><b>Apellidos</b></td>"; echo "<td align='center'><b>password</b></td>"; echo "<td align='center'><b>email</b></td>"; echo "<td align='center'><b>pais</b></td>"; echo "<td align='center'><b>estado</b></td>"; echo "<td align='center'><b>ciudad</b></td>"; echo "<td align='center'><b>BBPin</b></td>"; echo "<td align='center'><b>telefono</b></td>"; echo "<td align='center'><b>direccion</b></td>"; echo "</tr>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); while ($reg) { echo "<tr>"; echo "<td align='center'>".$reg[0]."</td>"; echo "<td align='center'>".$reg[1]."</td>"; echo "<td align='center'>".$reg[3]."</td>"; echo "<td align='center'>".$reg[4]."</td>"; echo "<td align='center'>".$reg[2]."</td>"; echo "<td align='center'>".$reg[6]."</td>"; echo "<td align='center'>".$reg[7]."</td>"; echo "<td align='center'>".$reg[8]."</td>"; echo "<td align='center'>".$reg[9]."</td>"; echo "<td align='center'>".$reg[10]."</td>"; echo "<td align='center'>".$reg[11]."</td>"; echo "<td align='center'>".$reg[12]."</td>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); echo "</tr>"; } echo "</table>"; mysql_close($conexion); } elseif ($_POST["filtro"]=="Email") { $like = $_POST['dato']; $busqueda = $like; $consulta = "SELECT * FROM casilleros WHERE email LIKE '%$busqueda%';"; $hacerconsulta=mysql_query ($consulta,$conexion); //$hacerconsulta=mysql_query ($consulta,$conexion); echo "<table border='3' bordercolor='#FF0000' align='center'>"; echo "<tr>"; echo "<td align='center'><b>User</b></td>"; echo "<td bgcolor='#CCCCCC' align='center'><b>Nick</b></td>"; echo "<td align='center'><b>Nombres</b></td>"; echo "<td align='center'><b>Apellidos</b></td>"; echo "<td align='center'><b>password</b></td>"; echo "<td align='center'><b>email</b></td>"; echo "<td align='center'><b>pais</b></td>"; echo "<td align='center'><b>estado</b></td>"; echo "<td align='center'><b>ciudad</b></td>"; echo "<td align='center'><b>BBPin</b></td>"; echo "<td align='center'><b>telefono</b></td>"; echo "<td align='center'><b>direccion</b></td>"; echo "</tr>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); while ($reg) { echo "<tr>"; echo "<td align='center'>".$reg[0]."</td>"; echo "<td align='center'>".$reg[1]."</td>"; echo "<td align='center'>".$reg[3]."</td>"; echo "<td align='center'>".$reg[4]."</td>"; echo "<td align='center'>".$reg[2]."</td>"; echo "<td align='center'>".$reg[6]."</td>"; echo "<td align='center'>".$reg[7]."</td>"; echo "<td align='center'>".$reg[8]."</td>"; echo "<td align='center'>".$reg[9]."</td>"; echo "<td align='center'>".$reg[10]."</td>"; echo "<td align='center'>".$reg[11]."</td>"; echo "<td align='center'>".$reg[12]."</td>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); echo "</tr>"; } echo "</table>"; mysql_close($conexion); } } //FIN DE CONSULTA CON FILTRO 1 if (isset ($_POST["guias"])) { $filtro = $_POST['filtro']; $dato = $_POST['dato']; if ($_POST['dato2']=="") { $consulta = "SELECT * FROM guias;"; $hacerconsulta=mysql_query ($consulta,$conexion); echo "<table border='3' bordercolor='#FF0000' align='center'>"; echo "<tr>"; echo "<td align='center'><b>Guia</b></td>"; echo "<td align='center'><b>User</b></td>"; echo "<td align='center'><b>Tracking</b></td>"; echo "<td align='center'><b>Direccion de Envio</b></td>"; echo "<td align='center'><b>Libras</b></td>"; echo "<td align='center'><b>Volumen</b></td>"; echo "<td align='center'><b>Cosot de Envio</b></td>"; echo "<td align='center'><b>Descripcion</b></td>"; echo "<td align='center'><b>Foto</b></td>"; echo "<td align='center'><b>Dimensiones</b></td>"; echo "</tr>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); while ($reg) { echo "<tr>"; echo "<td align='center'>".$reg[0]."</td>"; echo "<td align='center'>".$reg[1]."</td>"; echo "<td align='center'>".$reg[2]."</td>"; echo "<td align='center'>".$reg[3]."</td>"; echo "<td align='center'>".$reg[4]."</td>"; echo "<td align='center'>".$reg[5]."</td>"; echo "<td align='center'>".$reg[6]."</td>"; echo "<td align='center'>".$reg[7]."</td>"; echo " <td a valign='top' align='center' width='140' > <div class='AJXLightboxGVHQAXA'><a href='img/guia_".$reg[8].".jpg' rel='ajxlightbox'><img src='img/guia_".$reg[8].".jpg' width='25px' height='25px'/></a> </div> </td>"; echo "<td align='center'>".$reg[9]."</td>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); echo "</tr>"; } echo "</table>"; mysql_close($conexion); } elseif ($_POST["filtro2"]=="Tracking") { $like = $_POST['dato2']; $busqueda = $like; $consulta = "SELECT * FROM guias WHERE tracking LIKE '%$busqueda%';"; $hacerconsulta=mysql_query ($consulta,$conexion); //$hacerconsulta=mysql_query ($consulta,$conexion); echo "<table border='3' bordercolor='#FF0000' align='center'>"; echo "<tr>"; echo "<td align='center'><b>Guia</b></td>"; echo "<td align='center'><b>User</b></td>"; echo "<td align='center'><b>Tracking</b></td>"; echo "<td align='center'><b>Direccion de Envio</b></td>"; echo "<td align='center'><b>Libras</b></td>"; echo "<td align='center'><b>Volumen</b></td>"; echo "<td align='center'><b>Cosot de Envio</b></td>"; echo "<td align='center'><b>Descripcion</b></td>"; echo "<td align='center'><b>Foto</b></td>"; echo "<td align='center'><b>Dimensiones</b></td>"; echo "</tr>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); while ($reg) { echo "<tr>"; echo "<td align='center'>".$reg[0]."</td>"; echo "<td align='center'>".$reg[1]."</td>"; echo "<td align='center'>".$reg[2]."</td>"; echo "<td align='center'>".$reg[3]."</td>"; echo "<td align='center'>".$reg[4]."</td>"; echo "<td align='center'>".$reg[5]."</td>"; echo "<td align='center'>".$reg[6]."</td>"; echo "<td align='center'>".$reg[7]."</td>"; echo "<td a valign='top' align='center' width='140' > <div class='AJXLightboxGVHQAXA'><a href='img/guia_".$reg[8].".jpg' rel='ajxlightbox'><img src='img/guia_".$reg[8].".jpg' width='25px' height='25px'/></a> </div> </td>"; echo "<td align='center'>".$reg[9]."</td>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); echo "</tr>"; } echo "</table>"; mysql_close($conexion); } elseif ($_POST["filtro2"]=="User") { $like = $_POST['dato2']; $busqueda = $like; $consulta = "SELECT * FROM guias WHERE user LIKE '%$busqueda%';"; $hacerconsulta=mysql_query ($consulta,$conexion); //$hacerconsulta=mysql_query ($consulta,$conexion); echo "<table border='3' bordercolor='#FF0000' align='center'>"; echo "<tr>"; echo "<td align='center'><b>Guia</b></td>"; echo "<td align='center'><b>User</b></td>"; echo "<td align='center'><b>Tracking</b></td>"; echo "<td align='center'><b>Direccion de Envio</b></td>"; echo "<td align='center'><b>Libras</b></td>"; echo "<td align='center'><b>Volumen</b></td>"; echo "<td align='center'><b>Cosot de Envio</b></td>"; echo "<td align='center'><b>Descripcion</b></td>"; echo "<td align='center'><b>Foto</b></td>"; echo "<td align='center'><b>Dimensiones</b></td>"; echo "</tr>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); while ($reg) { echo "<tr>"; echo "<td align='center'>".$reg[0]."</td>"; echo "<td align='center'>".$reg[1]."</td>"; echo "<td align='center'>".$reg[2]."</td>"; echo "<td align='center'>".$reg[3]."</td>"; echo "<td align='center'>".$reg[4]."</td>"; echo "<td align='center'>".$reg[5]."</td>"; echo "<td align='center'>".$reg[6]."</td>"; echo "<td align='center'>".$reg[7]."</td>"; echo "<td align='center'>".$reg[9]."</td>"; echo "<td a valign='top' align='center' width='140' > <div class='AJXLightboxGVHQAXA'><a href='img/guia_".$reg[8].".jpg' rel='ajxlightbox'><img src='img/guia_".$reg[8].".jpg' width='25px' height='25px'/></a> </div> </td>"; echo "<td align='center'>".$reg[9]."</td>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); echo "</tr>"; } echo "</table>"; mysql_close($conexion); } //FIN DE CONSULTA CON FILTRO 2 } ?> <br /><br /> </div><!--Fin de marco contenedor--> </div> <!--Fin de marco global--> <?php } else { session_destroy(); header("location:indexn.php"); } ?> </body> </html><file_sep>/registro.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!-- InstanceBegin template="/Templates/drfree24.dwt" codeOutsideHTMLIsLocked="false" --> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <!-- InstanceBeginEditable name="doctitle" --> <title>Drfree24</title> <!-- InstanceEndEditable --> <link href="style/style.css" rel="stylesheet" type="text/css" /> <link rel="stylesheet" href="ajx/ajxmenu.css" type="text/css" /> <!-- InstanceBeginEditable name="head" --> <script src="SpryAssets/SpryValidationTextField.js" type="text/javascript"></script><script src="SpryAssets/SpryValidationCheckbox.js" type="text/javascript"></script> <link href="SpryAssets/SpryValidationTextField.css" rel="stylesheet" type="text/css" /> <link href="SpryAssets/SpryValidationCheckbox.css" rel="stylesheet" type="text/css" /> <!-- InstanceEndEditable --> </head> <body> <div id="MarcoGlobal"> <div id="Marcobusqueda"> <table width="980" border="0"> <tr> <form> <td width="69">&nbsp;</td> <td width="247"><img src="img/logoblack.png" width="100" height="30" /></td> <td width="118"><div id="tipodeletraserch"><strong><a href="midrfree24.php">Mi DrFree24</a></strong></div></td> <td width="87"><div id="tipodeletraserch"><strong><a href="http://sistema.sfdservices.com/clientes/sfd/registrarusuario.asp">Tracking</a></strong></div></td> <td width="138"><div id="tipodeletraserch"><strong><a href="intranet/index.html">Intranet</a></strong></div></td> <td width="39">Search:</td> <td width="150"><label for="Serch"></label> <input type="text" name="Serch" id="Serch" /></td> <td width="54"><input type="submit" name="button" id="button" value="Buscar" /></td> <td width="20">&nbsp;</td> </form> </tr> </table> </div><!--DIN DE MENU DE BUSQUEDA--> <div id="Marcomenu"> <div class="AJXMenuQAcHDSC"><!-- AJXFILE:ajx/ajxmenu.css --> <div class="ajxmw1"> <div class="ajxmw2"> <ul> <li><a href="index.php"><b>Inicio</b></a></li> <li><a href="misionvision.html"><b>¿Quienes Somos?</b></a></li> <li><a href="servicios.html"><b>Servicios</b></a></li> <li><a href="comprasusa.html"><b>Comprar en USA</b></a></li> <li><a href="giftcard.html"><b>Gift Cards</b></a></li> <li><a href="galeria.php"><b>Galerias</b></a></li> <li><a href="precios.html"><b>Precios</b></a></li> <li><a href="contactanos.html"><b>Contacto</b></a></li> </ul> </div> </div> <br /> </div> </div><!--Fin de MARCO MENU --><!-- InstanceBeginEditable name="EditRegion1" --> <div id="Marcobanner"> <img src="img/uneteanosotros.jpg" width="980" height="288" /> </div><!--Fin de MARCO BANNER --> <div id="marcocontenidoregistro"> <br /><br /> <form method="POST" action="registro2.php"> <table width="817" border="0" align="center"> <tr> <td align="center"> Coloque sus datos para crear su usuario Drfree24 <br /><br /> <fieldset> <legend>Datos para su usuario</legend> <table width="837" border="0"> <tr> </tr> </table> <br /> <legend></legend> <table width="837" border="0"> <tr> <td width="84">Usuario:</td> <td width="313"><span id="sprytextfield3"> <label for="nick"></label> <input type="text" name="nick" id="nick" /> <span class="textfieldRequiredMsg">Informacion Requerida.</span></span></td> <td width="103">Nombres:</td> <td width="309"><span id="sprytextfield4"> <label for="nombres"></label> <input type="text" name="nombres" id="nombres" /> <span class="textfieldRequiredMsg">Informacion Requerida.</span></span></td> </tr> <tr> <td>Apellidos:</td> <td><span id="sprytextfield5"> <label for="apellidos"></label> <input type="text" name="apellidos" id="apellidos" /> <span class="textfieldRequiredMsg">Informacion Requerida.</span></span></td> <td>E-Mail:</td> <td><span id="sprytextfield6"> <label for="email"></label> <input type="text" name="email" id="email" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> </table> </fieldset> <br /> <fieldset> <legend>Su ubicacion</legend> <table width="837" border="0"> <tr> <td width="80">Pais:</td> <td width="184"><label for="pais"></label> <select name="pais" id="pais"> <option selected="selected">Venezuela</option> <option>Estados Unidos</option> <option>Panama</option> </select></td> <td width="243">Estado:<span id="sprytextfield7"> <label for="estado"></label> <input type="text" name="estado" id="estado" /> <span class="textfieldRequiredMsg">Informacion Requerida.</span></span></td> <td width="83">Ciudad:</td> <td width="225"><span id="sprytextfield8"> <label for="ciudad"></label> <input type="text" name="ciudad" id="ciudad" /> <span class="textfieldRequiredMsg">Informacion Requerida.</span></span></td> </tr> </table> </fieldset> <br /> <fieldset> <legend>Datos de contacto</legend> <table width="837" border="0"> <tr> <td width="73">Celular:</td> <td width="166"><span id="sprytextfield9"> <label for="celular"></label> <input type="text" name="celular" id="celular" /> <span class="textfieldRequiredMsg">Informacion Requerida.</span></span></td> <td width="78">BBpin:</td> <td width="181"><label for="telefono"></label> <input type="text" name="telefono" id="telefono" /></td> <td width="82">Direccion:</td> <td width="217"><span id="sprytextfield11"> <label for="dir"></label> <input type="text" name="dir" id="dir" /> <span class="textfieldRequiredMsg">Informacion Requerida.</span></span></td> </tr> </table> </fieldset> <br /> <fieldset> <legend>Terminos Legales</legend> <table width="837" border="0"> <tr> <td width="26"><span id="sprycheckbox1"> <input type="checkbox" name="confirmar" id="confirmar" /> <label for="confirmar"></label> <span class="checkboxRequiredMsg">Acepte los terminos.</span></span></td> <td width="795">He leido y acepto<a href="politicas.html" target="_blank"> las politicas generales de envio, transitos nacionales y seguros</a> de drfree24</td> </tr> </table> <br /><br /> <table width="200" border="0"> <tr> <td> <?php // descomentar la linea de abajo si tenemos el codigo de validacion en otro archivo require_once('recaptchalib.php'); $publickey = "<KEY>"; echo recaptcha_get_html($publickey, $error); ?> </td> </tr> </table> </fieldset> <br /><br /> <table width="300" border="0" align="center"> <tr> <td align="center"><input type="submit" name="btn_enviar" value="Registrarme"/></td> <td align="center"><input type="reset" value="Borrar Datos"/></td> </tr> </table> </td> </tr> </table> </form> <br /> <br /> </div><!--Fin de MARCO CONTENIDO 1--> <script type="text/javascript"> var sprytextfield3 = new Spry.Widget.ValidationTextField("sprytextfield3"); var sprytextfield4 = new Spry.Widget.ValidationTextField("sprytextfield4"); var sprytextfield5 = new Spry.Widget.ValidationTextField("sprytextfield5"); var sprytextfield7 = new Spry.Widget.ValidationTextField("sprytextfield7"); var sprytextfield8 = new Spry.Widget.ValidationTextField("sprytextfield8"); var sprytextfield9 = new Spry.Widget.ValidationTextField("sprytextfield9"); var sprytextfield11 = new Spry.Widget.ValidationTextField("sprytextfield11"); var sprycheckbox1 = new Spry.Widget.ValidationCheckbox("sprycheckbox1"); var sprytextfield6 = new Spry.Widget.ValidationTextField("sprytextfield6"); </script> <!-- InstanceEndEditable --> <div id="marcoredsocial"> <table width="200" height="60" border="0" align="right"> <tr> <td><a href="#"><img src="img/face.png" width="44" height="39" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/twit.png" width="40" height="40" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/play.png" width="40" height="41" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/v.png" width="42" height="40"style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40" /></a></td> </tr> </table> </div> <!--Fin de MARCO RED SOCIAL--> <div id="marcodireccion">Dr.free24 Transporte Importador C.A<br /> Derechos Reservados DrFree24 C.A | RIF J-30943606-6 //<a href="politicas.html">Politicas Generales</a><br /> Desing By: <a href="http://www.systemsadms.com">Systems Admins C.A</a></div><!--Fin de MARCO DIRECCION--> </div><!--Fin de MARCO Global --> </body> <!-- InstanceEnd --></html> <file_sep>/tarifas.php <?php session_start(); ?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Welcome to Drfree24.com</title> <link href="style/style.css" rel="stylesheet" type="text/css" /> </head> <body> <p>&nbsp;</p> <div id="MarcoGlobal"> <div id="MarcoHead"> <div id="Logo"></div> <div id="Dir"> <table width="300" border="0" align="center"> <tr> <td><p>&nbsp;</p> <form> <table width="300" border="0" align="center"> <tr> <td width="25"><img src="img/user.jpg" width="25" height="25" /></td> <td width="39">Usuario</td> <td width="66"><input name="textfield6" type="text" id="textfield6" size="8" /></td> <td width="34">Clave</td> <td width="75"><input name="textfield7" type="password" id="textfield7" size="8" /></td> <td width="35"><a href="#"><img src="img/check.JPG" width="29" height="23" /></a></td> </tr> </table> <table width="300" border="0" align="center"> <tr> <td width="158" align="center">¿Nuevo?Registrate aqui</td> <td width="26">&nbsp;</td> <td width="102">Olvide mi clave</td> </tr> </table> </form> </td> </tr> </table> </div><!--FIN DE DIR--> <div id="social"> <table width="300" border="0" align="center"> <tr> <td><table width="200" border="0" align="center"> <tr> <td width="50"><img src="img/twitter.jpg" width="48" height="45" /></td> <td width="87"><img src="img/face.jpg" width="45" height="40" /></td> <td width="16"><img src="img/play.jpg" width="45" height="40" /></td> <td width="19"><img src="img/ven.jpg" width="45" height="40" /></td> <td width="19"><img src="img/ban.jpg" width="55" height="45" /></td> </tr> </table></td> </tr> </table> </div><!--FIN DE SOCIAL--> </div><!--FIN DE MARCO HEAD--> <div id="Separacion"></div><!----> <div id="Marcomenu"> <div id="home"> <table width="70" border="0" align="center"> <tr> <td align="center"><a href="index.php"><img src="img/casa.png" width="42" height="43" style="opacity:1;filter:alpha(opacity=100)" onmouseover="this.style.opacity=0.5;this.filters.alpha.opacity=50" onmouseout="this.style.opacity=1;this.filters.alpha.opacity=100"/></a></td> </tr> </table> </div><!--FIN MENU--> <div id="menu"> <table width="50" border="0" align="center"> <tr> <td><a href="micuenta.php"><img src="img/drfree.png" width="121" height="50" style="opacity:1;filter:alpha(opacity=100)" onmouseover="this.style.opacity=0.5;this.filters.alpha.opacity=50" onmouseout="this.style.opacity=1;this.filters.alpha.opacity=100"/></a></td> </tr> </table></div><!--FIN MENU--> <div id="menu"> <table width="50" border="0" align="center"> <tr> <td><a href="servicios.php"><img src="img/servicios.png" width="121" height="50" style="opacity:1;filter:alpha(opacity=100)" onmouseover="this.style.opacity=0.7;this.filters.alpha.opacity=70" onmouseout="this.style.opacity=1;this.filters.alpha.opacity=100"/></a></td> </tr> </table> </div><!--FIN MENU--> <div id="menu"> <table width="50" border="0" align="center"> <tr> <td><a href="compraenusa.php"><img src="img/compras.png" width="121" height="50" style="opacity:1;filter:alpha(opacity=100)" onmouseover="this.style.opacity=0.7;this.filters.alpha.opacity=70" onmouseout="this.style.opacity=1;this.filters.alpha.opacity=100" /></a></td> </tr> </table> </div><!--FIN MENU--> <div id="menu"> <table width="50" border="0" align="center"> <tr> <td><a href="tarifas.php"><img src="img/tarifas.png" width="121" height="50" style="opacity:1;filter:alpha(opacity=100)" onmouseover="this.style.opacity=0.7;this.filters.alpha.opacity=70" onmouseout="this.style.opacity=1;this.filters.alpha.opacity=100"/></a></td> </tr> </table> </div><!--FIN MENU--> <div id="menu"> <table width="50" border="0" align="center"> <tr> <td><a href="#"><img src="img/preguntas.png" width="121" height="50" style="opacity:1;filter:alpha(opacity=100)" onmouseover="this.style.opacity=0.7;this.filters.alpha.opacity=70" onmouseout="this.style.opacity=1;this.filters.alpha.opacity=100"/></a></td> </tr> </table> </div><!--FIN MENU--> <div id="menu"> <table width="50" border="0" align="center"> <tr> <td><a href="contacto.php"><img src="img/contacto.png" width="121" height="50" style="opacity:1;filter:alpha(opacity=100)" onmouseover="this.style.opacity=0.7;this.filters.alpha.opacity=70" onmouseout="this.style.opacity=1;this.filters.alpha.opacity=100"/></a></td> </tr> </table> </div><!--FIN MENU--> </div><!--FIN DE MARCO MENU--> <img src="img/billetes.jpg" width="900" height="117" /> <div id="MarcoContenidotarifa"> <br /><br /> <table width="900" border="0"> <tr> <td width="404"> <table width="404" border="0" bgcolor="#CCCCCC"> <tr> <td width="90" align="center" valign="middle"><img src="img/avion.png" width="83" height="86" /></td> <td width="322"> <br /> <strong><font size="+1">Costo de Envios Aereos</font></strong><br /><br /> <strong>1 Lb:<br /> Bs. 528.00 + IVA. Bs. 72.00. Costo Total 600.00 Bs <br /><br /> Costo Minimo <br /> Bs.528.00 Bs mas I.V.A 72.00 bs costo total 600.00 bs </strong></td> </tr> </table> <br /><br /> <table width="404" border="0" bgcolor="#CCCCCC"> <tr> <td width="90" align="center" valign="middle"><img src="img/barco.png" width="83" height="85" /></td> <td width="322"><br /> <strong><font size="+1">Costo de Envios Maritimos</font></strong><br /> <br /> <strong>1 Pie Cubico:<br /> Bs. 2000.00 Bs costo base + I.V.A 240.00 Bs. Costo total 2240.00 Bs<br /> <br /> Costo Minimo: <br /> Bs. 2000.00 Bs mas I.V.A 240.00 bs costo total 2240.00 bs </strong></td> </tr> </table> <p>&nbsp;</p></td> <td width="282"><table width="200" border="0" align="center"> <tr> <td align="center">&nbsp;</td> </tr> </table></td> <td width="200" bgcolor="#CCCCCC"><strong>Tenga presente las siguientes observaciones a la hora de introducir sus datos para que no tenga estimaciones incorrectas : </strong><br /> <br /> <strong>1)</strong> Las medidas y peso requeridos para cotizar deben ser expresados en pulgadas y libras respectivamente.<br /> <br /> <strong>2)</strong> Es necesario que las medidas y peso sean de la caja que contiene el articulo, mas no del articulo como tal. Es decir las medidas deben ser del embalaje del articulo. <br /> <br /> <strong>3)</strong> Tenga presente que por lo general en internet estan las medidas de los articulos y no de sus cajas... mientras que en caso de los articulos que requieren armado, las medidas que aparecen son del articulo ya armado, mas no de la caja donde viene desarmado el articulo. </td> </tr> </table> </div><!--MarcoContenido--> <div id="Separacion"></div><!----> <div id="MarcoDireccion"> <table width="900" border="0" align="left"> <tr> <td width="525" align="left"><em><strong>Drfree24 Transporte Importador C.A</strong></em></td> <td width="365"><table width="284" border="0" align="center"> <tr> <td width="140">&nbsp;</td> <td width="29"><img src="img/twitter.jpg" width="35" height="35" /></td> <td width="30"><img src="img/face.jpg" width="35" height="30" /></td> <td width="31"><img src="img/play.jpg" width="35" height="30" /></td> <td width="32"><img src="img/ven.jpg" width="35" height="30" /></td> </tr> </table></td> </tr> </table> </div> <!--FIN de amrco direccion--> </div><!--FIN DE MARCO GLOBAL--> </body> </html><file_sep>/intranet/editartracking.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Untitled Document</title> <link href="style/estilo.css" rel="stylesheet" type="text/css" /> </head> <body> <?php session_start(); if ($_SESSION['admin'] == 'drfree24') { $id = $_POST['cargarguias']; ?> <div id="MarcoGlobal"> <div id="Marcohead"></div><!--Fin de marco head--> <div id="MarcoMenu"> / <a href="control.php">Volver a buscador</a> / <a href="destruir.php">Cerrar Admin Center</a> / </div> <!--Fin de marco menu--> <div id="Marcocontenedor"> <br /><br /> <b> Editar Guias</b> <br /><br /> <?php include ("inc/trackingbd.php"); $consulta = "SELECT * FROM guias WHERE user ='$id';"; $hacerconsulta=mysql_query ($consulta,$conexion); echo "<table border='3' bordercolor='#FF0000' align='center'>"; echo "<tr>"; echo "<td align='center'><b>N° Guia</b></td>"; echo "<td align='center'><b>User</b></td>"; echo "<td align='center'><b>Tracking Number</b></td>"; echo "<td align='center'><b>Direccion de Envio</b></td>"; echo "<td align='center'><b>Libras</b></td>"; echo "<td align='center'><b>Volumen</b></td>"; echo "<td align='center'><b>Costo de Envio</b></td>"; echo "</tr>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); while ($reg) { echo "<tr>"; echo "<td align='center'>".$reg[0]."</td>"; echo "<td align='center'>".$reg[1]."</td>"; echo "<td align='center'>".$reg[2]."</td>"; echo "<td align='center'>".$reg[3]."</td>"; echo "<td align='center'>".$reg[4]."</td>"; echo "<td align='center'>".$reg[5]."</td>"; echo "<td align='center'>".$reg[6]."</td>"; $reg = mysql_fetch_array($hacerconsulta,MYSQL_BOTH); echo "</tr>"; } echo "</table>"; mysql_close($conexion); ?> <br /><br /> <form method="post" action="editarsavetracking.php"> Introduzca el N° de Guia que desea Editar<br /><br /> <table width="200" border="0" align="center"> <tr> <td align="center">N° Guia:</td> <td><label for="ideditar"></label> <input name="ideditar" type="text" id="ideditar" size="4" /> </td> <td> <input type="submit" name="editarid" value="Editar" /></td> </tr> </table> </form> <br /><br /> </div><!--Fin de marco contenedor--> </div><!--Fin de marco global--> <?php } else { session_destroy(); header("location:indexn.php"); } ?> </body> </html><file_sep>/reportarpagos.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!-- InstanceBegin template="/Templates/drfree24.dwt" codeOutsideHTMLIsLocked="false" --> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <!-- InstanceBeginEditable name="doctitle" --> <title>Drfree24</title> <!-- InstanceEndEditable --> <link href="style/style.css" rel="stylesheet" type="text/css" /> <link rel="stylesheet" href="ajx/ajxmenu.css" type="text/css" /> <!-- InstanceBeginEditable name="head" --> <script src="SpryAssets/SpryValidationTextField.js" type="text/javascript"></script> <link href="SpryAssets/SpryValidationTextField.css" rel="stylesheet" type="text/css" /> <!-- InstanceEndEditable --> </head> <body> <div id="MarcoGlobal"> <div id="Marcobusqueda"> <table width="980" border="0"> <tr> <form> <td width="69">&nbsp;</td> <td width="247">&nbsp;</td> <td width="118"><div id="tipodeletraserch"><strong><a href="midrfree24.php">Mi DrFree24</a></strong></div></td> <td width="87"><div id="tipodeletraserch"><strong><a href="http://sistema.sfdservices.com/clientes/sfd/registrarusuario.asp">Tracking</a></strong></div></td> <td width="138"><div id="tipodeletraserch"><strong><a href="intranet/index.html">Intranet</a></strong></div></td> <td width="39">Search:</td> <td width="150"><label for="Serch"></label> <input type="text" name="Serch" id="Serch" /></td> <td width="54"><input type="submit" name="button" id="button" value="Buscar" /></td> <td width="20">&nbsp;</td> </form> </tr> </table> </div><!--DIN DE MENU DE BUSQUEDA--> <div id="Marcomenu"> <div class="AJXMenuQAcHDSC"><!-- AJXFILE:ajx/ajxmenu.css --> <div class="ajxmw1"> <div class="ajxmw2"> <ul> <li><a href="index.php"><b>Inicio</b></a></li> <li><a href="misionvision.html"><b>¿Quienes Somos?</b></a></li> <li><a href="servicios.html"><b>Servicios</b></a></li> <li><a href="comprasusa.html"><b>Comprar en USA</b></a></li> <li><a href="giftcard.html"><b>Gift Cards</b></a></li> <li><a href="galeria.php"><b>Galerias</b></a></li> <li><a href="precios.html"><b>Precios</b></a></li> <li><a href="contactanos.html"><b>Contacto</b></a></li> </ul> </div> </div> <br /> </div> </div><!--Fin de MARCO MENU --><!-- InstanceBeginEditable name="EditRegion1" --> <div id="Marcobanner"><br /><br /> <table width="681" border="0" align="center"> <tr> <td align="center"><strong>Reporta el pago de tu Gift Card o Envios realizados</strong></td> </tr> </table> </div><!--Fin de MARCO BANNER --> <div id="Marcobanner"> <br /><br /> <form method="post" action="#"> <table width="300" border="0" align="center"> <tr> <td width="140">N° de Deposito<br />o Transferencia</td> <td width="144"><span id="sprytextfield1"> <label for="numero"></label> <input type="text" name="numero" id="numero" /> <span class="textfieldRequiredMsg">Campo Requerido.</span></span></td> </tr> <tr> <td>Banco Emisor:</td> <td><label for="banco"></label> <select name="banco" id="banco"> <option selected="selected">Mercantil</option> <option>Banesco</option> <option>Venezuela</option> <option>Provincial</option> </select></td> </tr> <tr> <td>Fecha:</td> <td><span id="sprytextfield2"> <label for="fecha"></label> <input type="text" name="fecha" id="fecha" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td>Nombre del depositante:</td> <td><span id="sprytextfield3"> <input type="text" name="nombredep" id="nombredep" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td>Tu nombre:</td> <td><span id="sprytextfield4"> <label for="nombre"></label> <input type="text" name="nombre" id="nombre" /> <span class="textfieldRequiredMsg">Campo Requerido.</span></span></td> </tr> <tr> <td>Telefono:</td> <td><span id="sprytextfield5"> <label for="telefono"></label> <input type="text" name="telefono" id="telefono" /> <span class="textfieldRequiredMsg">Campo Requerido.</span></span></td> </tr> <tr> <td>Monto:</td> <td><span id="sprytextfield6"> <label for="monto"></label> <input type="text" name="monto" id="monto" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> <tr> <td>Correo:</td> <td><span id="sprytextfield7"> <label for="correo"></label> <input type="text" name="correo" id="correo" /> <span class="textfieldRequiredMsg">A value is required.</span></span></td> </tr> </table> <br /> <table width="200" border="0" align="center"> <tr> <td align="center"><input type="submit" name="reportarpago" value="Reportar Pago"></td> </tr> </table> <p>&nbsp;</p> </form> <br /><br /> <?php if (isset($_POST['reportarpago'])) { $body='Se ha reportado un nuevo pago Numero de Deposito o Transferencia: '.$_POST['numero'].' Banco: '.$_POST['banco'].' Monto: '.$_POST['monto'].' Fecha: '.$_POST['fecha'].' Nombre del Depositante: '.$_POST['nombredep'].' Nombre: '.$_POST['nombre'].' Telefono: '.$_POST['telefono'].' Correo: '.$_POST['correo'].' '; $para="<EMAIL>"; $desde = $_POST["nombre"]; $mensaje = $body; $asunto = "Nuevo Reporte de Pago "; $desde = $_POST["nick"]; $mensaje = $body; $cabeceras = ""; $cabeceras = "MIME-VErsion: 1.0 \r\n"; $cabeceras = "Content-Type: text/html; charset=iso-8859-1\r\n"; $cabeceras = "To: " . $_POST ["nick"] . "\r\n"; $cabeceras = "From: " . $_POST ["correo"] . "\r\n"; mail ($para, $asunto, $mensaje, $cabeceras); echo" Su pago ha sido reportado con exito"; } ?> </div><!--Fin de MARCO CONTENIDO 1--> <script type="text/javascript"> var sprytextfield1 = new Spry.Widget.ValidationTextField("sprytextfield1"); var sprytextfield2 = new Spry.Widget.ValidationTextField("sprytextfield2"); var sprytextfield4 = new Spry.Widget.ValidationTextField("sprytextfield4"); var sprytextfield5 = new Spry.Widget.ValidationTextField("sprytextfield5"); var sprytextfield3 = new Spry.Widget.ValidationTextField("sprytextfield3"); var sprytextfield6 = new Spry.Widget.ValidationTextField("sprytextfield6"); var sprytextfield7 = new Spry.Widget.ValidationTextField("sprytextfield7"); </script> <!-- InstanceEndEditable --> <div id="marcoredsocial"> <table width="200" height="60" border="0" align="right"> <tr> <td><a href="#"><img src="img/face.png" width="44" height="39" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/twit.png" width="40" height="40" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/play.png" width="40" height="41" style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40"/></a></td> <td><a href="#"><img src="img/v.png" width="42" height="40"style="opacity:0.4;filter:alpha(opacity=40)" onmouseover="this.style.opacity=1;this.filters.alpha.opacity=100" onmouseout="this.style.opacity=0.4;this.filters.alpha.opacity=40" /></a></td> </tr> </table> </div> <!--Fin de MARCO RED SOCIAL--> <div id="marcodireccion">Dr.free24 Transporte Importador C.A<br /> Derechos Reservados DrFree24 C.A | RIF J-30943606-6 //<a href="politicas.html">Politicas Generales</a><br /> Desing By: <a href="http://www.systemsadms.com">Systems Admins C.A</a></div><!--Fin de MARCO DIRECCION--> </div><!--Fin de MARCO Global --> </body> <!-- InstanceEnd --></html> <file_sep>/intranet/inc/trackingbd.php <?php define ("salto","\n<br>\n"); $conexion=mysql_connect("localhost","drfree24db","222702qaaq"); $baseDeDatos=mysql_select_db("drwebs_",$conexion); ?> <file_sep>/prueba/PHPparte2.php <?php ## ## RECIBIR FORMULARIO ## Aqui pueden ir los campos que uno quiera ## if(isset($_POST['submit'])){ // comprobamos que se ha enviado el formulario // comprobar que han seleccionado una foto if($_FILES['foto']['name'] != ""){ // El campo foto contiene una imagen... // Primero, hay que validar que se trata de un JPG/GIF/PNG $allowedExts = array("jpg", "jpeg", "gif", "png", "JPG", "GIF", "PNG"); $extension = end(explode(".", $_FILES["foto"]["name"])); if ((($_FILES["foto"]["type"] == "image/gif") || ($_FILES["foto"]["type"] == "image/jpeg") || ($_FILES["foto"]["type"] == "image/png") || ($_FILES["foto"]["type"] == "image/pjpeg")) && in_array($extension, $allowedExts)) { // el archivo es un JPG/GIF/PNG, entonces... $extension = end(explode('.', $_FILES['foto']['name'])); $foto = substr(md5(uniqid(rand())),0,10).".".$extension; $directorio = "img"; // directorio de tu eleccin // almacenar imagen en el servidor move_uploaded_file($_FILES['foto']['tmp_name'], $directorio.'/'.$foto); $minFoto = 'min_'.$foto; $resFoto = 'res_'.$foto; resizeImagen($directorio.'/', $foto, 300, 100,$minFoto,$extension); //resizeImagen($directorio.'/', $foto, 500, 500,$resFoto,$extension); unlink($directorio.'/'.$foto); echo "Si funciona el mensaje"; } else { // El archivo no es JPG/GIF/PNG $malformato = $_FILES["foto"]["type"]; header("Location: cargarImagen.php?error=noFormato&formato=$malformato"); exit; } } else { // El campo foto NO contiene una imagen header("Location: cargarImagen.php?error=noImagen"); exit; } } // fin del submit #### ## Funcin para redimencionar las imgenes ## utilizando las liberas de GD de PHP #### function resizeImagen($ruta, $nombre, $alto, $ancho,$nombreN,$extension){ $rutaImagenOriginal = $ruta.$nombre; if($extension == 'GIF' || $extension == 'gif'){ $img_original = imagecreatefromgif($rutaImagenOriginal); } if($extension == 'jpg' || $extension == 'JPG'){ $img_original = imagecreatefromjpeg($rutaImagenOriginal); } if($extension == 'png' || $extension == 'PNG'){ $img_original = imagecreatefrompng($rutaImagenOriginal); } $max_ancho = $ancho; $max_alto = $alto; list($ancho,$alto)=getimagesize($rutaImagenOriginal); $x_ratio = $max_ancho / $ancho; $y_ratio = $max_alto / $alto; if( ($ancho <= $max_ancho) && ($alto <= $max_alto) ){//Si ancho $ancho_final = $ancho; $alto_final = $alto; } elseif (($x_ratio * $alto) < $max_alto){ $alto_final = ceil($x_ratio * $alto); $ancho_final = $max_ancho; } else{ $ancho_final = ceil($y_ratio * $ancho); $alto_final = $max_alto; } $tmp=imagecreatetruecolor($ancho_final,$alto_final); imagecopyresampled($tmp,$img_original,0,0,0,0,$ancho_final, $alto_final,$ancho,$alto); imagedestroy($img_original); $calidad=70; imagejpeg($tmp,$ruta.$nombreN,$calidad); } ?><file_sep>/intranet/cargartracking.php <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Untitled Document</title> <link href="style/estilo.css" rel="stylesheet" type="text/css" /> </head> <body> <?php session_start(); if ($_SESSION['admin'] == 'drfree24') { $id = $_POST['cargarguias']; ?> <div id="MarcoGlobal"> <div id="Marcohead"></div><!--Fin de marco head--> <div id="MarcoMenu"> / <a href="control.php">Volver a buscador</a> / <a href="destruir.php">Cerrar Admin Center</a> / </div> <!--Fin de marco menu--> <div id="Marcocontenedor"> <br /><br /> <b> Cargar Nuevo Tracking</b> <br /><br /> <form action="#" method="POST" enctype="multipart/form-data"> <table width="498" border="0" align="center"> <tr> <td width="199">Usuario N°:</td> <td width="287"><label for="id"></label> <input type="text" name="id" id="id" value="<?php echo $id;?>"/></td> </tr> <tr> <td>N° Tracking:</td> <td><label for="tracking"></label> <input type="text" name="tracking" id="tracking" /></td> </tr> <tr> <td>Direccion de Envio:</td> <td><label for="direccion"></label> <input type="text" name="direccion" id="direccion" /></td> </tr> <tr> <td>Libras:</td> <td><label for="direccion"></label> <input type="text" name="libras" id="libras" /></td> </tr> <tr> <td>Dimensiones:</td> <td><label for="direccion"></label> <input type="text" name="dimension" id="dimension" /></td> </tr> <tr> <td>Volumen:</td> <td><label for="direccion"></label> <input type="text" name="volumen" id="volumen" /></td> </tr> <tr> <td>Costo de Envio:</td> <td><label for="direccion"></label> <input type="text" name="costo" id="costo" /></td> </tr> <tr> <td>Descripcion:</td> <td><label for="des"></label> <textarea name="des" id="des" cols="45" rows="5"></textarea></td> </tr> <tr> <td>Cargar Foto del Paquete:</td> <td><input type="file" name="foto" /></td> </tr> <tr> <td>&nbsp;</td> <td><input type="hidden" value="<?php echo $id;?>" name="cargarguias"/></td> </tr> <tr> <td>&nbsp;</td> <td><table width="200" border="0"> <tr> <td align="center"><input type="submit" name="guardartracking" value="Guardar"></td> <td align="center"><input type="reset" name="borrar" value="Borrar"></td> </tr> </table></td> </tr> </table> <br /><br /> </form> <?php if(isset($_POST['guardartracking'])){ #### ## Funcin para redimencionar las imgenes ## utilizando las liberas de GD de PHP #### function resizeImagen($ruta, $nombre, $alto, $ancho,$nombreN,$extension){ $rutaImagenOriginal = $ruta.$nombre; if($extension == 'GIF' || $extension == 'gif'){ $img_original = imagecreatefromgif($rutaImagenOriginal); } if($extension == 'jpg' || $extension == 'JPG'){ $img_original = imagecreatefromjpeg($rutaImagenOriginal); } if($extension == 'png' || $extension == 'PNG'){ $img_original = imagecreatefrompng($rutaImagenOriginal); } $max_ancho = $ancho; $max_alto = $alto; list($ancho,$alto)=getimagesize($rutaImagenOriginal); $x_ratio = $max_ancho / $ancho; $y_ratio = $max_alto / $alto; if( ($ancho <= $max_ancho) && ($alto <= $max_alto) ){//Si ancho $ancho_final = $ancho; $alto_final = $alto; } elseif (($x_ratio * $alto) < $max_alto){ $alto_final = ceil($x_ratio * $alto); $ancho_final = $max_ancho; } else{ $ancho_final = ceil($y_ratio * $ancho); $alto_final = $max_alto; } $tmp=imagecreatetruecolor($ancho_final,$alto_final); imagecopyresampled($tmp,$img_original,0,0,0,0,$ancho_final, $alto_final,$ancho,$alto); imagedestroy($img_original); $calidad=70; imagejpeg($tmp,$ruta.$nombreN,$calidad); } $largo=2; $str = "abcdefghijklmnopqrstuvwxyz"; $may = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; $num = "1234567890"; $cad = ""; # Comienzo de la generacion de clave. $cad = substr($may ,rand(0,24),1); $cad .= substr($num ,rand(0,10),1); $cad .= substr($num ,rand(0,10),1); for($i=0; $i<$largo; $i++) { $cad .= substr($str,rand(0,24),1); } ; // comprobamos que se ha enviado el formulario // comprobar que han seleccionado una foto if($_FILES['foto']['name'] != ""){ // El campo foto contiene una imagen... // Primero, hay que validar que se trata de un JPG/GIF/PNG $allowedExts = array("jpg", "jpeg", "gif", "png", "JPG", "GIF", "PNG"); $extension = end(explode(".", $_FILES["foto"]["name"])); if ((($_FILES["foto"]["type"] == "image/gif") || ($_FILES["foto"]["type"] == "image/jpeg") || ($_FILES["foto"]["type"] == "image/png") || ($_FILES["foto"]["type"] == "image/pjpeg")) && in_array($extension, $allowedExts)) { // el archivo es un JPG/GIF/PNG, entonces... $extension = end(explode('.', $_FILES['foto']['name'])); $foto = $cad . ".".$extension; $directorio = "img"; // directorio de tu eleccin // almacenar imagen en el servidor move_uploaded_file($_FILES['foto']['tmp_name'], $directorio.'/'.$foto); $minFoto = 'guia_'.$foto; $resFoto = 'res_'.$foto; resizeImagen($directorio.'/', $foto, 800, 600,$minFoto,$extension); //resizeImagen($directorio.'/', $foto, 500, 500,$resFoto,$extension); unlink($directorio.'/'.$foto); echo "Se ha cargado el un nuevo tracking "; } else { // El archivo no es JPG/GIF/PNG $malformato = $_FILES["foto"]["type"]; echo "El tipo de archivo no es una imagen valida"; //header("Location: index.php"); exit; } } else { // El campo foto NO contiene una imagen echo "Se ha cargado un nuevo tracking sin imagen "; //echo "No contiene ninguna imagen"; //header("Location: index.php"); //exit; } } // fin del submit ?> <?php if (isset($_POST['guardartracking'])) { $id = $_POST['id']; $tracking = $_POST['tracking']; $direccion = $_POST['direccion']; $libras = $_POST['libras']; $volumen = $_POST['volumen']; $costo = $_POST['costo']; $des = $_POST['des']; $dimension = $_POST['dimension']; include ("inc/trackingbd.php"); $ssql = "SELECT * FROM guias WHERE tracking ='$tracking'"; $rs = mysql_query($ssql,$conexion); if (mysql_num_rows($rs)>0) { echo "<b>Este Numero de Tracking ya existe</b>"; echo "<br><br>"; mysql_close($conexion); }else { mysql_query ("INSERT INTO guias VALUES ('', '$id','$tracking','$direccion','$libras','$volumen','$costo','$des','$cad','$dimension')"); mysql_close ($conexion); echo "La guia se ha cargado con exito"; echo "<br><br>"; } }//aqui termina el beta ?> </div><!--Fin de marco contenedor--> </div><!--Fin de marco global--> <?php } else { session_destroy(); header("location:indexn.php"); } ?> </body> </html>
a3ae8649ea60ddb94d80cd266830608381fab1ce
[ "PHP" ]
30
PHP
Systemsadms/drfree24v1.0
663fe2c4c1e0a8891aa0c3111254ea6144e39dd2
3104103187a5fea83dae52cba5ba75d3e71e2bc9
refs/heads/master
<file_sep> //Including Express var express = require('express'); var app = express(); // app.use(function(req, res, next) { // res.header("Access-Control-Allow-Origin", "*"); // res.header("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept"); // next(); // }); //including express middleware //body-parser var bodyParser = require('body-parser'); //include mongoose var mongoose = require('mongoose'); mongoose.connect('mongodb://localhost/project-aardvark'); //define our schema //here at the schema is where you add fields and their rules for your databases var movieSchema = mongoose.Schema({ title: String, year_of_release: Number, category: String }); //compile our model //informs mongodb available var Movie = mongoose.model('Movie' , movieSchema); //adding the middleware app.use(bodyParser.urlencoded({extended: true})); //routing using express easy app.get('/movies', function(req, res){ Movie.find(function(err, movies){ if(err){ console.log(err); }else{ res.json(movies); } }); }); // movies =[ // { // title: 'Black Mass', // category: ['thriller', 'action', 'biopic'], // main_actors: [ // { // first_name: 'Johnny', // last_name: 'Depp' // }, // { // first_name: 'Benedict', // last_name: 'Cumberbatch' // }] // }, // { // title: 'Avengers: age of Ultron', // category: ['fantasy', 'action'], // main_actors: [ // { // first_name: 'Chris', // last_name: 'Evans' // }, // { // first_name: 'Robert', // last_name: 'Jr' // }] // }, // { // title: 'Straight-Outta-Compton', // category: ['drama' , ''], // main_actors: [ // { // first_name: 'Oshea', // last_name: 'Jackson' // }, // { // first_name: 'aldis', // last_name: 'Hodge' // } // ] // } // ]; app.post('/movies/new', function(req, res){ console.log(req.body); formdata = req.body; // //create an instance of a movie // var movie = new Movie( // { // title: formdata.title, // year_of_release: formdata.year_of_release // } // ); //the code commented below imports data from the mongodb onto the view of the app //thus the code above is rendered useless if the code below is initialized var movie = new Movie(formdata); movie.save(function(err, movie){ if(err){ console.log(err); }else{ console.log('succesfully saved the movie :-)'); // res.redirect('/movies'); } }); }); //dispatch is a route handler // var server = http.createServer( // dispatch({ // '/movies' : { // 'GET /' : function(request, response){ // movies =[ // { // title: 'Black Mass', // category: ['thriller', 'action', 'true-story'], // main_actors: [ // { // first_name: 'Johnny', // last_name: 'Depp' // }, // { // first_name: 'Benedict', // last_name: 'Cumberbatch' // }] // }, // { // title: 'Avengers: age of Ultron', // category: ['fanatsy', 'action'], // main_actors: [ // { // first_name: 'Chris', // last_name: 'Evans' // }, // { // first_name: 'Robert', // last_name: 'Jr' // }] // }, // { // title: 'Straight-Outta-Compton', // category: ['drama' , ''], // main_actors: [ // { // first_name: 'Oshea', // last_name: 'Jackson' // }, // { // first_name: 'aldis', // last_name: 'Hodge' // } // ] // } // ]; // response.end(JSON.stringify(movies)); // }, // '/POST': function(request, response, next){ // //get parameters from the form // var formdata; // request.on('data', function(chunk){ // formdata = querystring.parse(chunk.toString()); // }); // request.on('end', function(){ // console.log(formdata); // //create an instance of a movie // var movie = new Movie( // { // title: formdata.title, // year_of_release: formdata.year_of_release // } // ); // }); // //syntax for creation of a new instance ...... var name = new name({}) // } // } // }) // ); // console.log('visiting %s', request.url); // Access-Control-Allow-Origin allows browser to be accessed from other pages // response.writeHead(200,{ // 'content-type' : 'application/json', // ' Access-Control-Allow-Origin' : '*' // }); // response.end(JSON.stringify(message)); // // }, // // '/movies' : function (request,response){ // // console.log('visiting %s', request.url); // // response.end('this is the movies path'); // // }, // // '/actors' : function (request,response){ // // console.log('visiting %s', request.url); // // response.end('this is the actors path'); app.listen(8085, function(){ console.log('server runnning on http://127.0.0.1:8085'); });
fe5524ab08cee6da0c37a2f3dcce875dd8f27cdc
[ "JavaScript" ]
1
JavaScript
josephbill/MEAN
0f9c158e9d0655b0bfddde8ad4ab302419d3ccef
0ca8f976eeea120392ad090a0c8ce904ee4074c7
refs/heads/main
<repo_name>MarkVeinS/ProjetoFinal<file_sep>/projeto_banco/src/main/java/com/accenture/projeto/entity/Operacao.java package com.accenture.projeto.entity; public enum Operacao { SAQUE, DEPOSITO, TRANSFERENCIA } <file_sep>/projeto_banco/src/main/java/com/accenture/projeto/entity/Conta.java package com.accenture.projeto.entity; import java.util.List; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.ManyToOne; import javax.persistence.OneToMany; import javax.persistence.OneToOne; import javax.persistence.Table; @Entity @Table public class Conta { @Id @GeneratedValue(strategy = GenerationType.IDENTITY) private int idContaCorrente; @Column(nullable=false) private String contaCorrenteAgencia; @Column(nullable=false) private String contaCorrenteNumero; private double contaCorrenteSaldo; @ManyToOne private Cliente cliente; @OneToOne private Agencia agencia; @OneToMany private List<Extrato> extrato; public Conta(){ } public String getContaCorrenteAgencia() { return contaCorrenteAgencia; } public void setContaCorrenteAgencia(String contaCorrenteAgencia) { this.contaCorrenteAgencia = contaCorrenteAgencia; } public String getContaCorrenteNumero() { return contaCorrenteNumero; } public void setContaCorrenteNumero(String contaCorrenteNumero) { if(contaCorrenteNumero!=null) { this.contaCorrenteNumero = contaCorrenteNumero; }else { throw new NullPointerException("O numero da conta não pode ser vazio"); } } public double getContaCorrenteSaldo() { return contaCorrenteSaldo; } public void setContaCorrenteSaldo(double contaCorrenteSaldo) { this.contaCorrenteSaldo = contaCorrenteSaldo; } public int getIdContaCorrente() { return idContaCorrente; } public void setIdContaCorrente(int idContaCorrente) { this.idContaCorrente = idContaCorrente; } public Cliente getCliente() { return cliente; } public void setCliente(Cliente cliente) { this.cliente = cliente; } } <file_sep>/README.md # PROJETO SISTEMA BANCÁRIO Esse projeto faz parte do treinamento da Academia Java- Industria X da [Accenture](https://accenture.com.br/). O objetivo foi criar um sistema bancário que contenha uma API que irá realizar as seguintes transações bancárias: Sacar, Depositar e Transferir numerário usando o Spring Boot. ## Menu * 📚 [Dependências](#-dependências) * 📂 [Estruturação dos arquivos](#-estruturação-dos-arquivos) * 🖥️ [Trabalhando no projeto](#%EF%B8%8F-trabalhando-no-projeto) ### 📚 Bibliotecas O projeto foi feito utilizando [Spring Boot](https://) e [Java](https://www.java.org/). As principais dependências utilizadas foram: - [Spring Web ]() - [Spring Data JPA]() - [Spring Boot DevTools]() - [Driver MySQL]() A aplicação irá rodar na porta 8080: http://localhost:8080/ - Endpoints: /sacar /depositar /transferir /info ### 📂 Estruturação dos arquivos O repositório tem estas pastas: ``` sistema bancário . ├── projeto-final | │ ├── src / main / java │ ├── src / main / resources │ ├── src / test / java │ ├── Biblioteca do sistema JRE │ ├── Dependências Maven │ ├── src | | ├──HELP.md │ │ ├──mvnw │ │ ├──mvnw.cmd │ │ ├──pom.xml │ └── └── ``` ### 🖥️ Trabalhando no projeto A equipe organizou a tarefas no trello e se reuniu diariamente, realizando as deilys. ### 👩‍💻👨‍💻 Equipe Nossa equipe de desenvolvedores que construiu a aplicação. <table> <tr> <td align="center"><a href="https://github.com/MarkVeinS"><img style="border-radius: 50%;" src="https://avatars.githubusercontent.com/u/87442462?v=4" width="100px;" alt=""/><br /><sub><b><NAME></b></sub></a><br/> <img src="https://img.shields.io/badge/-Marcos-blue?style=flat-square&logo=Linkedin&logoColor=white" </td> <td align="center"><a href="https://github.com/melissapsilva"><img style="border-radius: 50%;" src="https://avatars.githubusercontent.com/u/87448254?v=4" width="100px;" alt=""/><br /><sub><b>Melissa</b></sub></a><br/> <img src="https://img.shields.io/badge/-Melissa-blue?style=flat-square&logo=Linkedin&logoColor=white" </td> <td align="center"><a href="https://github.com/micheleset7"><img style="border-radius: 50%;" src="https://avatars.githubusercontent.com/u/60739164?v=4" width="100px;" alt=""/><br /><sub><b><NAME></b></sub></a><br/> <a href="https://www.linkedin.com/in/michele-coelho-5017aa79/"><img src="https://img.shields.io/badge/-Michele-blue?style=flat-square&logo=Linkedin&logoColor=white"></a></td> </tr> </table> <file_sep>/projeto_banco/src/main/java/com/accenture/projeto/repository/ContaRepository.java package com.accenture.projeto.repository; import org.springframework.data.jpa.repository.JpaRepository; import org.springframework.stereotype.Repository; import com.accenture.projeto.entity.Conta; @Repository public interface ContaRepository extends JpaRepository<Conta, Integer> { } <file_sep>/projeto_banco/src/main/java/com/accenture/projeto/controller/ContaController.java package com.accenture.projeto.controller; import java.time.LocalDateTime; import java.time.format.DateTimeFormatter; import java.util.List; import java.util.Optional; import javax.validation.Valid; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.RequestBody; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import com.accenture.projeto.entity.Cliente; import com.accenture.projeto.entity.Conta; import com.accenture.projeto.exceptions.ExcecaoTransferencia; import com.accenture.projeto.repository.ClienteRepository; import com.accenture.projeto.repository.ContaRepository; @RestController public class ContaController { @Autowired private ContaRepository contaRepositorio; Conta conta; Conta conta2; double valorAtual; double valorAtual2; @GetMapping("/depositar") String Depositar(@RequestParam("valor")double valor,@RequestParam("id")int id) { conta = contaRepositorio.getById(id); valorAtual = conta.getContaCorrenteSaldo(); valorAtual += valor; conta.setContaCorrenteSaldo(valorAtual); contaRepositorio.save(conta); return "VALOR DEPOSITADO NA CONTA : R$ " + valor + " VALOR ATUAL : R$ "+ conta.getContaCorrenteSaldo(); } @GetMapping("/sacar") String Sacar(@RequestParam("valor")double valor,@RequestParam("id")int id) { conta = contaRepositorio.getById(id); valorAtual = conta.getContaCorrenteSaldo(); valorAtual-=valor; conta.setContaCorrenteSaldo(valorAtual); contaRepositorio.save(conta); return "VALOR SACADO DA CONTA : R$ " + valor + " VALOR ATUAL: R$ " + conta.getContaCorrenteSaldo(); } @RequestMapping(value="/transferir",method=RequestMethod.GET) String Transferir(@RequestParam("valor")double valor,@RequestParam("id")int id,@RequestParam("id2")int id2) throws ExcecaoTransferencia{ this.conta=contaRepositorio.getById(id); this.conta2=contaRepositorio.getById(id2); if(id2==0) { throw new ExcecaoTransferencia(); } valorAtual=conta.getContaCorrenteSaldo(); valorAtual2=conta2.getContaCorrenteSaldo(); valorAtual-=valor; valorAtual2+=valor; //salva conta 1 this.conta.setContaCorrenteSaldo(valorAtual); contaRepositorio.save(conta); //salva conta 2 this.conta2.setContaCorrenteSaldo(valorAtual2); contaRepositorio.save(conta2); return "O valor transferido foi de: " + valor + " --- " + "Conta 1 R$ : " + contaRepositorio.getById(id).getContaCorrenteSaldo() + " Conta 2 R$ : " + contaRepositorio.getById(id2).getContaCorrenteSaldo(); } @GetMapping(value="/saldo/{id}") String exibirSaldo(@PathVariable(value="id")int id) { Conta conta; conta = contaRepositorio.getById(id); return "Seu saldo e de: R$: " + conta.getContaCorrenteSaldo(); } @GetMapping(value="/info/{id}") String exibirInfo(@PathVariable(value="id")int id) { Conta conta; conta = contaRepositorio.getById(id); return "Conta Nº : " + conta.getContaCorrenteNumero() + "--- Agencia: " + conta.getContaCorrenteAgencia(); } @RequestMapping(value = "/conta", method = RequestMethod.GET) public List<Conta> Get() { return contaRepositorio.findAll(); } @RequestMapping(value = "/conta/{id}", method = RequestMethod.GET) public ResponseEntity<Conta> GetById(@PathVariable(value = "id") int id) { Optional<Conta> conta = contaRepositorio.findById(id); if(conta.isPresent()) return new ResponseEntity<Conta>(conta.get(), HttpStatus.OK); else return new ResponseEntity<>(HttpStatus.NOT_FOUND); } @RequestMapping(value = "/conta", method = RequestMethod.POST) public Conta Post(@RequestParam("agencia") String agencia, @RequestParam("numero") String numero, @RequestParam("saldo") double saldo,Conta conta) { conta.setContaCorrenteAgencia(agencia); conta.setContaCorrenteNumero(numero); conta.setContaCorrenteSaldo(saldo); return contaRepositorio.save(conta); } @RequestMapping(value = "/conta/{id}", method = RequestMethod.PUT) public ResponseEntity<Conta> Put(@PathVariable(value = "id") int id, @Valid @RequestBody Conta newConta) { Optional<Conta> oldConta = contaRepositorio.findById(id); if(oldConta.isPresent()){ Conta conta = oldConta.get(); conta.setContaCorrenteNumero(newConta.getContaCorrenteNumero()); contaRepositorio.save(conta); return new ResponseEntity<Conta>(conta, HttpStatus.OK); } else return new ResponseEntity<>(HttpStatus.NOT_FOUND); } @RequestMapping(value = "/conta/{id}", method = RequestMethod.DELETE) public ResponseEntity<Object> Delete(@PathVariable(value = "id") int id) { Optional<Conta> conta = contaRepositorio.findById(id); if(conta.isPresent()){ contaRepositorio.delete(conta.get()); return new ResponseEntity<>(HttpStatus.OK); } else return new ResponseEntity<>(HttpStatus.NOT_FOUND); } } <file_sep>/projeto_banco/src/main/java/com/accenture/projeto/exceptions/ExcecaoTransferencia.java package com.accenture.projeto.exceptions; public class ExcecaoTransferencia extends Exception { /** * */ private static final long serialVersionUID = 1L; public String getMessage() { //3 - Enter a message in it and return the same System.out.println("Passou por aqui"); return "Pegou mensagem"; } @Override public String toString() { return "CustomException [getMessage()=" + getMessage() + "]"; } } <file_sep>/projeto_banco/src/main/java/com/accenture/projeto/controller/ClienteController.java package com.accenture.projeto.controller; import java.util.List; import java.util.Optional; import javax.validation.Valid; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.RequestBody; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import com.accenture.projeto.entity.Cliente; import com.accenture.projeto.entity.Conta; import com.accenture.projeto.repository.ClienteRepository; @RestController public class ClienteController { @Autowired private ClienteRepository clienteRepositorio; @RequestMapping(value = "/cliente", method = RequestMethod.GET) public List<Cliente> Get() { return clienteRepositorio.findAll(); } @RequestMapping(value = "/cliente/{id}", method = RequestMethod.GET) public ResponseEntity<Cliente> GetById(@PathVariable(value = "id") int id) { Optional<Cliente> cliente = clienteRepositorio.findById(id); if(cliente.isPresent()) return new ResponseEntity<Cliente>(cliente.get(), HttpStatus.OK); else return new ResponseEntity<>(HttpStatus.NOT_FOUND); } /*@RequestMapping(value = "/cliente", method = RequestMethod.POST) public Cliente Post(@Valid @RequestBody Cliente cliente) { return clienteRepositorio.save(cliente); }*/ @RequestMapping(value = "/cliente", method = RequestMethod.POST) public Cliente Post(@RequestParam("nome") String nome, @RequestParam("cpf") String cpf, @RequestParam("telefone") String telefone,Cliente cliente) { cliente.setClienteNome(nome); try { cliente.setClienteCPF(cpf); } catch (Exception e) { // TODO Auto-generated catch block e.printStackTrace(); e.getMessage(); } cliente.setClienteFone(telefone); return clienteRepositorio.save(cliente); } @RequestMapping(value = "/cliente/{id}", method = RequestMethod.PUT) public ResponseEntity<Cliente> Put(@PathVariable(value = "id") int id, @Valid @RequestBody Cliente newCliente) { Optional<Cliente> oldCliente = clienteRepositorio.findById(id); if(oldCliente.isPresent()){ Cliente cliente = oldCliente.get(); cliente.setClienteNome(newCliente.getClienteNome()); clienteRepositorio.save(cliente); return new ResponseEntity<Cliente>(cliente, HttpStatus.OK); } else return new ResponseEntity<>(HttpStatus.NOT_FOUND); } @RequestMapping(value = "/cliente/{id}", method = RequestMethod.DELETE) public ResponseEntity<Object> Delete(@PathVariable(value = "id") int id) { Optional<Cliente> cliente = clienteRepositorio.findById(id); if(cliente.isPresent()){ clienteRepositorio.delete(cliente.get()); return new ResponseEntity<>(HttpStatus.OK); } else return new ResponseEntity<>(HttpStatus.NOT_FOUND); } } <file_sep>/projeto_banco/src/main/java/com/accenture/projeto/repository/ExtratoRepository.java package com.accenture.projeto.repository; import org.springframework.data.jpa.repository.JpaRepository; import com.accenture.projeto.entity.Extrato; public interface ExtratoRepository extends JpaRepository<Extrato,Integer> { } <file_sep>/projeto_banco/src/main/java/com/accenture/projeto/controller/AgenciaController.java package com.accenture.projeto.controller; import java.util.List; import java.util.Optional; import javax.validation.Valid; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.RequestBody; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import com.accenture.projeto.entity.Agencia; import com.accenture.projeto.entity.Conta; import com.accenture.projeto.repository.AgenciaRepository; @RestController public class AgenciaController { @Autowired private AgenciaRepository agenciaRepositorio; @RequestMapping(value = "/agencia", method = RequestMethod.GET) public List<Agencia> Get() { return agenciaRepositorio.findAll(); } @RequestMapping(value = "/agencia/{id}", method = RequestMethod.GET) public ResponseEntity<Agencia> GetById(@PathVariable(value = "id") int id) { Optional<Agencia> agencia = agenciaRepositorio.findById(id); if (agencia.isPresent()) return new ResponseEntity<Agencia>(agencia.get(), HttpStatus.OK); else return new ResponseEntity<>(HttpStatus.NOT_FOUND); } @RequestMapping(value = "/agencia", method = RequestMethod.POST) public Agencia Post(@Valid @RequestBody Agencia agencia) { return agenciaRepositorio.save(agencia); } @RequestMapping(value = "/agencia/{id}", method = RequestMethod.PUT) public ResponseEntity<Agencia> Put(@PathVariable(value = "id") int id, @Valid @RequestBody Agencia newAgencia) { Optional<Agencia> oldAgencia = agenciaRepositorio.findById(id); if (oldAgencia.isPresent()) { Agencia agencia = oldAgencia.get(); agencia.setNomeAgencia(newAgencia.getNomeAgencia()); agenciaRepositorio.save(agencia); return new ResponseEntity<Agencia>(agencia, HttpStatus.OK); } else return new ResponseEntity<>(HttpStatus.NOT_FOUND); } @RequestMapping(value = "/agencia/{id}", method = RequestMethod.DELETE) public ResponseEntity<Object> Delete(@PathVariable(value = "id") int id) { Optional<Agencia> agencia = agenciaRepositorio.findById(id); if (agencia.isPresent()) { agenciaRepositorio.delete(agencia.get()); return new ResponseEntity<>(HttpStatus.OK); } else return new ResponseEntity<>(HttpStatus.NOT_FOUND); } }
5c1062fb84cbd22f724dfbb1c8d662e2f0d39be4
[ "Markdown", "Java" ]
9
Java
MarkVeinS/ProjetoFinal
3eb7f27927a3519570bfcc59f82db15fe87c7464
8d26e41c4d2f886fbfacaba31d48100394756300
refs/heads/master
<repo_name>booknara/AndroidDeviceAdmin<file_sep>/README.md Disable the Force Stop & Uninstall button in Manage Application using Device Administration in Android. And also, you can change Device Administration permission by setting /res/xml/uses_policies.xml - limit-password : Set password rules - watch-login : Monitor screen-unlock attempts - reset-password : Change the screen-unlock password - force-lock : Lock the screen - wipe-data : Erase all data - expire-password : Set lock-screen password expiration - encrypted-storage : Set storage encryption - disable-camera : Disable cameras If you have any questions or bug reports, please send me a message(@daniel_booknara) or leave an issue ticket.<file_sep>/src/com/booknara/deviceadmin/DeviceAdminUtil.java package com.booknara.deviceadmin; import android.app.Activity; import android.app.admin.DevicePolicyManager; import android.content.ComponentName; import android.content.Context; import android.content.Intent; public class DeviceAdminUtil { public static final int DEVICE_ADMIN_REQUEST = 9; private static DevicePolicyManager mDevicePolicyManager; private static ComponentName mComponentName; // Suppress default constructor for noninstantiability private DeviceAdminUtil() { } public static DevicePolicyManager getDevicePolicyManager() { return mDevicePolicyManager; } public static void setDevicePolicyManager( final DevicePolicyManager devicePolicyManager) { mDevicePolicyManager = devicePolicyManager; } public static ComponentName getComponentName() { return mComponentName; } public static void setComponentName(final ComponentName componentName) { mComponentName = componentName; } public static void initDPM(final Activity activity) { if (mDevicePolicyManager == null) { setDevicePolicyManager((DevicePolicyManager) activity .getSystemService(Context.DEVICE_POLICY_SERVICE)); } } public static <T> void initComponent(final Activity activity, final Class<T> reciever) { if (mComponentName == null) { setComponentName(new ComponentName(activity, reciever)); } } public static boolean isDeviceAdmin() { return mDevicePolicyManager.isAdminActive(mComponentName); } public static void registerDeviceAdmin(final Activity activity, final int requestCode) { Intent intent = new Intent(DevicePolicyManager.ACTION_ADD_DEVICE_ADMIN); intent.putExtra(DevicePolicyManager.EXTRA_DEVICE_ADMIN, getComponentName()); intent.putExtra(DevicePolicyManager.EXTRA_ADD_EXPLANATION, "Needs to run as a Device Administrator to prevent uninstalling the app"); activity.startActivityForResult(intent, requestCode); } public static void unregisterDeviceAdmin() { if (mDevicePolicyManager.isAdminActive(mComponentName)) mDevicePolicyManager.removeActiveAdmin(mComponentName); } }
32767032ac14996707386b0dc091fc8e250b917b
[ "Markdown", "Java" ]
2
Markdown
booknara/AndroidDeviceAdmin
cfcc553256b3d7734b67f4a9764fd46a82e5ef9f
c27947fa71c3c0188ff95d6e3159aa5da2cdc46b
refs/heads/master
<repo_name>rafaelbernard/doit<file_sep>/doit-example #!/usr/bin/env ruby #Just a simple example command. puts "It worked! 🎩✨"<file_sep>/doit-network-usage-report #!/bin/bash sudo vnstat -q <file_sep>/doit-update #!/usr/bin/env ruby # Installs the latest version of these tools. require 'tmpdir' require 'shellwords' # # Update this to point to wherever you're hosting your tools. If you're not hosting with git, # change the `fetch_latest_code!` function below to suit your scenario. # SOURCE_REPO = 'https://github.com/rafaelbernard/doit.git' # # This message is shown if updating the tools fails for any reason; you can customize this to tell # people where to go ask for help if they encounter trouble. # UPDATE_FAILED_MESSAGE = '' # # Gets the latest version of the tools and copies them into the current working directory. # def fetch_latest_code! `git clone --quiet #{Shellwords.escape(SOURCE_REPO)} .` end # # Returns an array containing the time the tools were updated and which version they are now at. # def get_tools_version_info timestamp, sha1 = `git log -1 --pretty='%at,%h'`.strip.split(',') [ Time.at(timestamp.to_i), sha1 ] end DESTINATION_DIR = File.dirname(__FILE__) unless File.stat(DESTINATION_DIR).writable? puts "Looks like you don't have write permissions to #{DESTINATION_DIR}. Run this command to fix it and try again:" puts " sudo chown -R `whoami` #{DESTINATION_DIR}" abort end Dir.mktmpdir do |dir| Dir.chdir dir do fetch_latest_code! `make install_quiet` if $?.exitstatus == 0 timestamp, version = get_tools_version_info puts "Updated tools to #{version} (#{timestamp})" else puts "Eep, updating tools failed. :( #{UPDATE_FAILED_MESSAGE}" abort end end end <file_sep>/doit-after-fresh-ubuntu-install #!/usr/bin/env bash set -x echo "aptitude" sudo apt-get install -y aptitude echo "Installing git" sudo aptitude install -y git echo "medias and extras" sudo aptitude install -y ubuntu-restricted-extras vlc echo "dev tools" sudo aptitude install -y httpie # For laptops # todo - check #echo "laptop-mode-tools" #sudo aptitude install -y laptop-mode-tools sudo aptitude install -y tlp tlp-rdw tp-smapi-dkms acpi-call-dkms thermald <file_sep>/Makefile # # To use this Makefile with your version of magic-cli, change the value of # PREFIX to whatever you have renamed the main command to. # PREFIX = doit # # Where to install the tools # DESTINATION_DIR = /usr/local/bin # # Files to install # FILES = \ ${PREFIX} \ ${PREFIX}-* # # Rules # install: install_quiet announce_installation announce_installation: @echo "OK, ${PREFIX} command line tools have been installed. 🎉 Here's what's available:\n" && ${PREFIX} --list install_quiet: @install -m 755 -p $(FILES) ${DESTINATION_DIR} uninstall: sh -c "cd ${DESTINATION_DIR} && rm ${PREFIX} && rm ${PREFIX}-*" <file_sep>/doit-daily-check-arch #!/usr/bin/env /bin/bash # my daily check routine CMD="sudo doit update" echo "doit update" echo $CMD eval $CMD echo "pacman update" sudo pacman -Syu # showing my dotfiles status cd ~/git/pessoal/dotfiles git status cd - <file_sep>/doit-useful-ubuntu #!/usr/bin/env bash set -x echo "aptitude" sudo apt-get install -y aptitude echo "essentials" sudo aptitude install -y ssh echo "dev tools" sudo aptitude install -y httpie git echo "tools" sudo aptitude install -y tmux kitty zsh echo "Installing utils" sudo aptitude install -y default-jdk echo "medias and extras" sudo aptitude install -y ubuntu-restricted-extras vlc # For laptops # todo - check echo "laptops" sudo aptitude install -y tlp tlp-rdw tp-smapi-dkms acpi-call-dkms thermald <file_sep>/doit-useful-arch #!/usr/bin/env bash set -x # Window manager sudo pacman -Sy i3 i3lock # Essentials sudo pacman -Sy zsh fzf git ssh# tools # tools sudo pacman -Sy tmux kitty # Utils sudo pacman -Sy parcellite imagemagick scort <file_sep>/README.md # doit-cli **A foundation for building your own suite of command line tools.** This is inspired by [Slack magic-cli](https://medium.com/@SlackEng/4a1bb5fe905b). magic-cli exists to make it easy to create a set of tools that work together. It's not a tool you use as-is; it's here to offer a starting point for your own custom command line tools. Learn more about the origins of magic-cli in [The Joy of Internal Tools](https://medium.com/@SlackEng/4a1bb5fe905b), a post on the Slack Engineering blog. ## Installation This repository includes a Makefile that will install `doit-cli` and all of its subcommands into `/usr/local/bin`: ````bash $ make install ```` You can also use it to uninstall `doit-cli`: ````bash $ make uninstall ```` ## Updates An example script for updating the tools is also included; it makes installing the latest tools into a single step process: ```bash $ doit-cli update Updated tools to 01ec2ef (2016-03-30 16:20:30 -0700) ``` ## Reunning directly Some scripts can be run directly, without cloning the repo: 1. Useful ubuntu: `wget https://raw.githubusercontent.com/rafaelbernard/doit/master/doit-useful-ubuntu -O - | sudo bash` ## Links https://github.com/slackhq/magic-cli https://slack.engineering/the-joy-of-internal-tools-4a1bb5fe905b#.fi5lnlxnk <file_sep>/doit-daily-check #!/bin/sh # my daily check routine source platform echo "Network usage" if ! [ -x "$(command -v vnstat)" ]; then echo "vnstat not found. Skiping network usage." else doit network-usage-report fi echo "package updates" echo "Platform $platform" if ! [ -x "$(command -v aptitude)" ]; then echo "aptitude not found. Using apt-get." sudo apt-get update && sudo apt-get upgrade #echo 'Error: aptitude is not installed. Use apt-get.' #exit 1 else echo "aptitude found. Using aptitude." sudo aptitude update && sudo aptitude safe-upgrade fi sleep 2 echo "# Verifying snap updates" sudo snap refresh echo "# Verifying firmware updates" sudo fwupdmgr refresh && sudo fwupdmgr update if [ -f /var/run/reboot-required ] then echo "\n\nReboot required!" fi
523f944107dc1d2743d69e80891b7584d16d06b8
[ "Makefile", "Ruby", "Markdown", "Shell" ]
10
Ruby
rafaelbernard/doit
03b50690b8eaee1eb046591db6f7c53d657c91ef
1b68e26bf081788941b190169c2437dc25fd490a
refs/heads/master
<repo_name>training4developers/angular_05082017<file_sep>/src/ts/app/color-tool/services/colors.service.ts import { Injectable } from "@angular/core"; import { Http } from "@angular/http"; import { Color } from "../models/color"; @Injectable() export class ColorsService { // private colors: Color[] = [ // { id: 1, name: "red" }, // { id: 2, name: "gold" }, // { id: 3, name: "green" }, // { id: 4, name: "white" }, // { id: 5, name: "saffron" }, // { id: 6, name: "blue" }, // ]; constructor(private http: Http) { } public getAll(): Promise<Color[]> { return this.http .get("http://localhost:3010/colors") .map((res) => res.json()) .toPromise(); } public append(newColor: Color) { // newColor.id = this.colors.reduce( // (maxId, color) => Math.max(maxId, color.id), 0) + 1; // this.colors = this.colors.concat(newColor); return this; } } <file_sep>/src/ts/app/shared/components/unordered-list.component.ts import { Component, Input } from "@angular/core"; @Component({ selector: "unordered-list", template: ` <ul> <li *ngFor="let listItem of listItems"> {{listItem}} <button type="button" (click)="clickMe(listItem)"> Click Me! </button> </li> </ul> `, }) export class UnorderedListComponent { @Input() public listItems: string[]; public clickMe(value: string) { console.log("I was clicked!", value); } } <file_sep>/src/ts/app/color-tool/pipes/ellipsis.pipe.ts import { Pipe, PipeTransform } from "@angular/core"; @Pipe({ name: "ellipsis", }) export class EllipsisPipe implements PipeTransform { public transform(value: any, len: any) { const theValue = String(value); const theLen = Number(len ? len : 10); if (theValue.length > theLen) { return theValue.slice(0, theLen) + "..."; } return theValue; } }
26d739cb2543e9efa5f5d036fd9d35d070fe7b83
[ "TypeScript" ]
3
TypeScript
training4developers/angular_05082017
633ed984db94e8fe64788f11633d20e8828c1d39
93d4429a606a5739e62b4d39f02fd9c801ea36cf
refs/heads/master
<file_sep><?php $restaurants = []; $response = "hugahuag"; ?><file_sep><?php $dic = ["x"=> 10, "y"=> 20]; $dic["y"] = 40; var_dump($dic) ?><file_sep><?php $numlist = ["one","two","three"]; print($numlist[1]); $stringlist = []; array_push($stringlist,"samurai"); print($stringlist[0]); ?><file_sep><?php $name ="宮川風花"; print($name); ?><file_sep><?php $hello = "Hello, PHP!\n"; $num = 42; print($hello); print($num); ?>
bb810f9236368223e8b6ff33c4b77a7e2d148cc1
[ "PHP" ]
5
PHP
MiyagawaF/samurai_HTML-CSS
7d75c43d255908b63bb84164e6ece51ba98307c9
0573205f8878d1b00dcc7600b8648760e63b3eda
refs/heads/master
<file_sep>package by.gsu.epamlab.service; import by.gsu.epamlab.dao.NoteDao; import by.gsu.epamlab.model.Note; import org.springframework.stereotype.Service; import javax.transaction.Transactional; import java.util.List; /** * Created by Gleb on 23.04.2017. */ @Service public class NoteServiceImpl implements NoteService { private NoteDao noteDao; public void setNoteDao(NoteDao noteDao){ this.noteDao = noteDao; } @Override @Transactional public void addNote(Note note) { this.noteDao.addNote(note); } @Override @Transactional public void updateNote(Note note) { this.noteDao.updateNote(note); } @Override @Transactional public void removeNote(int id) { this.noteDao.removeNote(id); } @Override @Transactional public Note getNoteById(int id) { return this.noteDao.getNoteById(id); } @Override @Transactional public List<Note> noteList() { return this.noteDao.noteList(); } } <file_sep>jdbc.driverClassName=com.mysql.jdbc.Driver jdbc.url=jdbc:mysql://localhost:3306/notes jdbc.username=root jdbc.password=
4c025b6bace64a5d9171706dc4a829c84e8e554c
[ "Java", "INI" ]
2
Java
amf0154/notes
918547f3da37c9b3d1ffd936a66917f3f46ed0db
748564e16c9e8bead11657da01519a8680310fc3
refs/heads/master
<file_sep>#!/usr/bin/env bash set -e for domain in $RENEWED_DOMAINS; do case $domain in {% for domain in certbot_certs %} {% if domain.deploy is defined %} {{ domain.domains | first | replace('*.', '') }}) if [ -d "$RENEWED_LINEAGE" ]; then daemon_cert_root={{ domain.deploy.location | default('/etc/ssl/certs') }} # Make sure the certificate and private key files are # never world readable, even just for an instant while # we're copying them into daemon_cert_root. umask 077 cp "$RENEWED_LINEAGE/fullchain.pem" "$daemon_cert_root/$domain.cert" cp "$RENEWED_LINEAGE/privkey.pem" "$daemon_cert_root/$domain.key" {% if domain.deploy.haproxy_mode | default(false) %} cat "$RENEWED_LINEAGE/fullchain.pem" "$RENEWED_LINEAGE/privkey.pem" | tee \ "$daemon_cert_root/$domain.haproxy.pem" {% endif %} # Apply the proper file ownership and permissions for # the daemon to read its certificate and key. {% if domain.deploy.owner is defined and domain.deploy.owner|length %} if [ `id -u {{ domain.deploy.owner }} 2>/dev/null || echo -1` -ge 0 ]; then chown {{ domain.deploy.owner }} "$daemon_cert_root/$domain.cert" \ "$daemon_cert_root/$domain.key" fi {% endif %} chmod 400 "$daemon_cert_root/$domain.cert" \ "$daemon_cert_root/$domain.key" {% if domain.deploy.post_hook is defined and domain.deploy.post_hook|length%} {{ domain.deploy.post_hook }} || true {% endif %} fi ;; {% endif %} {% endfor %} esac done
8f2f44bd150781de669d6536f12d1d295d77935a
[ "Shell" ]
1
Shell
opentelekomcloud-infra/ansible-role-certbot
83f2b17bd1160cf1bd780f22a6fba32e012ab8c2
bb393794ec31b730fb4d07cce656d9501df9f8fe
refs/heads/master
<file_sep>master_db_username=exchange master_db_password=<PASSWORD> master_db_url=jdbc:mysql://vpn.server.omni-a.com/OM_sources port=8080<file_sep>package online.omnia.updater; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.jaxrs.config.BeanConfig; import org.w3c.dom.Document; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import org.xml.sax.SAXException; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; import java.io.ByteArrayInputStream; import java.io.IOException; import java.text.SimpleDateFormat; import java.util.*; import javax.ws.rs.core.Application; /** * Created by lollipop on 22.09.2017. */ public class Main{ public static void main(String[] args) throws ParserConfigurationException, IOException, SAXException { //USD/USD //RUB/USD //RUB/(EUR * 10) //USD/EUR Main main = new Main(); main.update(); } private void update() throws ParserConfigurationException, SAXException, IOException { List<CurrencyEntity> currencyEntities = MySQLDaoImpl.getInstance().getCurrencies(); //yahoo(currencyEntities); HttpMethodsUtils methodsUtils = new HttpMethodsUtils(); SimpleDateFormat simpleDateFormat = new SimpleDateFormat("dd/MM/yyyy"); String answer = methodsUtils.getMethod("http://www.cbr.ru/scripts/XML_daily.asp?date_req=" + simpleDateFormat.format(new Date()), new HashMap<>()); DocumentBuilder documentBuilder = DocumentBuilderFactory.newInstance().newDocumentBuilder(); ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(answer.getBytes()); Document document = documentBuilder.parse(byteArrayInputStream); Node root = document.getDocumentElement(); NodeList nodeList = root.getChildNodes(); NodeList childNodes; double usdValue = 1; String charcode = null; double value = 0; List<ExchangeEntity> exchangeEntities = new ArrayList<>(); ExchangeEntity exchangeEntity; for (int i = 0; i < nodeList.getLength(); i++) { childNodes = nodeList.item(i).getChildNodes(); for (int j = 0; j < childNodes.getLength(); j++) { if (childNodes.item(j).getNodeName().equals("CharCode")) { charcode = childNodes.item(j).getTextContent(); } else if (childNodes.item(j).getNodeName().equals("Value")) { value = Double.parseDouble(childNodes.item(j).getTextContent().replaceAll(",", ".")); } } if (charcode != null && charcode.equals("USD")) { usdValue = value; } else { exchangeEntity = new ExchangeEntity(); exchangeEntity.setCurrency(charcode); exchangeEntity.setRate(value); exchangeEntities.add(exchangeEntity); } } exchangeEntity = new ExchangeEntity(); for (ExchangeEntity entity : exchangeEntities) { if (entity.getCurrency().equals("USD")) continue; for (CurrencyEntity currencyEntity : currencyEntities) { if (currencyEntity.getCode().equals(entity.getCurrency())) { value = entity.getRate(); entity.setRate(1 / value * usdValue); entity.setCurrencyId(currencyEntity.getId()); entity.setTime(new Date()); MySQLDaoImpl.getInstance().addExchange(entity); } else if (currencyEntity.getCode().equals("RUB")) { exchangeEntity.setCurrency("RUB"); exchangeEntity.setRate(usdValue); exchangeEntity.setCurrencyId(currencyEntity.getId()); exchangeEntity.setTime(new Date()); } } } MySQLDaoImpl.getInstance().addExchange(exchangeEntity); MySQLDaoImpl.getMasterDbSessionFactory().close(); } private void yahoo(List<CurrencyEntity> currencyEntities) throws ParserConfigurationException, SAXException, IOException { StringBuilder urlBuilder = new StringBuilder("https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.xchange%20where%20pair%20in%20("); for (CurrencyEntity currencyEntity : currencyEntities) { urlBuilder.append("%22USD").append(currencyEntity.getCode()).append("%22,%20"); } urlBuilder = new StringBuilder(urlBuilder.substring(0, urlBuilder.length() - 4)); urlBuilder.append(")&env=store://datatables.org/alltableswithkeys"); System.out.println(urlBuilder.toString()); HttpMethodsUtils methodsUtils = new HttpMethodsUtils(); String answer = methodsUtils.getMethod(urlBuilder.toString(), new HashMap<>()); DocumentBuilder documentBuilder = DocumentBuilderFactory.newInstance().newDocumentBuilder(); ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(answer.getBytes()); Document document = documentBuilder.parse(byteArrayInputStream); Node root = document.getDocumentElement(); NodeList nodeList = root.getChildNodes().item(0).getChildNodes(); NodeList valuteList; ExchangeEntity entity; String value = "0.0"; for (int i = 0; i < nodeList.getLength(); i++) { valuteList = nodeList.item(i).getChildNodes(); for (CurrencyEntity currencyEntity : currencyEntities) { for (int j = 0; j < valuteList.getLength(); j++) { if (valuteList.item(j).getNodeName().equals("Rate")) value = valuteList.item(j).getTextContent(); if (valuteList.item(j).getNodeName().equals("Name") && !valuteList.item(j).getTextContent().equals("N/A") && valuteList.item(j).getTextContent().replaceAll("USD/", "") .equals(currencyEntity.getCode())) { entity = new ExchangeEntity(); entity.setCurrencyId(currencyEntity.getId()); entity.setRate(Double.parseDouble(value)); entity.setCurrency(currencyEntity.getCode()); entity.setTime(new Date()); MySQLDaoImpl.getInstance().addExchange(entity); } } } } } }
80adf87120c8ee1259c23980aaaaba8c4b3ab810
[ "Java", "INI" ]
2
INI
VladislavKrets/currencyupdater
9eed2d0e5a7c5e4ab64970c6ff81fe0c0fe42d7a
cee7c1c97309414f45051d2670954bd6a4ca0db7
refs/heads/master
<file_sep>import Joi from "joi"; export default { // POST /api/users/register createUser: { body: { email: Joi.string().required(), password: Joi.string().required(), phoneNo: Joi.string().required() } }, // POST /api/posts createPost: { payload: { output: 'file' }, }, // UPDATE /api/users updateUser: { body: { fname: Joi.string().required(), lname: Joi.string().required(), phoneNo: Joi.string().required() } }, // POST /api/auth/login login: { body: { email: Joi.string().required(), password: <PASSWORD>(), userType: Joi.string().required() } }, // POST /api/auth/loginadmin loginadmin: { body: { email: Joi.string().required(), password: <PASSWORD>().<PASSWORD>() } }, // GET /api/admin/user userList: { query: { limit: Joi.number() .integer() .min(1), pageNo: Joi.number() .integer() .min(1), userType: Joi.string().required() } }, // Get /api/admin/approvePendingUsers pending: { query: { userType: Joi.string().required() } }, // PUT /api/admin/approveUser approve: { query: { id: Joi.string() .alphanum() .required() } }, reject: { query: { id: Joi.string() .alphanum() .required() } }, // GET /api/admin/allusers // alluserList: { // query: { // limit: Joi.number().integer().min(1), // } // }, // PUT /api/admin/user: userId updateUserByAdmin: { body: { _id: Joi.string() .alphanum() .required(), userType: Joi.string() .valid("rider", "driver") .required() } }, // GET /api/admin/tripDetails tripList: { query: { limit: Joi.number() .integer() .min(1), pageNo: Joi.number() .integer() .min(1) } }, // GET /api/admin/tripDetails userTripRequestList: { query: { limit: Joi.number() .integer() .min(1), pageNo: Joi.number() .integer() .min(1), filter: Joi.string() } }, tripRevenueGraph: { params: { revenueYear: Joi.number() .integer() .min(2000) } }, createNewTrip: { body: { riderId: Joi.string().regex(/^[0-9a-fA-F]{24}$/), driverId: Joi.string().regex(/^[0-9a-fA-F]{24}$/) } }, updateTripObject: { body: { riderId: Joi.string().regex(/^[0-9a-fA-F]{24}$/), driverId: Joi.string().regex(/^[0-9a-fA-F]{24}$/), pickUpAddress: Joi.string(), destAddress: Joi.string(), paymentMode: Joi.string(), taxiType: Joi.string(), riderRatingByDriver: Joi.number().integer(), driverRatingByRider: Joi.number().integer(), tripStatus: Joi.string(), tripIssue: Joi.string(), tripAmt: Joi.number().integer(), seatBooked: Joi.number().integer() } }, createNewUser: { body: { userType: Joi.string() .valid("rider", "driver", "admin", "superAdmin") .required(), email: Joi.string() .email() .required(), password: Joi.string() .regex(/^[a-zA-Z0-9]{3,30}$/) .required() } } }; <file_sep>import httpStatus from "http-status"; import jwt from "jsonwebtoken"; import cloudinary from "cloudinary"; import { get } from "lodash"; import formidable from "formidable"; import APIError from "../helpers/APIError"; import { fetchReturnObj } from "../service/transform-response"; import albumImageSchema from "../models/albumImage"; import User from "../models/user"; import config from "../../config/env"; import ServerConfig from "../models/serverConfig"; // import post from "../models/post"; import notificationCtrl from "./notification"; /** * Return the post details of the user. * @param req * @param res * @param next * @returns */ function decode(token) { return jwt.decode(token, config.jwtSecret); } function tokenFromHeaders(req) { if ( req.headers.authorization && req.headers.authorization.split(" ")[0] === "JWT" ) { return req.headers.authorization.split(" ")[1]; } return ""; } /** * Get getCloudinaryDetails * @returns {Cloudinary Details} */ function getCloudinaryDetails() { return new Promise((resolve, reject) => { ServerConfig.findOneAsync({ key: "cloudinaryConfig" }) .then(foundDetails => { resolve(foundDetails.value); }) .catch(err => { reject(err); }); }); } function getAlbumImages(req,res,next){ const token = tokenFromHeaders(req); const userData = decode(token); // console.log("deletePost") // const { id } = req.params; // console.log(req.params) albumImageSchema.find({ albumId: req.body.albumId }) .then(result => { if (result) { return res.send({ success: true, data: result, message: "Album Images" }); } return res.send({ success: false, data: null, message: "Falied to get album by user ID" }); }) .catch(e => { return res.send({ success: false, data: e, message: "Falied to delete post by ID" }); }); } function deleteImage(req, res, next) { console.log("deleteImage") const { id } = req.params; console.log(req.params) albumImageSchema.remove({ _id: id }) .then(result => { if (result) { return res.send({ success: true, data: result, message: "Deleted post by ID" }); } return res.send({ success: false, data: null, message: "Falied to delete post by ID" }); }) .catch(e => { return res.send({ success: false, data: e, message: "Falied to delete post by ID" }); }); } function createPost(req, res, next) { const token = tokenFromHeaders(req); const userData = decode(token); console.log("reach dfghhj") getCloudinaryDetails() .then(value => { if (value) { cloudinary.config({ cloud_name: value.cloud_name, api_key: value.api_key, api_secret: value.api_secret }); const form = new formidable.IncomingForm(); form.on("error", err => { console.error(err, "error heree"); //eslint-disable-line }); console.log("reach dfghhj1111111111") form.parse(req, (err, fields, files) => { const img = get(files, "image.path", ""); cloudinary.v2.uploader.upload(img, (error, results) => { if (error) { return res.send({ success: false, message: "Image Not Found" }); } if (results) { albumImageSchema.create({ imageUrl: results.url, // userId: get(userData, "_id", ""), albumId: fields.albumId, // longAddress: fields.longAddress, // shortAddress: fields.shortAddress, // loc: !fields.loc ? undefined : JSON.parse(fields.loc), postedAt: Date.now() }) .then(data => { const notificationData = { userId: req.user._id, postId: data._id }; notificationCtrl.createNotification("post", notificationData); res.send({ success: true, message: "image uplaoded" }); }) .catch(e => { res.send({ success: false, message: "failed to upload" }); }); } }); }); } }) .catch(e => { const err = new APIError( `Error occured while updatating trip object ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); } export default { getAlbumImages, createPost, deleteImage, }; <file_sep>import express from "express"; import httpStatus from "http-status"; import passport from "passport"; import validate from 'express-validation'; import APIError from "../helpers/APIError"; import config from "../../config/env"; import appConfigCtrl from "../controllers/appConfig"; import albumCtrl from "../controllers/album"; import paramValidation from '../../config/param-validation'; const router = express.Router(); /** * Middleware for protected routes. All protected routes need token in the header in the form Authorization: JWT token */ router.use((req, res, next) => { passport.authenticate( "jwt", config.passportOptions, (error, userDtls, info) => { //eslint-disable-line if (error) { const err = new APIError( "token not matched", httpStatus.INTERNAL_SERVER_ERROR ); return next(err); } else if (userDtls) { req.user = userDtls; next(); } else { const err = new APIError( `token is valid but no user found ${info}`, httpStatus.UNAUTHORIZED ); return next(err); } } )(req, res, next); }); /** /api/album - */ router.route('/createAlbum') .post(albumCtrl.createAlbum); router.route('/deleteAlbum/:id') .delete(albumCtrl.deleteAlbum); // router.route('/updateAlbum/:id') // .put(albumCtrl.updateAlbum); router.route('/getAlbum') .get(albumCtrl.getAlbum); export default router; <file_sep>import gpsDistannce from "gps-distance"; import config from "../../../config/env"; import { fetchReturnObj } from "../../service/transform-response"; import sendNotification from "../../service/pushNotification"; import SocketStore from "../../service/socket-store.js"; //eslint-disable-line import TripRequestSchema from "../../models/trip-request"; import TripSchema from "../../models/trip"; import UserSchema from "../../models/user"; /** * updateLocation handler, handle location update of the rider or driver * @param socket object * @returns {*} */ /* eslint-disable */ function updateLocationHandler(socket) { /** * updateLocation event is fired by rider or driver whenever their location is changed. also it send location update to corresponding rider or driver if they are in any tripRequest or trip. * @param userObj - user whose location has to be updated * @returns emit an updateDriverLocation or updateRiderLocation event based on userType. */ socket.on("updateLocation", userObj => { const userType = userObj.userType; let searchObj = {}; if (userType === "1") { searchObj = { riderId: userObj._id }; } else if (userType === "2") { searchObj = { driverId: userObj._id }; } console.log(userType); const userID = userObj._id; UserSchema.findOneAndUpdateAsync( { _id: userID }, { $set: { gpsLoc: userObj.gpsLoc } }, { new: true } ) .then(updatedUser => { SocketStore.emitByUserId(userID, "locationUpdated", updatedUser); TripRequestSchema.findOneAsync({ $and: [ searchObj, { $or: [ { tripRequestStatus: "enRoute" }, { tripRequestStatus: "arriving" }, { tripRequestStatus: "arrived" } ] } ] }) .then(tripRequestObj => { if (tripRequestObj) { if (userType === "1") { SocketStore.emitByUserId( tripRequestObj.riderId, "updateDriverLocation", updatedUser.gpsLoc ); SocketStore.emitByUserId( "59428b1bb0c3cc0f554fd52a", "getDriverDetails", updatedUser.gpsLoc ); const driverObj = updatedUser; changedTripRequestStatus(driverObj, tripRequestObj); } else if (userType === "2") { SocketStore.emitByUserId( tripRequestObj.driverId, "updateRiderLocation", updatedUser.gpsLoc ); } } else { TripSchema.findOneAsync({ $and: [searchObj, { tripStatus: "onTrip" }] }) .then(tripObj => { if (tripObj) { if (userType === "1") { SocketStore.emitByUserId( tripObj.riderId, "updateDriverLocation", updatedUser.gpsLoc ); SocketStore.emitByUserId( "59428b1bb0c3cc0f554fd52a", "getDriverDetails", updatedUser.gpsLoc ); } else if (userType === "2") { SocketStore.emitByUserId( tripObj.driverId, "updateRiderLocation", updatedUser.gpsLoc ); } } else { // no corresponding rider or driver found to emit the update location } }) .error(e => { SocketStore.emitByUserId(userID, "socketError", e); }); } }) .error(e => { SocketStore.emitByUserId(userID, "socketError", e); }); }) .error(e => { SocketStore.emitByUserId(userID, "socketError", e); }); }); } function changedTripRequestStatus(driverObj, tripRequestObj) { let dist = gpsDistannce( driverObj.gpsLoc[1], driverObj.gpsLoc[0], tripRequestObj.srcLoc[1], tripRequestObj.srcLoc[0] ); let newTripRequestStatus = null; const currentTripRequestStatus = tripRequestObj.tripRequestStatus; dist = dist.toFixed(4) * 1000; // dist in meters console.log("gps location driver", driverObj.gpsLoc); console.log("distance %%%%%%%%", dist); if (dist <= config.arrivedDistance) { newTripRequestStatus = "arrived"; } else if (dist > config.arrivedDistance && dist < config.arrivingDistance) { newTripRequestStatus = "arriving"; } else { newTripRequestStatus = "enRoute"; } if (newTripRequestStatus !== currentTripRequestStatus) { tripRequestObj.tripRequestStatus = newTripRequestStatus; TripRequestSchema.findOneAndUpdateAsync( { _id: tripRequestObj._id }, { $set: tripRequestObj }, { new: true } ) .then(updatedTripRequestObj => { fetchReturnObj(updatedTripRequestObj).then(updatedTripRequestObj123 => { if (updatedTripRequestObj123.tripRequestStatus === "arrived") { sendNotification( updatedTripRequestObj.riderId, `Driver has ${updatedTripRequestObj123.tripRequestStatus}` ); sendNotification( updatedTripRequestObj.driverId, updatedTripRequestObj123.tripRequestStatus ); } else { sendNotification( updatedTripRequestObj.riderId, `Driver is ${updatedTripRequestObj123.tripRequestStatus}` ); sendNotification( updatedTripRequestObj.driverId, updatedTripRequestObj123.tripRequestStatus ); } SocketStore.emitByUserId( updatedTripRequestObj.riderId, "tripRequestUpdated", updatedTripRequestObj123 ); SocketStore.emitByUserId( updatedTripRequestObj.driverId, "tripRequestUpdated", updatedTripRequestObj123 ); }); }) .error(err => { SocketStore.emitByUserId(tripRequestObj.riderId, "socketError", { message: "error while updating tripRequestStatus based on distance", data: err }); SocketStore.emitByUserId(tripRequestObj.driverId, "socketError", { message: "error while updating tripRequestStatus based on distance", data: err }); }); } } export default updateLocationHandler; <file_sep>import SocketStore from '../../service/socket-store.js'; //eslint-disable-line import UserSchema from '../../models/user'; /** * user handler, handle update of the driver availability and send to riders * * @param socket object * @returns {*} */ /* eslint-disable */ function userHandler(socket) { socket.on('updateAvailable', userObj => { const userType = userObj.userType; let searchObj = {}; if (userType === 'driver') { searchObj = { driverId: userObj._id, }; } const userID = userObj._id; UserSchema.findOneAndUpdateAsync({ _id: userID }, { $set: { isAvailable: userObj.isAvailable } }, { new: true }) .then(updatedUser => { SocketStore.emitByUserId(userID, 'updateAvailable', updatedUser); SocketStore.emitToAll('updateAvailable', updatedUser); }) .error(e => { SocketStore.emitByUserId(userID, 'socketError', e); }); }); } export default userHandler; <file_sep>/* eslint-disable */ import nodemailer from "nodemailer"; import smtpTransport from "nodemailer-smtp-transport"; import path from "path"; import ServerConfig from "../models/serverConfig"; import UserSchema from "../models/user"; import TripRequestSchema from "../models/trip-request"; const EmailTemplate = require("email-templates"); const registerDir = path.resolve(__dirname, "../templates", "register"); const register = new EmailTemplate(path.join(registerDir)); const endtripDir = path.resolve(__dirname, "../templates", "endTrip"); const endTrip = new EmailTemplate(path.join(endtripDir)); const forgotDir = path.resolve(__dirname, "../templates", "forgotPassword"); const forgot = new EmailTemplate(path.join(forgotDir)); const rideAcceptDir = path.resolve(__dirname, "../templates", "rideAccept"); const rideAccept = new EmailTemplate(path.join(rideAcceptDir)); const emailDir = path.resolve(__dirname, "../templates", "emailVerify"); const emailVerify = new EmailTemplate(path.join(emailDir)); var fname = ''; var lname = ''; var userType = ''; function getEmailApiDetails() { return new Promise((resolve, reject) => { ServerConfig.findOneAsync({ key: "emailConfig" }) .then(foundDetails => { resolve(foundDetails.value); }) .catch(err => { reject(err); }); }); } function sendEmail(userId, responseObj, type) { UserSchema.findOneAsync({ _id: userId }).then(userObj => { getEmailApiDetails().then(details => { console.log(details, "check emailApiDetails"); const transporter = nodemailer.createTransport( smtpTransport({ host: details.host, port: details.port, secure: details.secure, // secure:true for port 465, secure:false for port 587 auth: { user: details.username, pass: <PASSWORD> } }) ); const locals = Object.assign({}, { data: responseObj }); fname = userObj.fname lname = userObj.lname userType = userObj.userType if (type === "emailVerify") { const emailVerify = `<html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> <meta name="viewport" content="width=device-width; initial-scale=1.0; maximum-scale=1.0;" /> <title>Taxi App</title> <link href="https://fonts.googleapis.com/css?family=Open+Sans:400,600" rel="stylesheet"> <style type="text/css"> * { font-family: 'Open Sans', sans-serif; font-weight: 300; } </style> </head> <body style="padding:0; margin:0; background: #f4f4f4;"> <table style="max-width: 700px; width: 100%; border: 0; margin:20px auto;"> <tr> <td> <table style="border: 1px solid #d7d7d7; width: 100%; max-width: 700px; background: white; position:relative "> <tr> <td style="background: #1B557A; height: auto; vertical-align: middle;"> <div style=" padding: 10px 15px"> <img style="width: 50px; border-radius: 50%;" src="http://res.cloudinary.com/dujorqozx/image/upload/v1587979921/yhrgxsgrhgviutgxsoqc.png" alt="KlixxApp" /> <span style="display: inline-block; padding-left: 15px; vertical-align: top; color: #f4f4f4; font-size: 20px;"><br />KlixxApp</span> </div> </td> </tr> <tr> <td style="padding:40px 20px 0px 20px;"> <h1 style="font-weight: 300;"> Hi `+ fname +` `+ lname +` </h1> <div style="font-size:24px;"> <p style="color: #666;">ThankYou for registering with us as `+userType+` <p style="color: #666;">Please verify your email address to further secure your KlixxApp Account</p> <button type="button" style="background-color: #4CAF50; border: none;color: white;padding: 15px 32px; text-decoration: none;display: block; font-size: 16px;margin-left:auto;margin-right: auto;"> <a style="text-decoration: none; color: #fff" href="https://www.klixx.app/"> Verify Your Email</a> </button> <p style="color: #666;"><br />Enjoy using Klixx App !!</p> </div> <p style="margin:60px 0 40px 0; font-size: 18px;">Thanks<br /> KlixxApp Team</p> <!-- <h1 style="text-align: center; font-weight: 300; font-size: 50px; margin:0;"> <p style="margin:60px 0 40px 0; font-size: 18px;"> <a href="https://www.klixx.app/" style="color:#ddd">Click here</a><br /> To verify your email</p> </h1> --> </td> </tr> <tr> <td> <div style="background: #1B557A; height: 30px; width: 100%; position: absolute;text-align:center; padding-top: 10px;"> <a href="https://www.klixx.app/" style="color:#ddd">Contact Us</a> </div> </td> </tr> </table> </td> </tr> </table> </body> </html>` // emailVerify.render(locals, (err, results) => { //eslint-disable-line // if (err) { // return console.error(err); //eslint-disable-line // } const mailOptions = { from: "<EMAIL>",//details.username, // sender address to: userObj.email, // list of receivers subject: "Verify your Account with Klixx App", // Subject line // text: results.text, // plain text body html: emailVerify // html body }; transporter.sendMail(mailOptions, (error, info) => { if (error) { console.log("error in emailApi", error); return error; } console.log("result in emailApi", info); return info; }); // }); } if (type === "register") { const registerContent = `<html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> <meta name="viewport" content="width=device-width; initial-scale=1.0; maximum-scale=1.0;" /> <title>Taxi App</title> <link href="https://fonts.googleapis.com/css?family=Open+Sans:400,600" rel="stylesheet"> <style type="text/css"> * { font-family: 'Open Sans', sans-serif; font-weight: 300; } </style> </head> <body style="padding:0; margin:0; background: #f4f4f4;"> <table style="max-width: 700px; width: 100%; border: 0; margin:20px auto;"> <tr> <td> <table style="border: 1px solid #d7d7d7; width: 100%; max-width: 700px; background: white; position:relative "> <tr> <td style="background: #1B557A; height: auto; vertical-align: middle;"> <div style=" padding: 10px 15px"> <img style="width: 50px; border-radius: 50%;" src="http://res.cloudinary.com/dujorqozx/image/upload/v1587979921/yhrgxsgrhgviutgxsoqc.png" alt="KlixxApp" /> <span style="display: inline-block; padding-left: 15px; vertical-align: top; color: #f4f4f4; font-size: 20px;"><br />KlixxApp</span> </div> </td> </tr> <tr> <td style="padding:40px 20px 0px 20px;"> <h1 style="font-weight: 300;"> Hi `+ fname +` `+ lname +`</h1> <div style="font-size:24px;"> <p>ThankYou for registering with us as `+ userType +`<br /><br /> Enjoy using Klixx App !!.</p> </div> <p style="margin:60px 0 40px 0; font-size: 18px;">Thanks<br /> KlixxApp Team</p> <h1 style="text-align: center; font-weight: 300; font-size: 50px; margin:0;"> </h1> </td> </tr> <tr> <td> // <div style="background: #1B557A; height: 30px; width: 100%; position: absolute;text-align:center; padding-top: 10px;"> // // <a href="https://www.klixx.app/" style="color:#ddd">Contact Us</a> // </div> </td> </tr> </table> </td> </tr> </table> </body> </html>` console.log("this is no error") registerContent const mailOptions = { from: "<EMAIL>",//details.username, // sender address to: userObj.email, // list of receivers subject: "Your Account with Klixx app is created", // Subject line html: registerContent, // plain text body // html: results.html // html body }; transporter.sendMail(mailOptions, (error, info) => { if (error) { console.log("error in emailApi", error); return error; } console.log("result in emailApi", info); return info; }); // }); } if (type === "endTrip") { const endTrip= `<html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> <meta name="viewport" content="width=device-width; initial-scale=1.0; maximum-scale=1.0;" /> <title>Taxi App</title> <link href="https://fonts.googleapis.com/css?family=Open+Sans:400,600" rel="stylesheet"> <style type="text/css"> * { font-family: 'Open Sans', sans-serif; font-weight: 300; } </style> </head> <body style="padding:0; margin:0; background: #f4f4f4;"> <table style="max-width: 700px; width: 100%; border: 0; margin:20px auto;"> <tr> <td> <table style="border: 1px solid #d7d7d7; width: 100%; max-width: 700px; background: white; padding-bottom: 15px; "> <tr> <td style="background: #1B557A; height: auto; vertical-align: middle;"> <div style=" padding:20px 250px"> <img style="width: 50px; border-radius: 50%;" src="http://res.cloudinary.com/dujorqozx/image/upload/v1587979921/yhrgxsgrhgviutgxsoqc.png" alt="KlixxApp" /> <span style="display: inline-block; padding-left: 15px; vertical-align: top; color: #f4f4f4; font-size: 20px;"><br />KlixxApp</span> </div> </td> </tr> <tr> <td style="padding:40px 20px 0px 20px;"> <h2 style="font-weight: 300; text-align: center;">Thank you for travelling with us, </h2> <h1 style="text-align: center; font-weight: 300; font-size: 80px; margin:0;"> <span style="font-weight: 300; font-size: 30px;"> $ </span> `+ userObj.tripAmt ` </h1> </td> </tr> <tr> <td style="padding: 20px " width="100%"> <!-- <h3 style="font-size: 21px; margin:0;"> Bill details</h3> <table style="width: 100%; border: solid 1px #ddd;" cellpadding="10" width="100%" cellspacing="0"> <tr style="background: #eee;"> <td>Ride fare</td> <td style="font-weight: 500; text-align: right; padding-right: 20px;">20 Rs</td> </tr> <tr> <td>Taxes </td> <td style="font-weight: 500; text-align: right; padding-right: 20px;">5 Rs</td> </tr> <tr style="background: #eee;"> <td>Total fare</td> <td style="font-weight: 500; text-align: right; padding-right: 20px;">25 Rs</td> </tr> </table> --> <table width="100%;" style="padding: 20px 0; color: green;"> <tr> <td> Driver Name: <span style="font-weight: 600;">`+userObj.fname +``+ userObj.lname+`</span> </td> <td style="text-align: right;"> Contact Details: <span style="font-weight: 600;">`+ userObj.driver.phoneNo+` </span> </td> </tr> <tr> <td> Client Name: <span style="font-weight: 600;">`+ userObj.rider.fname + ` `+ userObj.rider.lname +`</span> </td> <td style="text-align: right;"> Contact Details: <span style="font-weight: 600;">`+ userObj.rider.phoneNo +`</span> </td> </tr> </table> <div style="border-top:solid 1px #ddd; border-bottom: solid 1px #ddd; padding: 20px 0;"> <h3 style="font-weight: 500; margin: 0 0 5px 0;"> Booking <span style="color: #1B557A; font-weight: 600; margin:5px 0; font-size: 16px; display: inline-block; margin-left: 10px;">Time:`+ userObj.bookingTime +`</span> </h3> <address style="color: #666;"> Full address: `+ userObj.pickUpAddress +` </address> <h3 style="font-weight: 500; margin: 20px 0 5px 0;"> Drop <span style="color: #1B557A; font-weight: 600; margin:5px 0; font-size: 16px; display: inline-block; margin-left: 10px;">Time: `+ userObj.tripEndTime +`</span> </h3> <address style="color: #666;"> Full address: `+ userObj.destAddress +` </address> </div> <table width="100%" style="text-align: center; margin-top:20px;"> <tr> <th width="50%" style="font-weight: 500; font-size: 16px;">Payment Mode</th> <th width="50%" style="font-weight: 500; font-size: 16px;">Total Amount</th> </tr> <tr style="color: green; margin:5px 0; font-size: 26px;"> <td>Card</td> <td>$`+ userObj.tripAmt +`</td> </tr> </table> </td> </tr> </table> </td> </tr> </table> </body> </html>` // endTrip.render(locals, (err, results) => { if (err) { return console.error(err); } const mailOptions = { from: "<EMAIL>",//details.username, // sender address to: userObj.email, // list of receivers subject: "Details with Klixx App", // Subject line text: results.text, // plain text body html: results.html // html body }; transporter.sendMail(mailOptions, (error, info) => { if (error) { console.log("error in emailApi", error); return error; } console.log("result in emailApi", info); return info; }); // }); } if (type === "forgot") { forgot.render(locals, (err, results) => { if (err) { return console.error(err); } const mailOptions = { from: "<EMAIL>",//details.username, // sender address to: userObj.email, // list of receivers subject: "Your Account Password with Klixx App", // Subject line text: results.text, // plain text body html: results.html // html body }; transporter.sendMail(mailOptions, (error, info) => { if (error) { console.log("error in emailApi", error); return error; } console.log("result in emailApi", info); return info; }); }); } if (type === "rideAccept") { rideAccept.render(locals, (err, results) => { if (err) { return console.error(err); } const mailOptions = { from: "<EMAIL>",//details.username, // sender address to: userObj.email, // list of receivers subject: "Klixx App Driver Details", // Subject line text: results.text, // plain text body html: results.html // html body }; transporter.sendMail(mailOptions, (error, info) => { if (error) { console.log("error in emailApi", error); return error; } console.log("result in emailApi", info); return info; }); }); } if (type === "requestTripForPhotoGrapher") { // forgot.render(locals, (err, results) => { // if (err) { // return console.error(err); // } // var searchObj ={}; // searchObj.driverId = responseObj.riderId; var DriverInfor = UserSchema.findOneAsync({ _id: responseObj.riderId }) .then(foundUser => { if (foundUser !== null) { console.log(foundUser); console.log("foundUser"); const requestTripForPhotoGrapher = `<html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> <meta name="viewport" content="width=device-width; initial-scale=1.0; maximum-scale=1.0;" /> <title>Taxi App</title> <link href="https://fonts.googleapis.com/css?family=Open+Sans:400,600" rel="stylesheet"> <style type="text/css"> * { font-family: 'Open Sans', sans-serif; font-weight: 300; } </style> </head> <body style="padding:0; margin:0; background: #f4f4f4;"> <table style="max-width: 700px; width: 100%; border: 0; margin:20px auto;"> <tr> <td> <table style="border: 1px solid #d7d7d7; width: 100%; max-width: 700px; background: white; position:relative "> <tr> <td style="background: #1B557A; height: auto; vertical-align: middle;"> <div style=" padding: 10px 15px"> <img style="width: 50px; border-radius: 50%;" src="http://res.cloudinary.com/dujorqozx/image/upload/v1587979921/yhrgxsgrhgviutgxsoqc.png" alt="KlixxApp" /> <span style="display: inline-block; padding-left: 15px; vertical-align: top; color: #f4f4f4; font-size: 20px;"><br />KlixxApp</span> </div> </td> </tr> <tr> <td style="padding:40px 20px 0px 20px;"> <h1 style="font-weight: 300;"> Hi `+ fname +` `+ lname +`</h1> <div style="font-size:24px;"> <p> Photo Session Location - `+responseObj.pickUpAddress+` </p> <p> Payment Mode - Card </p> <p> Client Name - `+foundUser.fname+` `+foundUser.lname+` </p> </div> <p style="margin:60px 0 40px 0; font-size: 18px;">Thanks<br /> KlixxApp Team</p> <h1 style="text-align: center; font-weight: 300; font-size: 50px; margin:0;"> </h1> </td> </tr> <tr> <td> <div style="background: #1B557A; height: 30px; width: 100%; position: absolute;text-align:center; padding-top: 10px;"> <a href="https://www.klixx.app/" style="color:#ddd">Contact Us</a> </div> </td> </tr> </table> </td> </tr> </table> </body> </html>` const mailOptions = { from: "<EMAIL>",//details.username, // sender address to: userObj.email, // list of receivers subject: "Photo Session Request", // Subject line // text: results.text, // plain text body html: requestTripForPhotoGrapher // html body }; transporter.sendMail(mailOptions, (error, info) => { if (error) { console.log("error in emailApi", error); return error; } console.log("result in emailApi", info); return info; }); // return res.send(returnObj); } }) // console.log(DriverInfor) // }); } }); }); } export default sendEmail; <file_sep>export default { env: "development", jwtSecret: "<KEY>", //db: 'mongodb://localhost/taxiApp-development', //db: "mongodb://root:Password!<EMAIL>:41557/nitprise-development", // db:"mongodb+srv://nitprise:[email protected]/klixx?retryWrites=true&w=majority", db:"mongodb+srv://rajeev:[email protected]/klixx?retryWrites=true&w=majority", port: 3010, passportOptions: { session: false }, radius: 50 / 6378, // where 20 Kms is used as radius to find nearby driver arrivedDistance: 200, arrivingDistance: 1000, limit: 10, skip: 0, tripFilter: "All" }; <file_sep>import httpStatus from "http-status"; import jwt from "jsonwebtoken"; import APIError from "../helpers/APIError"; import config from "../../config/env"; import UserSchema from "../models/user"; /** * Returns jwt token and user object if valid email and password is provided * @param req (email, password, userType) * @param res * @param next * @returns {jwtAccessToken, user} */ function testServer(req, res, next) { return res.send({ success: true, message: "Server Test Running.", }); } function loginadmin(req, res, next) { UserSchema.findOneAsync( { email: req.body.email, $or: [{ userType: "admin" }, { userType: "superAdmin" }] }, "+password" ) .then(user => { //eslint-disable-line if (!user) { const err = new APIError( "User not found with the given email id", httpStatus.NOT_FOUND ); return next(err); } else { user.comparePassword(req.body.password, (passwordError, isMatch) => { //eslint-disable-line if (passwordError || !isMatch) { const err = new APIError( "Incorrect password", httpStatus.UNAUTHORIZED ); return next(err); } user.loginStatus = true; user.gpsLoc = [req.body.lat, req.body.lon]; const token = jwt.sign(user.toJSON(), config.jwtSecret); UserSchema.findOneAndUpdateAsync( { _id: user._id }, { $set: user }, { new: true } ) //eslint-disable-line .then(updatedUser => { const returnObj = { success: true, message: "user successfully logged in", data: { jwtAccessToken: `JWT ${token}`, user: updatedUser } }; res.json(returnObj); }) .error(err123 => { const err = new APIError( `error in updating user details while login ${err123}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); }); } }) .error(e => { const err = new APIError( `erro while finding user ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); } function login(req, res, next) { console.log('Auth login') const userObj = { email: req.body.email, userType: req.body.userType }; UserSchema.findOneAsync(userObj, "+password") .then(user => { //eslint-disable-line if (!user) { const err = new APIError( "User not found with the given email id", httpStatus.NOT_FOUND ); return next(err); } else { user.comparePassword(req.body.password, (passwordError, isMatch) => { //eslint-disable-line if (passwordError || !isMatch) { const err = new APIError( "Incorrect password", httpStatus.UNAUTHORIZED ); return next(err); } user.loginStatus = true; user.gpsLoc = [req.body.lat, req.body.lon]; const token = jwt.sign(user.toJSON(), config.jwtSecret); UserSchema.findOneAndUpdateAsync( { _id: user._id }, { $set: user }, { new: true } ) //eslint-disable-line .then(updatedUser => { const returnObj = { success: true, message: "user successfully logged in", data: { jwtAccessToken: `JWT ${token}`, user: updatedUser } }; res.json(returnObj); }) .error(err123 => { const err = new APIError( `error in updating user details while login ${err123}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); }); } }) .error(e => { const err = new APIError( `erro while finding user ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); } /** This is a protected route. Change login status to false and send success message. * @param req * @param res * @param next * @returns success message */ function logout(req, res, next) { const userObj = req.user; if (userObj === undefined || userObj === null) { console.log("user obj is null or undefined inside logout function"); //eslint-disable-line } userObj.loginStatus = false; userObj.isAvailable = false; UserSchema.findOneAndUpdate( { _id: userObj._id, loginStatus: true }, { $set: userObj }, { new: true }, (err, userDoc) => { //eslint-disable-line if (err) { const error = new APIError( "error while updateing login status", httpStatus.INTERNAL_SERVER_ERROR ); next(error); } if (userDoc) { const returnObj = { success: true, message: "user logout successfully" }; res.json(returnObj); } else { const error = new APIError("user not found", httpStatus.NOT_FOUND); next(error); } } ); } // { $or: [{ email: req.body.email }, { phoneNo: req.body.phoneNo }] } function checkUser(req, res) { UserSchema.findOneAsync({ email: req.body.email }) .then(foundUser => { if (foundUser !== null) { const jwtAccessToken = jwt.sign(foundUser.toJSON(), config.jwtSecret); const returnObj = { success: true, message: "User Exist", data: {} }; returnObj.data = { user: foundUser, jwtAccessToken: `JWT ${jwtAccessToken}` }; return res.send(returnObj); } else { const returnObj = { success: true, message: "New User" }; return res.send(returnObj); } }) .catch(error => { console.log(error); //eslint-disable-line }); } export default { login, logout, checkUser, loginadmin, testServer }; <file_sep>import express from "express"; import adminRoutes from "./admin"; import authRoutes from "./auth"; import configRoutes from "./config"; import paymentRoutes from "./payment"; import postRoutes from "./post"; import albumRoutes from "./album"; import syncDataRoute from "./sync-data"; import tripRoutes from "./trip"; import userRoutes from "./user"; import verifyRoutes from "./verify"; import notificationRoute from './notification'; import albumImage from './albumImage'; import userNameCtrl from "../controllers/username"; const router = express.Router(); /** GET /health-check - Check service health */ router.get("/health-check", (req, res) => res.send("OK")); router.get("/", (req, res) => res.send("OK")); // mount user routes at /verify router.use("/verify", verifyRoutes); // mount user routes at /users router.use("/users", userRoutes); // mount check-username routes at /check-username router.get("/check-username", userNameCtrl.getUsername); // mount user routes at /users router.use("/config", configRoutes); // mount auth routes at /auth router.use("/auth", authRoutes); // mount trip routes at /trips router.use("/trips", tripRoutes); // mount sync data route at /sync-data router.use("/syncData", syncDataRoute); // mount admin routes at /admin router.use("/admin", adminRoutes); // mount payment routes at /payment router.use("/payment", paymentRoutes); // mount post routes at /posts router.use("/posts", postRoutes); router.use("/album", albumRoutes); //mount notification routes at /notification router.use('/notification', notificationRoute) router.use('/albumImage', albumImage) export default router; <file_sep>import Expo from 'expo-server-sdk'; import UserSchema from '../models/user'; // To check if something is a push token // const isPushToken = Exponent.isExponentPushToken(somePushToken); const expo = new Expo(); function sendNotification(userId, notification) { UserSchema.findOneAsync({ _id: userId }).then((userObj) => { try { const isPushToken = Expo.isExponentPushToken(userObj.pushToken); if (isPushToken) { const receipts = expo.sendPushNotificationsAsync([ { to: userObj.pushToken, sound: 'default', body: notification, data: { withSome: notification }, }, ]); // console.log(receipts); return receipts; } } catch (error) { return error; // console.error(error); } }); } export default sendNotification; <file_sep>import express from 'express'; import httpStatus from 'http-status'; import passport from 'passport'; import APIError from '../helpers/APIError'; import config from '../../config/env'; import paymentCtrl from '../controllers/payment'; const router = express.Router(); /** * Middleware for protected routes. All protected routes need token in the header in the form Authorization: JWT token */ router.use((req, res, next) => { passport.authenticate('jwt', config.passportOptions, (error, userDtls, info) => { //eslint-disable-line if (error) { const err = new APIError('token not matched', httpStatus.INTERNAL_SERVER_ERROR); return next(err); } else if (userDtls) { req.user = userDtls; next(); } else { const err = new APIError(`token is valid but no user found ${info}`, httpStatus.UNAUTHORIZED); return next(err); } })(req, res, next); }); /** GET /api/payment - Returns wallet balance details for the user */ // router.route('/') // .post(paymentCtrl.payAll); /** GET /api/payment/wallet - Returns wallet balance details for the rider triver and owner */ router.route('/wallet') .post(paymentCtrl.addBalance); /** GET /api/payment/amount - Returns wallet balance details for the user */ router.route('/amount') .post(paymentCtrl.getBalance); router.route('/checkSaveCard') .post(paymentCtrl.checkSaveCard); router.route('/removeCard') .post(paymentCtrl.removeCard); router.route('/addCard') .post(paymentCtrl.addCard); router.route('/cardPayment') .post(paymentCtrl.cardPayment); router.route('/updateCard') .post(paymentCtrl.updateCard); router.route('/earning') .post(paymentCtrl.earning); export default router; <file_sep>import mongoose from 'mongoose'; const Schema = mongoose.Schema; const WalletSchema = new Schema({ userEmail: { type: String, default: null }, userId: { type: Schema.Types.ObjectId, ref: 'User' }, userType: { type: String, default: 'rider' }, stripeAccountId: { type: String, default: null }, walletBalance: { type: Number, default: 0 }, createdAt: { type: Date, default: Date.now }, updatedAt: { type: Date, default: Date.now } }); export default mongoose.model('Wallet', WalletSchema); <file_sep>import express from 'express'; import verifyCtrl from '../controllers/verify'; const router = express.Router(); router.route('/email') .post(verifyCtrl.emailVerify) .put(verifyCtrl.emailVerify) .get(verifyCtrl.emailVerify); // /** GET /api/verify/mobileVerify - */ router.route('/mobile') .get(verifyCtrl.mobileVerify) .post(verifyCtrl.mobileVerify); export default router; <file_sep>import express from 'express'; import httpStatus from 'http-status'; import passport from 'passport'; import APIError from '../helpers/APIError'; import config from '../../config/env'; import syncDataCtrl from '../controllers/sync-data'; const router = express.Router(); /** * Middleware for protected routes. All protected routes need token in the header in the form Authorization: JWT token */ router.use((req, res, next) => { passport.authenticate('jwt', config.passportOptions, (error, userDtls, info) => { //eslint-disable-line if (error) { const err = new APIError('token not matched', httpStatus.INTERNAL_SERVER_ERROR); return next(err); } else if (userDtls) { req.user = userDtls; next(); } else { const err = new APIError(`token is valid but no user found ${info}`, httpStatus.UNAUTHORIZED); return next(err); } })(req, res, next); }); /** GET /api/syncData - Returns tripRequest or trip object if user is in any trip */ router.route('/') .get(syncDataCtrl.getSyncData); export default router; <file_sep>/* eslint-disable */ // Import async.js - utility library for handlng asynchronous calls const async = require('async'); let databaseName = 'taxiApp-development'; const nodeEnv = process.env.NODE_ENV; console.log("nodeEnv--"+nodeEnv) if (nodeEnv === 'development') databaseName = 'taxiApp-development'; if (nodeEnv === 'production') databaseName = 'taxiApp-api-production'; const databaseURL = `mongodb://localhost:27017/${databaseName}`; const mongoose = require('mongoose'); const bcrypt = require('bcrypt-nodejs'); console.log("admin.js---"+databaseURL) const userSchema = new mongoose.Schema({ fname: { type: String, default: null }, lname: { type: String, default: null }, email: { type: String, required: true }, password: { type: String, required: true, select: false }, userType: { type: String, default: 'admin' } }); // Mongoose middleware that is called before save to hash the password userSchema.pre('save', function (next, err) { //eslint-disable-line const user = this; const SALT_FACTOR = 10; console.log(err); //eslint-disable-line if (!user.isNew) { // && !user.isModified('password') return next(); } // Encrypt password before saving to database bcrypt.genSalt(SALT_FACTOR, (error, salt) => { //eslint-disable-line if (error) return next(error); bcrypt.hash(user.password, salt, null, (errors, hash) => { //eslint-disable-line if (errors) return next(errors); user.password = hash; next(); }); }); }); const User = mongoose.model('User', userSchema); async.series( [ function (callback) { //eslint-disable-line mongoose.connect(databaseURL); mongoose.connection.on('connected', () => { console.log('db connected via mongoose'); //eslint-disable-line callback(null, 'SUCCESS - Connected to mongodb'); }); }, (callback) => { const users = []; const user = new User({ fname: 'Rishabh', lname: 'Pandey', email: '<EMAIL>', password: '<PASSWORD>', userType: 'admin' }); users.push(user); console.log('Populating database with %s users', users.length); async.eachSeries( users, (admin, userSavedCallBack) => { user.save((err) => { if (err) { console.dir(err); } console.log('Saving user #%s', user.name); userSavedCallBack(); }); }, (err) => { if (err) { console.dir(err); } console.log('Finished aysnc.each in seeding db'); callback(null, 'SUCCESS - Seed database'); } ); } ], (err, results) => { console.log('\n\n--- Database seed progam completed ---'); if (err) { console.log('Errors = '); console.dir(err); } else { console.log('Results = '); console.log(results); } console.log('\n\n--- Exiting database seed progam ---'); process.exit(0); } ); <file_sep>/* eslint-disable */ import config from '../../../config/env'; import SocketStore from '../../service/socket-store.js'; //eslint-disable-line import UserSchema from '../../models/user'; function nearbyDriverHandler(socket) { socket.on('updatePickupRegion', userRegion => { // get the rider id // update the coordinates in database // for simulation emit coordinates to all connected drivers // fire query to get nearby drivers from database // emit the resultant array in callback const coordinates = [userRegion.region.longitude, userRegion.region.latitude]; const userId = userRegion.user._id; // console.log(userId, '========================='); // for simulation only // socket.broadcast.emit('riderMapCoordinates', coordinates); // simulation ends UserSchema.findOneAndUpdateAsync({ _id: userId }, { $set: { mapCoordinates: coordinates } }, { new: true }) .then(updatedUser => UserSchema.findAsync({ $and: [ { gpsLoc: { $geoWithin: { $centerSphere: [updatedUser.mapCoordinates, config.radius], }, }, }, { currTripId: null, currTripState: null }, { loginStatus: true }, { userType: '2' }, { isAvailable: true }, ], }) ) .then(driverArray => { if (driverArray) { console.log(driverArray.length, 'driverArray'); SocketStore.emitByUserId(userId, 'nearByDriversList', driverArray); } }); }); } export default nearbyDriverHandler; <file_sep>import httpStatus from "http-status"; import APIError from "../helpers/APIError"; import config from "../../config/env"; import UserSchema from "../models/user"; import uuid from "uuid"; const debug = require("debug")("Taxi-app-backend-web-dashboard: admin-user"); function getAllUsers(req, res, next) { const limit = req.query.limit ? req.query.limit : config.limit; const pageNo = req.query.pageNo; const skip = pageNo ? (pageNo - 1) * limit : config.skip; const userType = req.query.userType; debug(`skip value: ${req.query.pageNo}`); UserSchema.countAsync({ userType }) .then(totalUserRecord => { //eslint-disable-line const returnObj = { success: true, message: `no of ${userType}s are zero`, // `no of active drivers are ${returnObj.data.length}`; data: null, meta: { totalNoOfPages: Math.ceil(totalUserRecord / limit), limit, currPageNo: pageNo, currNoOfRecord: 20 } }; if (totalUserRecord < 1) { return res.send(returnObj); } if (skip > totalUserRecord) { const err = new APIError( "Request Page does not exists", httpStatus.NOT_FOUND ); return next(err); } UserSchema.find({ userType }) .limit(limit) .skip(skip) .then(userData => { returnObj.data = transformReturnObj(userData); returnObj.message = `${userType}s found`; returnObj.meta.currNoOfRecord = returnObj.data.length; debug(`no of records are ${returnObj.meta.currNoOfRecord}`); return res.send(returnObj); }) .catch(err => { res.send("Error", err); }); }) .error(e => { const err = new APIError( `error occured while counting the no of users ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); debug("error inside getAllUsers records"); next(err); }); } function getTotalUsers(req, res) { // new users list UserSchema.find() .then(foundUser => { res.send(foundUser); }) .catch(err => { res.send("Error", err); }); } function getApprovePendingUsers(req, res, next) { const userType = req.query.userType; UserSchema.find({ $and: [{ userType }, { isApproved: "false" }] }) .then(foundPendingUsers => { const returnObj = { success: false, message: `no of pending ${userType}s are zero`, data: null, meta: { totalRecords: 0 } }; returnObj.data = foundPendingUsers; if (returnObj.data.length > 0) { returnObj.success = true; returnObj.message = `no of pending users are ${returnObj.data.length}`; returnObj.meta.totalRecords = `${returnObj.data.length}`; res.send(returnObj); } else { res.send(returnObj); } }) .catch(err => { next(err); }); } function approveUser(req, res, next) { console.log('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@'); const id = req.query.id; UserSchema.findOneAndUpdateAsync({ _id: id }, { $set: { isApproved: true } }) .then(userUpdateData => { const returnObj = { success: false, message: "unable to update user , user id provided didnt match ", data: null }; returnObj.data = userUpdateData; console.log(returnObj, ">>>>>>>>>>>RETURNOBJECT<<<<<<<<<<<<<") if (returnObj.data) { returnObj.success = "true"; returnObj.message = "user updated"; res.send(returnObj); } }) .catch(err => { console.log(err, '37645834658346583475634853648573'); next(err); }); } function rejectUser(req, res, next) { // findOneAndRemove const id = req.query.id; UserSchema.findOneAndRemoveAsync({ _id: id }) .then(rejectUserData => { const returnObj = { success: false, message: "unable to delete user , user id provided didnt match ", data: null }; returnObj.data = rejectUserData; if (returnObj.data) { returnObj.success = "true"; returnObj.message = "user deleted"; res.send(returnObj); } }) .catch(err => { next(err); }); } function getActiveDriverDetails(req, res, next) { UserSchema.find({ $and: [ { userType: "driver" }, { loginStatus: "true" }, { isAvailable: "true" } ] }) .then(foundActiveDrivers => { const returnObj = { success: false, message: "no of active drivers are zero", data: null, meta: { totalRecords: 0 } }; returnObj.data = foundActiveDrivers; if (returnObj.data.length > 0) { returnObj.success = "true"; returnObj.message = `no of active drivers are ${returnObj.data.length}`; returnObj.meta.totalRecords = `${returnObj.data.length}`; res.send(returnObj); } else { returnObj.success = "false"; returnObj.message = `no of active drivers are ${returnObj.data.length}`; returnObj.meta.totalRecords = `${returnObj.data.length}`; res.send(returnObj); } }) .catch(err => { next(err); }); } function getActiveCustomerDetails(req, res, next) { UserSchema.find({ $and: [{ userType: "rider" }, { loginStatus: "true" }] }) .then(foundActiveCustomers => { const returnObj = { success: false, message: "no of active customers are zero", data: null, meta: { totalRecords: 0 } }; returnObj.data = foundActiveCustomers; if (returnObj.data.length > 0) { returnObj.success = "true"; returnObj.message = `no of active customers are ${ returnObj.data.length }`; returnObj.meta.totalRecords = `${returnObj.data.length}`; res.send(returnObj); } }) .catch(err => { next(err); }); } function getUsersDetails(req, res, next) { const userId = req.params.userId; const returnObj = { success: false, message: "user Id is not defined", data: null }; if (userId) { UserSchema.findByIdAsync(userId) .then(userData => { if (userData) { returnObj.success = true; returnObj.message = "user found and its corresponding details"; returnObj.data = userData; } else { returnObj.success = false; returnObj.message = "user not found with the given id"; returnObj.data = null; } res.send(returnObj); }) .error(e => { const err = new APIError( `Error occured while findind the user details ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); } else { res.send(returnObj); } } function updateUserDetails(req, res, next) { const userId = req.body._id; //eslint-disable-line const updateUserObj = Object.assign({}, req.body); UserSchema.findOneAsync({ _id: userId }) .then(userDoc => { if (userDoc) { userDoc.fname = updateUserObj.fname ? updateUserObj.fname : userDoc.fname; userDoc.lname = updateUserObj.lname ? updateUserObj.lname : userDoc.lname; userDoc.phoneNo = updateUserObj.phoneNo ? updateUserObj.phoneNo : userDoc.phoneNo; userDoc.address = updateUserObj.address ? updateUserObj.address : userDoc.address; userDoc.city = updateUserObj.city ? updateUserObj.city : userDoc.city; userDoc.state = updateUserObj.state ? updateUserObj.state : userDoc.state; userDoc.country = updateUserObj.country ? updateUserObj.country : userDoc.country; const returnObj = { success: false, message: "unable to find the object", data: null, meta: null }; userDoc .saveAsync() .then(savedDoc => { if (savedDoc.password) { debug("inside password delete function"); savedDoc = savedDoc.toObject(); delete savedDoc.password; } returnObj.success = true; returnObj.message = "user document saved"; returnObj.data = savedDoc; res.send(returnObj); }) .error(e => { const err = new APIError( `Error occured while updating the user details ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); } }) .error(e => { const err = new APIError( `Error occured while searching for the user ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); } function userStats(req, res, next) { const returnObj = { success: false, message: "no data available", data: null }; UserSchema.aggregateAsync([ { $match: { $or: [{ userType: "driver" }, { userType: "rider" }] } }, { $group: { _id: "riderDriverRatio", rider: { $sum: { $cond: [{ $eq: ["$userType", "rider"] }, 1, 0] } }, driver: { $sum: { $cond: [{ $eq: ["$userType", "driver"] }, 1, 0] } }, totalUser: { $sum: 1 } } } ]) .then(userStatsData => { returnObj.success = true; returnObj.message = "user chart data"; returnObj.data = userStatsData; return res.send(returnObj); }) .error(e => { const err = new APIError( `Error occurred while computing statistic for user ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); } // this function removes carDetails from the rider object and for driver object add car details a object function transformReturnObj(userData) { for (let i = 0; i < userData.length; i++) { //eslint-disable-line if (userData[i].userType === "rider" && userData[i].carDetails) { delete userData[i].carDetails; } } return userData; } function changePassword(req, res, next) { const userObj = { email: req.body.email, userType: req.body.userType }; UserSchema.findOneAsync(userObj, "+password") .then(user => { //eslint-disable-line const returnObj = { success: false, message: "", data: null }; if (!user) { const err = new APIError( "User not found with the given email id", httpStatus.NOT_FOUND ); return next(err); } else { user.comparePassword(req.body.oldpassword, (passwordError, isMatch) => { //eslint-disable-line if (passwordError || !isMatch) { const err = new APIError( "Incorrect old password", httpStatus.UNAUTHORIZED ); return next(err); } user.password = <PASSWORD>; user .saveAsync() .then(savedUser => { returnObj.success = true; returnObj.message = "password changed successfully"; returnObj.data = savedUser; return res.send(returnObj); }) .error(e => { const err = new APIError( `Error while changing password ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); returnObj.success = false; returnObj.message = "password not changed"; console.log(err); return next(returnObj); }); }); } }) .error(e => { const err = new APIError( `erro while finding user ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); } function createNewUser(req, res, next) { const userData = Object.assign({}, req.body); UserSchema.findOneAsync({ email: userData.email, userType: userData.userType }) .then(foundUser => { //eslint-disable-line const returnObj = { success: false, message: "", data: null }; if (foundUser !== null) { const err = new APIError("Email Id Already Exist", httpStatus.CONFLICT); return next(err); } const userObj = new UserSchema({ email: userData.email, password: <PASSWORD>.password ? <PASSWORD>.password : "<PASSWORD>", userType: userData.userType, fname: userData.fname, lname: userData.lname, dob: userData.dob, phoneNo: userData.phoneNo, userName: uuid(), bloodGroup: userData.bloodGroup ? userData.bloodGroup : null, gpsLoc: [userData.lat, userData.lon], emergencyDetails: userData.userType === "rider" ? { phone: userData.emergencyDetails.phone ? userData.emergencyDetails.phone : "", name: userData.emergencyDetails.name ? userData.emergencyDetails.name : "", imgUrl: null } : { phone: "", name: "", imgUrl: null }, carDetails: userData.userType === "driver" ? { type: userData.carDetails.type ? userData.carDetails.type : "Sedan", company: userData.carDetails.company ? userData.carDetails.company : "Maruti", regNo: userData.carDetails.regNo ? userData.carDetails.regNo : "", RC_ownerName: userData.carDetails.RC_ownerName ? userData.carDetails.RC_ownerName : "", vehicleNo: userData.carDetails.vehicleNo ? userData.carDetails.vehicleNo : "", carModel: userData.carDetails.carModel ? userData.carDetails.carModel : "", regDate: userData.carDetails.regDate ? userData.carDetails.regDate : "" } : {}, insuranceUrl: userData.userType === "driver" ? userData.vehicleDocuments.insuranceUrl : null, rcBookUrl: userData.userType === "driver" ? userData.vehicleDocuments.rcBookUrl : null, licenceUrl: userData.userType === "driver" ? userData.licenceDocuments.licenceUrl : null, vechilePaperUrl: userData.userType === "driver" ? userData.licenceDocuments.vechilePaperUrl : null, licenceDetails: userData.userType === "driver" ? { licenceNo: userData.licenceDetails.licenceNo ? userData.licenceDetails.licenceNo : null, issueDate: userData.licenceDetails.issueDate ? userData.licenceDetails.issueDate : null, expDate: userData.licenceDetails.expDate ? userData.licenceDetails.expDate : null } : {}, bankDetails: userData.userType === "driver" ? { accountNo: userData.bankDetails.accountNo ? userData.bankDetails.accountNo : null, holderName: userData.bankDetails.holderName ? userData.bankDetails.holderName : "", IFSC: userData.bankDetails.IFSC ? userData.bankDetails.IFSC : "" } : {}, mapCoordinates: [0, 0], loginStatus: true }); userObj .saveAsync() .then(savedUser => { returnObj.success = true; returnObj.message = "user created successfully"; returnObj.data = savedUser; return res.send(returnObj); }) .error(e => { const err = new APIError( `Error while Creating new User ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); returnObj.success = false; returnObj.message = "user not created"; console.log(err); return next(returnObj); }); }) .error(e => { const err = new APIError( `Error while Searching the user ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); return next(err); }); } export default { rejectUser, approveUser, getApprovePendingUsers, getAllUsers, getUsersDetails, updateUserDetails, userStats, createNewUser, getTotalUsers, getActiveDriverDetails, getActiveCustomerDetails, changePassword }; <file_sep>/* eslint-disable */ function transformReturnObj(Data) { if (Data instanceof Object) { Data = Data.toObject(); if (Data.riderId) { Data.rider = Data.riderId; Data.riderId = Data.rider._id ? Data.rider._id : null; } if (Data.driverId) { Data.driver = Data.driverId; Data.driverId = Data.driver._id ? Data.driver._id : null; } } return Data; } export default { transformReturnObj }; <file_sep>/* eslint-disable */ const childProcess = require('child_process'); function runScript(scriptPath, callback) { // keep track of whether callback has been invoked to prevent multiple invocations let invoked = false; const process = childProcess.fork(scriptPath); // listen for errors as they may prevent the exit event from firing process.on('error', err => { if (invoked) return; invoked = true; callback(err); }); // execute the callback once the process has finished running process.on('exit', code => { if (invoked) return; invoked = true; const err = code === 0 ? null : new Error('exit code', code); callback(err); }); } runScript('./Script/appConfig.js', err => { if (err) throw err; console.log('finished running appConfig.js'); }); runScript('./Script/superAdmin.js', err => { if (err) throw err; console.log('finished running superAdmin.js'); }); runScript('./Script/serverConfig.js', err => { if (err) throw err; console.log('finished running serverConfig.js'); }); // Now we can run a script and invoke a callback when complete, e.g. runScript('./Script/admin.js', err => { if (err) throw err; console.log('finished running admin.js'); }); <file_sep>import stripePackage from "stripe"; import AppConfig from "../models/appConfig"; import Transaction from "../models/transaction"; import User from "../models/user"; import Wallet from "../models/wallet"; function getStripeKey() { return new Promise((resolve, reject) => { AppConfig.findOneAsync({ key: "stripeConfig" }) .then(foundDetails => { resolve(foundDetails.value.stripekey); }) .catch(err => { reject(err); }); }); } function checkSaveCard(req, res) { User.findOneAsync({ email: req.body.email }) .then(foundUser => { const cardDetails = []; if (foundUser.cardDetails.length !== 0) { foundUser.cardDetails.map(obj => { //eslint-disable-line const cardObj = { brand: obj.brand, country: obj.country, cvc_check: obj.cvc_check, last4: obj.last4, fingerprint: obj.fingerprint, funding: obj.funding, exp_year: obj.exp_year, exp_month: obj.exp_month }; cardDetails.push(cardObj); }); res.send({ data: cardDetails, message: "Card Exist" }); } else { res.send({ message: "No Saved Card" }); } }) .catch(err => { console.log(err, "Error"); //eslint-disable-line res.send({ data: err, message: "Error" }); }); } function removeCard(req, res) { User.findOneAsync({ email: req.body.email }) .then(foundUser => { const cardDetails = foundUser.cardDetails; let indexOfCard = -1; if (cardDetails.length !== 0) { cardDetails.map((obj, index) => { //eslint-disable-line if (obj.fingerprint === req.body.fingerprint) { indexOfCard = index; } }); } if (indexOfCard === -1) { res.send({ message: "Card Not Found" }); } else { cardDetails.splice(indexOfCard, 1); User.findOneAndUpdateAsync( { _id: foundUser._id }, { $set: { cardDetails } }, { new: true } ) //eslint-disable-line .then(updateUser => { const newCardDetails = updateUser.cardDetails; res.send({ data: newCardDetails, message: "Card Successfully Removed" }); }) .catch(err => { res.send({ data: err, message: "Unable to delete card" }); }); } }) .catch(err => { res.send({ data: err, message: "Error in removing card" }); }); } function addCard(req, res) { const paymentDetails = req.body; getStripeKey().then(key => { const stripe = stripePackage(key); User.findOneAsync({ email: paymentDetails.email }) .then(foundUser => { const user = foundUser; if (user.userCardId) { stripe.customers.create({ source: paymentDetails.token, email: paymentDetails.email, }) .then(newCard => { const newCardDetails = user.cardDetails; let checkUser = false; newCardDetails.map(obj => { // console.log(newCard.sources.data[0]) // return let new_data = newCard.sources.data //eslint-disable-line new_data.map(obj_new =>{ console.log(obj_new.fingerprint) console.log("here") console.log(obj.fingerprint) if (obj_new.fingerprint === obj.fingerprint) { checkUser = true; res.send({ message: "Card Already Present" }); } }) }); // console.log(checkUser) // console.log(paymentDetails) // if (checkUser === true) { // res.send({ message: "Card Already Present" }); // } else if (paymentDetails.saveCard) { console.log("herrrrrrrrr") newCardDetails.push(newCard.sources.data[0]); User.findOneAndUpdateAsync( { _id: user._id }, { $set: { cardDetails: newCardDetails, } }, { new: true } ) //eslint-disable-line .then(updateUser => { res.send({ message: "Successfully Added", data: updateUser }); }) .catch(err => { res.send({ data: err, message: "Error in adding new card details in database" }); }); } else { res.send({ message: "Card is not saved in database" }); } }) .catch(err => { res.send({ data: err, message: "Error in adding card to Stripe Account" }); }); } else { stripe.customers .create({ email: paymentDetails.email }) .then(customer => { User.findOneAndUpdateAsync( { _id: user._id }, { $set: { // cardDetails: newCardDetails, userCardId: customer.id } }, { new: true } ) return stripe.customers.create({ source: paymentDetails.token, email: paymentDetails.email, }) // stripe.customers.createSource(customer.id, { // source: { // object: "card", // exp_month: paymentDetails.expiryMonth, // exp_year: paymentDetails.expiryYear, // number: paymentDetails.cardNumber, // cvc: paymentDetails.cvc // } // }); }) .then(source => { // console.log(source.data.id) const newCardDetails = user.cardDetails; newCardDetails.push(source.sources.data[0]); User.findOneAndUpdateAsync( { _id: user._id }, { $set: { cardDetails: newCardDetails, // userCardId: source.customer } }, { new: true } ) //eslint-disable-line .then(updateUser => { res.send({ message: "Card successfully added and customer id created", data: updateUser }); }) .catch(err => { res.send({ data: err, message: "Error in adding new card data for new user" }); }); }) .catch(err => { res.send({ data: err, message: "Error in adding new card in stripe" }); }); } }) .catch(err => { res.send({ data: err, message: "Error in finding user" }); }); }); } function updateCard(req, res) { const cardDetails = req.body; getStripeKey().then(key => { const stripe = stripePackage(key); User.findOneAsync({ email: cardDetails.email }) .then(foundUser => { const user = foundUser; let cardId = null; if (cardDetails.fingerprint) { user.cardDetails.map(obj => { //eslint-disable-line if (cardDetails.fingerprint === obj.fingerprint) { cardId = obj.id; } }); if (cardId) { stripe.customers .update(user.userCardId, { default_source: cardId }) .then(checkCard => { console.log("Deault Card Changed", checkCard); //eslint-disable-line }) .catch(err => { res.send({ data: err, message: "Error in changing default card" }); }); } else { res.send({ message: "No card found " }); } res.send({ message: "Updated Successfully" }); } else { res.send({ message: "Fingerprint data not available" }); } }) .catch(err => { res.send({ data: err, message: "Error in updating card details" }); }); }); } function cardPayment(tripObj) { // getStripeKey().then(key => { // console.log(stripe.charges.create({ // amount: 1000, // currency: 'usd', // customer: '<KEY>', // source: 'tok_1GWNATHHPLhdheLYIbxeiDhT', // })); // }); // return true; // { // "email":"<EMAIL>", // "tripAmt":"1000", // "customer":"<KEY>" // } return new Promise((resolve, reject) => { getStripeKey().then(key => { const stripe = stripePackage(key); stripe.setTimeout(20000); // User.findOneAsync({ email: tripObj.rider.email }) User.findOneAsync({ _id: tripObj.riderId }) .then(foundUser => { console.log("userCardId") const user = foundUser; console.log(user.cardDetails[0].customer) stripe.charges .create({ amount: 1000, currency: "usd", customer: user.cardDetails[user.cardDetails.length - 1].customer // source: tripObj.source }) .then(charge => { const paymentStatus = charge.status; // add transaction here /*********************After use */ // const transactionDriver = new Transaction({ // userIdTo: tripObj.driverId, // userIdFrom: tripObj.riderId, // amount: Number(tripObj.tripAmt) / 100, // tripId: tripObj._id, //eslint-disable-line // // walletIdFrom: tripObj.rider.email, // // walletIdTo: tripObj.driver.email // }); // transactionDriver.saveAsync().then(transactionRider => { resolve(paymentStatus); // // const returnObj = { // // success: true, // // message: "", // // data: {} // // }; // // returnObj.data.user = transactionRider; // // returnObj.message = // // "transaction created successfully wallet was not present"; // }); /********************after use */ }) .catch(err => { const paymentStatus = "error"; console.log(err); //eslint-disable-line // transaction here failed resolve(paymentStatus); }); }) .catch(err => { const paymentStatus = "error"; console.log(err); //eslint-disable-line reject(paymentStatus); }); }); }).catch(e => { console.log("test", e); //eslint-disable-line }); } function getBalance(req, res) { Wallet.findOneAsync({ userEmail: req.body.email }).then(foundWallet => { if (foundWallet !== null) { const returnObj = { success: true, message: "", data: {} }; returnObj.data.user = foundWallet; returnObj.message = "Wallet Present for this account"; res.send(returnObj); } else { const returnObj = { success: false, message: "", data: {} }; returnObj.data.user = foundWallet; returnObj.message = "No wallet Present for this account"; res.send(returnObj); } }); } export function payAll(tripObj) { Wallet.findOneAndUpdateAsync( { userEmail: tripObj.rider.email }, { $inc: { walletBalance: -Number(tripObj.tripAmt) * 100 } } ).then(updateWalletObj => { if (updateWalletObj) { // transaction insert const transactionOwner = new Transaction({ userIdFrom: tripObj.riderId, tripId: tripObj._id, //eslint-disable-line amount: Number(tripObj.tripAmt) * 20, // couz value is in cents walletIdFrom: tripObj.rider.email }); transactionOwner.saveAsync().then(transactionRider => { const returnObj = { success: true, message: "", data: {} }; returnObj.data.user = transactionRider; returnObj.message = "transaction created successfully wallet was present"; }); Wallet.findOneAndUpdateAsync( { userEmail: tripObj.driver.email }, { $inc: { walletBalance: Number(tripObj.tripAmt) * 80 } } ).then(WalletObjDriver => { console.log(WalletObjDriver); //eslint-disable-line const transactionDriver = new Transaction({ userIdTo: tripObj.driverId, userIdFrom: tripObj.riderId, amount: Number(tripObj.tripAmt) * 80, tripId: tripObj._id, //eslint-disable-line walletIdFrom: tripObj.rider.email, walletIdTo: tripObj.driver.email }); transactionDriver.saveAsync().then(transactionRider => { const returnObj = { success: true, message: "", data: {} }; returnObj.data.user = transactionRider; returnObj.message = "transaction created successfully wallet was not present"; }); }); } else { const returnObj = { success: false, message: "", data: {} }; returnObj.data.user = updateWalletObj; returnObj.message = "walletBalance updatation failed"; returnObj.success = false; } }); } function earning(req,res,next){ Transaction .find({ userIdTo: req.body.driverId }, ) .then(updateWalletObj => { var sum = 0; updateWalletObj.forEach((obj)=>{ sum = sum+obj.amount }) const returnObj = { success: true, message: "My earnings", data: sum }; // returnObj.data.user = transactionRider; // returnObj.message = "transaction created successfully"; res.send(returnObj); }) ; } function addBalance(req, res, next) { Wallet.findOneAndUpdateAsync( { userEmail: req.body.riderEmail }, { $inc: { walletBalance: Number(req.body.amount) } } ) .then(updateWalletObj => { if (updateWalletObj) { // transaction insert const transactionOwner = new Transaction({ userIdFrom: req.body.riderEmail, tripId: req.body.tripId, amount: Number(req.body.amount), walletIdFrom: req.body.riderEmail }); transactionOwner.saveAsync().then(transactionRider => { const returnObj = { success: true, message: "", data: {} }; returnObj.data.user = transactionRider; returnObj.message = "transaction created successfully"; res.send(returnObj); }); } else { const wallet = new Wallet({ userEmail: req.body.riderEmail, walletBalance: req.body.amount }); wallet.saveAsync().then(savedWallet => { console.log(savedWallet); //eslint-disable-line const transactionOwner = new Transaction({ userIdFrom: req.body.riderEmail, tripId: req.body.tripId, amount: Number(req.body.amount), walletIdFrom: req.body.riderEmail }); transactionOwner .saveAsync() .then(transactionRider => { const returnObj = { success: true, message: "", data: {} }; returnObj.data.user = transactionRider; returnObj.message = "transaction created successfully"; res.send(returnObj); }) .error(e => { console.log("error", e); }); //eslint-disable-line }); } }) .error(e => { next(e); }); } export function saveTransaction(tripObj) { const transactionOwner = new Transaction({ userIdFrom: tripObj.riderId, tripId: tripObj._id, //eslint-disable-line amount: Number(tripObj.tripAmt), userIdTo: tripObj.driverId }); transactionOwner.saveAsync().then(transactionRider => { const returnObj = { success: true, message: "", data: {} }; returnObj.data.user = transactionRider; returnObj.message = "Transaction created successfully"; }); } export default { getStripeKey, payAll, getBalance, addBalance, checkSaveCard, removeCard, addCard, cardPayment, updateCard, saveTransaction, earning }; <file_sep>import express from 'express'; import appConfigCtrl from '../controllers/appConfig'; import userCtrl from '../controllers/user'; const router = express.Router(); router.route('/forgot') .post(userCtrl.forgotPassword); // /** GET /api/config/appConfig - Returns mobileApp config */ router.route('/appConfig') .get(appConfigCtrl.getConfig) .post(appConfigCtrl.updateConfig); export default router; <file_sep>import express from "express"; import validate from "express-validation"; import httpStatus from "http-status"; import passport from "passport"; import APIError from "../helpers/APIError"; import config from "../../config/env"; import paramValidation from "../../config/param-validation"; import userCtrl from "../controllers/user"; const router = express.Router(); /** POST /api/users/register - create new user and return corresponding user object and token */ router .route("/register") .post(validate(paramValidation.createUser), userCtrl.create); /** * Middleware for protected routes. All protected routes need token in the header in the form Authorization: JWT token */ router.use((req, res, next) => { passport.authenticate( "jwt", config.passportOptions, (error, userDtls, info) => { //eslint-disable-line if (error) { const err = new APIError( "token not matched", httpStatus.INTERNAL_SERVER_ERROR ); return next(err); } else if (userDtls) { req.user = userDtls; next(); } else { const err = new APIError( `token not matched ${info}`, httpStatus.UNAUTHORIZED ); return next(err); } } )(req, res, next); }); router .route("/") /** GET /api/users - Get user */ .get(userCtrl.get) /** PUT /api/users - Update user */ .put(userCtrl.update) /** DELETE /api/users - Delete user */ .delete(userCtrl.remove); router .route('/person/:id') .get(userCtrl.getUserDetails); router .route('/person/list/:id') .get(userCtrl.fetchPersonUsers, userCtrl.fetchFollowingsOrFollowers); router .route('/list') .get(userCtrl.fetchMyProfileUsers, userCtrl.fetchFollowingsOrFollowers); router .route('/add-following') /** POST /api/users/add-follower - Add User to the following list */ .post(userCtrl.addFollowing); router .route('/remove-following') /** POST /api/users/remove-following- Remove User from the following list */ .post(userCtrl.removeFollowing); router .route('/remove-follower') /** POST /api/users/remove-follower - Remove User from the following list */ .post(userCtrl.removeFollower); router .route('/search') .get(userCtrl.searchUser); /** Load user when API with userId route parameter is hit */ router.param("userId", userCtrl.load); router .route("/upload") /** PUT /api/users/upload - Update user pic */ .put(userCtrl.upload); router .route("/test-server") /** PUT /api/users/upload - Update user pic */ .get(userCtrl.testServer); router .route('/addWorkImage') .put(userCtrl.addWorkImage) export default router; <file_sep>import mongoose from 'mongoose'; const Schema = mongoose.Schema; const TripRequestSchema = new Schema({ riderId: { type: Schema.Types.ObjectId, ref: 'User' }, driverId: { type: Schema.Types.ObjectId, ref: 'User' }, tripId: { type: Schema.Types.ObjectId, ref: 'trip' }, srcLoc: { type: [Number], index: '2d' }, destLoc: { type: [Number], index: '2d' }, paymentMode: { type: String, default: 'CASH' }, tripRequestStatus: { type: String, default: 'request' }, tripRequestIssue: { type: String, default: 'busy' }, pickUpAddress: { type: String, default: null }, destAddress: { type: String, default: null }, latitudeDelta: { type: Number, default: 0.012 }, longitudeDelta: { type: Number, default: 0.012 }, requestTime: { type: Date, default: Date.now }, bookTime: { type: Date, default: Date.now }, }); TripRequestSchema.statics = { userList({ skip = 0, limit = 10, userId = null, userType = null } = {}) { let searchObj = {}; if (userType === 'rider') { searchObj = {}; searchObj.riderId = userId; } if (userType === 'driver') { searchObj = {}; searchObj.driverId = userId; } return this.find(searchObj) .skip(skip) .limit(limit) .populate('riderId driverId tripId') .execAsync(); }, getUserCount(userType, userId) { let searchObj = {}; if (userType === 'rider') { searchObj = {}; searchObj.riderId = userId; } if (userType === 'driver') { searchObj = {}; searchObj.driverId = userId; } return this.countAsync(searchObj); } }; export default mongoose.model('tripRequest', TripRequestSchema); <file_sep>import mongoose from "mongoose"; const Schema = mongoose.Schema; const albumImageSchema = new Schema({ albumId: { type: Schema.Types.ObjectId, ref: "album" }, imageUrl: { type: String, default: null }, postedAt: { type: Date, default: Date.now() } }); export default mongoose.model("albumImage", albumImageSchema); <file_sep>import httpStatus from "http-status"; import jwt from "jsonwebtoken"; import cloudinary from "cloudinary"; import { get } from "lodash"; import formidable from "formidable"; import APIError from "../helpers/APIError"; import { fetchReturnObj } from "../service/transform-response"; import TripSchema from "../models/trip"; import TripRequestSchema from "../models/trip-request"; import UserSchema from "../models/user"; import SendNotification from "../service/pushNotification"; import paymentCtrl from "../controllers/payment"; import config from "../../config/env"; import ServerConfig from "../models/serverConfig"; /** * Return the trip details of the user. * @param req * @param res * @param next * @returns { trip: historyObjArray[{ tripObj }] } */ function getHistory(req, res, next) { const historyObjArray = []; const recentObjArray = []; const userID = req.user._id; //eslint-disable-line const userType = req.user.userType; const searchObj = {}; if (userType === "1") { searchObj.riderId = userID; } else if (userType === "2") { searchObj.driverId = userID; } console.log(searchObj) TripSchema.find( { $and: [searchObj], $or:[{ tripStatus: "endTrip" },{tripStatus: "accepted"}] }, null, { sort: { bookingTime: -1 } }, (tripErr, tripObj) => { //eslint-disable-line if (tripErr) { const err = new APIError( `error while finding trip history for the user ${tripErr}`, httpStatus.INTERNAL_SERVER_ERROR ); return next(err); } if (tripObj.length !== 0) { tripObj.forEach((obj, index) => { fetchReturnObj(obj).then(transformedReturnObj => { if(transformedReturnObj.tripStatus === "endTrip"){ historyObjArray.push(transformedReturnObj); } else if(transformedReturnObj.tripStatus === "accepted"){ recentObjArray.push(transformedReturnObj); } if (index === tripObj.length - 1) { const returnObj = { success: true, message: "user trip history", history: historyObjArray, recent: recentObjArray }; res.send(returnObj); } }); }); } else { const returnObj = { success: true, message: "no history available", data: [] }; res.send(returnObj); } } ); } /** * Get getCloudinaryDetails * @returns {Cloudinary Details} */ function getCloudinaryDetails() { return new Promise((resolve, reject) => { ServerConfig.findOneAsync({ key: "cloudinaryConfig" }) .then(foundDetails => { resolve(foundDetails.value); }) .catch(err => { reject(err); }); }); } /** * Return the trip details of the user. * @param req * @param res * @param next * @returns { tripObj } */ function updateTrip(req, res, next) { console.log(req, "reqreqreqreq"); //eslint-disable-line const userType = req.user.userType; getCloudinaryDetails() .then(value => { if (value) { cloudinary.config({ cloud_name: value.cloud_name, api_key: value.api_key, api_secret: value.api_secret }); const form = new formidable.IncomingForm(); form.on("error", err => { console.error(err, "error heree"); //eslint-disable-line }); form.parse(req, (err, fields, files) => { const img = get(files, "image.path", ""); const tripID = fields.tripId; cloudinary.v2.uploader.upload( img, // { // transformation: [ // { // effect: 'improve', // gravity: 'face', // height: 100, // width: 100, // crop: 'fill', // }, // { quality: 'auto' }, // ], // }, (error, results) => { if (results) { TripSchema.findOneAndUpdateAsync( { _id: fields.tripId }, { $set: { receiptUrl: results.url } }, { new: 1, runValidators: true } ) .then(updatedTripObj => { //eslint-disable-line const returnObj = { success: false, message: "unable to update trip object as trip id provided didnt match", data: null, meta: null }; if (updatedTripObj) { returnObj.success = true; returnObj.message = "trip object updated"; returnObj.data = updatedTripObj; res.send(returnObj); } else { const err = new APIError( "Trip Id did not matched", httpStatus.BAD_REQUEST ); return next(err); } }) .error(e => { const err = new APIError( `Error occured while updatating trip object ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); } } ); }); } }) .catch(e => { const err = new APIError( `Error occured while updatating trip object ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); } function listing_trip(req, res, next) { console.log("simran here") const requestObjArray = []; const userID = req.user._id; //eslint-disable-line const userType = req.user.userType; const searchObj = {}; if (userType === "2") { searchObj.driverId = userID; console.log(userID); console.log("<NAME> ") TripRequestSchema .find({ $and: [searchObj, { $or: [{ tripRequestStatus: "request" },{tripRequestStatus: "accepted"}] } ] }, null, { sort: { requestTime: -1 } }) .populate('riderId') .exec(function (tripErr, tripObj) { if (tripErr) { const err = new APIError( `error while finding trip history for the user ${tripErr}`, httpStatus.INTERNAL_SERVER_ERROR ); return next(err); } if (tripObj.length !== 0) { tripObj.forEach((obj, index) => { console.log("this is obj") console.log(obj.driverId) // var driverId = obj.driverId obj.details = obj.driverId; fetchReturnObj(obj).then(transformedReturnObj => { transformedReturnObj.tripId = transformedReturnObj._id var riderInfo = transformedReturnObj.riderId transformedReturnObj.riderId =riderInfo._id transformedReturnObj.riderFirstName =riderInfo.fname transformedReturnObj.riderLastNmae =riderInfo.lname transformedReturnObj.riderAddress =riderInfo.address transformedReturnObj.riderCity =riderInfo.city transformedReturnObj.riderState =riderInfo.state transformedReturnObj.riderRountry =riderInfo.country transformedReturnObj.riderProfileUrl =riderInfo.profileUrl transformedReturnObj.riderEmail =riderInfo.email transformedReturnObj.riderPhone =riderInfo.phoneNo transformedReturnObj.riderGpsLoc =riderInfo.gpsLoc // console.log("check from simran============") requestObjArray.push(transformedReturnObj); if (index === tripObj.length - 1) { const returnObj = { success: true, message: "user trip request", data: requestObjArray }; res.send(returnObj); } }); }); } else { const returnObj = { success: true, message: "no trip request availiable", data: [] }; res.send(returnObj); } // prints "The creator is Aaron" }) } else if (userType === "1") { searchObj.riderId = userID; console.log(searchObj); console.log(userID) TripRequestSchema .find({ $and: [searchObj, { tripRequestStatus: "request" }] }, null, { sort: { requestTime: -1 } }) .populate('driverId') .exec(function (tripErr, tripObj) { if (tripErr) { const err = new APIError( `error while finding trip history for the user ${tripErr}`, httpStatus.INTERNAL_SERVER_ERROR ); return next(err); } console.log(tripObj) if (tripObj.length !== 0) { tripObj.forEach((obj, index) => { console.log("this is obj") console.log(obj.driverId) // var driverId = obj.driverId obj.details = obj.driverId; fetchReturnObj(obj).then(transformedReturnObj => { transformedReturnObj.tripId = transformedReturnObj._id var riderInfo = transformedReturnObj.driverId transformedReturnObj.driverId =riderInfo._id transformedReturnObj.driverFirstName =riderInfo.fname transformedReturnObj.driverLastNmae =riderInfo.lname transformedReturnObj.driverAddress =riderInfo.address transformedReturnObj.driverCity =riderInfo.city transformedReturnObj.driverState =riderInfo.state transformedReturnObj.driverCountry =riderInfo.country transformedReturnObj.driverProfileUrl =riderInfo.profileUrl transformedReturnObj.driverEmail =riderInfo.email transformedReturnObj.driverPhone =riderInfo.phoneNo transformedReturnObj.driverGpsLoc =riderInfo.gpsLoc requestObjArray.push(transformedReturnObj); if (index === tripObj.length - 1) { const returnObj = { success: true, message: "user trip request", data: requestObjArray }; res.send(returnObj); } }); }); } else { const returnObj = { success: true, message: "no trip request availiable", data: [] }; res.send(returnObj); } // prints "The creator is Aaron" }) } } function updateStatus(req, res, next){ var tripId = req.body.tripId; var tripStatus = req.body.tripStatus; console.log(req.body) if(req.body.tripStatus === "payment" ){ paymentCtrl.cardPayment(req.body).then(status => { console.log(status); }) } TripRequestSchema.findOneAndUpdate( { _id: tripId }, { $set: { tripRequestStatus: tripStatus } }, // { multi: true } ).then(result => { if(req.body.tripStatus === "payment" ){ UserSchema.findOneAsync({ _id: result.driverId }).then(userObj => { SendNotification(result.riderId, userObj.fname+" charge you payment $10"); }); } else{ UserSchema.findOneAsync({ _id: result.driverId }).then(userObj => { SendNotification(result.riderId, userObj.fname+" has "+tripStatus +" your request"); }); } // SendNotification(result.riderId, ); return res.send({ success: true, message: "Changed Status sucessfully", data: tripStatus }); }); } export default { getHistory, updateTrip ,listing_trip,updateStatus}; <file_sep>import fetch from 'node-fetch'; import UserSchema from '../models/user'; const url = `https://onesignal.com/api/v1/notifications`; function sendNotification(userId, notification) { UserSchema.findOneAsync({ _id: userId }).then(userObj => { console.log(userObj.deviceId) console.log("simran") if (!userObj) { throw new Error('No Such User Exist'); } const App_id =userObj.userType === '2' ?'1ba8a78e-51c8-413d-97cc-2b6f0905818e':'5a6a6c51-f619-409f-acf6-410e1b4f1178' // userObj.userType === '1' ? 'df137503-fb26-4180-aebc-ca6835152506' : '96124b53-6eb7-4fdf-bd98-d188b51e28de'; const Api_key =userObj.userType === '2' ?'<KEY>':'<KEY>' console.log() // userObj.userType === '1'? 'ZDU5ODgzMzUtNDhkYi00N2NhLWEzZjMtYzEzYzg3YjgwOTZm':'N2Q0YWY0OGQt<KEY>'; fetch(url, { method: 'POST', body: JSON.stringify({ app_id: App_id, contents: { en: notification }, include_player_ids: [userObj.deviceId],//['30ecabfa-3bc7-4c2b-b8a6-a5cb3245515f'], //userObj.deviceId data: { source: 'message' } }), headers: { 'Content-Type': 'application/json', Authorization: 'Basic ' + Api_key } }) .then(res => res.json()) .then(data => { console.log('RESPONSE', data); }) .catch(err => { console.log('ERROR', err); }); }); } export default sendNotification; <file_sep>import express from 'express'; import validate from 'express-validation'; import httpStatus from 'http-status'; import passport from 'passport'; import paramValidation from '../../config/param-validation'; import APIError from '../helpers/APIError'; import authCtrl from '../controllers/auth'; import config from '../../config/env'; const router = express.Router(); /** POST /api/auth/login - Returns token if correct email and password is provided */ router.route('/login').post(validate(paramValidation.login), authCtrl.login); router.route('/loginadmin').post(validate(paramValidation.loginadmin), authCtrl.loginadmin); router.route('/test-server').get(authCtrl.testServer); router.route('/checkuser').post(authCtrl.checkUser); /** * Middleware for protected routes. All protected routes need token in the header in the form Authorization: JWT token */ router.use((req, res, next) => { passport.authenticate('jwt', config.passportOptions, (error, userDtls, info) => { //eslint-disable-line if (error) { const err = new APIError('token not matched', httpStatus.UNAUTHORIZED); return next(err); } else if (userDtls) { req.user = userDtls; next(); } else { const err = new APIError(`token not matched and error msg ${info}`, httpStatus.UNAUTHORIZED); return next(err); } })(req, res, next); }); // router.route('/random-number') // .get(authCtrl.getRandomNumber); router.route('/logout').get(authCtrl.logout); export default router; <file_sep>import httpStatus from 'http-status'; import APIError from '../helpers/APIError'; import { fetchReturnObj } from '../service/transform-response'; import TripSchema from '../models/trip'; import TripRequestSchema from '../models/trip-request'; /** * Return the trip details of the user. * @param req * @param res * @param next * @returns { trip: historyObjArray[{ tripObj }] } */ function getSyncData(req, res, next) { // const userID = req.user._id; const currTripId = req.user.currTripId; const currTripState = req.user.currTripState; const returnObj = { success: true, message: 'user is not in any trip or tripRequest', data: { tripRequest: null, trip: null } }; if (currTripId === null || currTripId === undefined || currTripState === null || currTripState === undefined) { res.send(returnObj); } if (currTripState === 'tripRequest') { TripRequestSchema.findOneAsync({ $and: [{ _id: currTripId }, { $or: [{ tripRequestStatus: 'enRoute' }, { tripRequestStatus: 'arriving' }, { tripRequestStatus: 'arrived' }] }] }) .then((tripRequestObj) => { if (tripRequestObj) { fetchReturnObj(tripRequestObj).then((transformedTripRequestObj) => { returnObj.message = 'user is in tripRequest state'; returnObj.data.tripRequest = transformedTripRequestObj; res.send(returnObj); }) .error((e) => { const err = new APIError(`error occurred when transforming tripRequestObj ${e}`, httpStatus.INTERNAL_SERVER_ERROR); return next(err); }); } else { returnObj.message = 'no trip request object found for the current tripRequest state for the corresponding user'; res.send(returnObj); } }) .error((e) => { const err = new APIError(`error occurred when feteching user data from tripRequest schema ${e}`, httpStatus.INTERNAL_SERVER_ERROR); return next(err); }); } if (currTripState === 'trip') { TripSchema.findOneAsync({ $and: [{ _id: currTripId }, { tripStatus: 'onTrip' }] }) .then((tripObj) => { if (tripObj) { fetchReturnObj(tripObj).then((transformedTripObj) => { returnObj.message = 'user is in trip state'; returnObj.data.trip = transformedTripObj; returnObj.data.tripRequest = null; res.send(returnObj); }) .error((e) => { const err = new APIError(`error occurred when feteching user data from trip schema ${e}`, httpStatus.INTERNAL_SERVER_ERROR); return next(err); }); } else { returnObj.message = 'no trip object found for the current trip state for the corresponding user'; res.send(returnObj); } }) .error((e) => { const err = new APIError(`error occurred when feteching user data from trip schema ${e}`, httpStatus.INTERNAL_SERVER_ERROR); return next(err); }); } } export default { getSyncData }; <file_sep>## React Native Taxi App- Backend v4.2.1 Thanks for purchasing the ExpressJs ,SocketIo empowered TaxiApp. Follow the documentation to install and get started with the development: - [Documentation](https://docs.market.nativebase.io/react-native-taxi-app-with-backend/) - [Product Page](http://strapmobile.com/react-native-uber-like-app-backend/) Happy coding! <file_sep>import express from 'express'; import httpStatus from 'http-status'; import passport from 'passport'; import validate from 'express-validation'; import adminTrip from '../controllers/admin-trip'; import adminTripUser from '../controllers/admin-trip-user'; import adminUser from '../controllers/admin-user'; import APIError from '../helpers/APIError'; import config from '../../config/env'; import paramValidation from '../../config/param-validation'; import serverCtrl from '../controllers/server-config'; //eslint-disable-line const router = express.Router(); router .route('/trip') .get(validate(paramValidation.tripList), adminTrip.tripDetails) .post(validate(paramValidation.createNewTrip), adminTrip.createNewTrip) .put(validate(paramValidation.updateTripObject), adminTrip.updateTrip); router.route('/allusers').post(adminUser.getTotalUsers); router.route('/ongoingtrips').get(adminTrip.getOngoingTripDetails); router.route('/recentreviewedtrips').get(adminTrip.getRecentReviewedTripDetails); router.route('/approvePendingUsers').get(validate(paramValidation.pending), adminUser.getApprovePendingUsers); router.route('/approveUser').put(validate(paramValidation.approve), adminUser.approveUser); router.route('/rejectUser').put(validate(paramValidation.reject), adminUser.rejectUser); router.route('/activeDriverDetails').get(adminUser.getActiveDriverDetails); router.route('/activeCustomerDetails').get(adminUser.getActiveCustomerDetails); router.route('/specificusertrips/:userId').get(adminTrip.getSpecificUserTripDetails); router.route('/serverConfigObj').get(serverCtrl.getConfig); router.route('/serverConfig').post(serverCtrl.updateConfig); // /api/admin/user router .route('/user') .get(adminUser.getAllUsers) .post(validate(paramValidation.createNewUser), adminUser.createNewUser) .put(validate(paramValidation.updateUserByAdmin), adminUser.updateUserDetails); router.route('/changepassword').post(adminUser.changePassword); router.use((req, res, next) => { passport.authenticate('jwt', config.passportOptions, (error, userDtls, info) => { //eslint-disable-line if (error) { const err = new APIError('token not matched', httpStatus.UNAUTHORIZED); return next(err); } else if (userDtls && userDtls.userType === 'admin') { req.user = userDtls; next(); } else { const err = new APIError(`token not matched and error msg ${info}`, httpStatus.UNAUTHORIZED); return next(err); } })(req, res, next); }); // server Config router .route('/serverConfig') .get(serverCtrl.getConfig) .post(serverCtrl.updateConfig); // /api/admin/allusers router.route('/allusers').get(adminUser.getTotalUsers); router.route('/userDetails/:userId').get(adminUser.getUsersDetails); router.route('/user/userStatsChart').get(adminUser.userStats); // /api/admin/trip // .put(adminTrip.updateTrip); router.route('/trip/charts').get(validate(paramValidation.tripRevenueGraph), adminTrip.tripRevenueGraph); router.route('/trip/charts/:revenueYear').get(validate(paramValidation.tripRevenueGraph), adminTrip.tripRevenueGraph); router.route('/trip/:tripId').get(validate(paramValidation.userTripRequestList), adminTrip.loadTripDetails); router.route('/trip/user/:userId').get(validate(paramValidation.userTripRequestList), adminTripUser.userTripDetails); router.route('/trip/user/charts/:userId').get(validate(paramValidation.userTripRequestList), adminTripUser.userTripRequestStatics); export default router; <file_sep>/* eslint-disable */ import deferred from "deferred"; import Promise from "bluebird"; import httpStatus from "http-status"; import APIError from "../../helpers/APIError"; import AppConfig from "../../models/appConfig"; import config from "../../../config/env"; import { fetchReturnObj } from "../../service/transform-response"; import sendEmail from "../../service/emailApi"; import SendNotification from "../../service/pushNotification"; import sendSms from "../../service/smsApi"; import SocketStore from "../../service/socket-store.js"; //eslint-disable-line import TripRequest from "../../models/trip-request"; import UserSchema from "../../models/user"; import startTripHandler from './start-trip'; import TripSchema from "../../models/trip"; const watchIdObj = {}; const promObj = {}; /** * Get appConfig * @returns {appConfig} */ function getConfig() { return new Promise((resolve, reject) => { AppConfig.findOneAsync({ key: "sendConfig" }) .then(foundDetails => { resolve(foundDetails.value); }) .catch(err => { reject(err); }); }); } function requestTripHandler(socket) { socket.on("requestTrip", payload => { const quantum = 10; const riderID = payload.rider._id; nearByDriver(riderID) .then(nearByDriversDoc => { // console.log(nearByDriversDoc, 'nearby user'); for (let i = 0; i < nearByDriversDoc.length - 1; i++) { if (!checkSocketConnection(nearByDriversDoc[i]._id)) { nearByDriversDoc = removeDriverFromList(nearByDriversDoc, i); } } console.log(nearByDriversDoc) // roundRobinAsync(nearByDriversDoc, quantum, payload) // .then(result => { // console.log(result, "result round robin"); // if (result === false) { // payload.tripRequest.tripRequestStatus = "noNearByDriver"; // SendNotification(riderID, "No nearby drivers"); // socket.emit("requestTrip",{"requestTripData":nearByDriversDoc}) // SocketStore.emitByUserId( // payload.rider._id, // "tripRequestUpdated", // payload.tripRequest // ); // } // }) // .catch(e => console.log("error", e)); socket.emit("requestTrip",{"requestTripData":nearByDriversDoc}) }) .catch(e => console.log("error", e)); }); /////////////for requesting particular photographer /////////////for requesting particular photographer socket.on("requestPhotographer", payload => { const quantum = 10; const photographer_id = payload.photographer_id; const riderID = payload.rider._id; console.log(payload) createTripRequestObjAsync(payload,photographer_id) .then(requestPhotographerData => { // console.log(nearByDriversDoc, 'nearby user'); // for (let i = 0; i < requestPhotographerData.length - 1; i++) { // if (!checkSocketConnection(requestPhotographerData[i]._id)) { // requestPhotographerData = removeDriverFromList(requestPhotographerData, i); // } // } // console.log(payload) // console.log("requestPhotographerData") // console.log(requestPhotographerData) socket.emit("requestTrip",{"requestTripData":requestPhotographerData}) // socket.emit("requestTrip",{"requestTripData":nearByDriversDoc}) }) .catch(e => console.log("error", e)); }); ///photographer response in the form of accept and reject socket.on("photographerResponse", payload => { const tripId = payload.tripId; const tripRequestStatus = payload.tripRequestStatus; const tripRequestObj = {tripRequestStatus:payload.tripRequestStatus,latitudeDelta:payload.latitudeDelta,longitudeDelta:payload.longitudeDelta,pickUpAddress:payload.pickUpAddress,srcLoc:payload.srcLoc,destLoc:payload.destLoc,destAddress:payload.destAddress,tripIssue:payload.tripIssue} console.log("simran checkingggggggggg") console.log(payload) // const TripUpdate = TripRequest.findByIdAndUpdate(tripId,{$set:{tripRequestStatus:tripRequestStatus}}); // console.log(TripUpdate) const TripUpdate = TripRequest.findOneAndUpdateAsync( { _id: payload.tripId }, { $set: tripRequestObj }, { new: true } ) socket.emit("requestTrip",{"requestTripData":tripRequestStatus}) }); ///////////// socket.on("requestDriverResponse", tripRequestObj => { // clearInterval(watchIdObj[tripRequestObj._id]); // const driverId = tripRequest.driver._id; const driverId = tripRequestObj.driverId; // startTripHandler(socket) // socket.emit("startTrip",tripRequest =>{ // console.log("simran") // }) // promObj[driverId].resolve(tripRequest); // or resolve promise // socket.emit("requestTrip",{"requestTripData":tripRequest}) console.log("start trip called in apiserver------------>"); const riderID = tripRequestObj.riderId; const driverID = tripRequestObj.driverId; tripRequestObj.tripRequestStatus = "completed"; const tripObj = new TripSchema({ riderId: tripRequestObj.riderId, driverId: tripRequestObj.driverId, srcLoc: tripRequestObj.srcLoc, destLoc: tripRequestObj.destLoc, pickUpAddress: tripRequestObj.pickUpAddress, destAddress: tripRequestObj.destAddress, paymentMode: tripRequestObj.paymentMode }); tripObj .saveAsync() .then(savedTrip => { tripRequestObj.tripId = savedTrip._id; TripRequest.findOneAndUpdateAsync( { _id: tripRequestObj._id }, { $set: tripRequestObj } ).error(e => { SocketStore.emitByUserId(riderID, "socketError", e); SocketStore.emitByUserId(driverID, "socketError", e); }); UserSchema.updateAsync( { $or: [{ _id: savedTrip.riderId }, { _id: savedTrip.driverId }] }, { $set: { currTripId: savedTrip._id, currTripState: "trip" } }, { new: true, multi: true } ) .then(() => { fetchReturnObj(savedTrip).then(returnObj => { SendNotification(riderID, "Photographer has accepeted the request"); SocketStore.emitByUserId(riderID, "tripUpdated", returnObj); // cb(returnObj); }); }) .error(e => { SocketStore.emitByUserId(savedTrip.riderId, "socketError", { message: "error while updating currTripId of user to start Trip", data: e }); SocketStore.emitByUserId(savedTrip.driverId, "socketError", { message: "error while updating currTripId of user to start Trip", data: e }); }); }) .error(e => { // cb(null); console.log("some error occured inside the socket Error"); SocketStore.emitByUserId(riderID, "socketError", e); SocketStore.emitByUserId(driverID, "socketError", e); }); socket.emit("requestDriverResponse",{"requestDriverResponse":tripObj}) }); socket.on("tripRequestUpdate", payload => { TripRequest.findOneAndUpdateAsync( { _id: payload._id }, { $set: payload }, { new: true } ) .then(updatedTripRequestObject => { if (updatedTripRequestObject.tripRequestStatus === "cancelled") { UserSchema.updateAsync( { $or: [{ _id: payload.riderId }, { _id: payload.driverId }] }, { $set: { currTripId: null, currTripState: null } }, { new: true, multi: true } ) .then(() => { // updated user records }) .error(e => { SocketStore.emitByUserId(payload.riderId, "socketError", { message: "error while updating curTripId to null in requestDriverResponse", data: e }); SocketStore.emitByUserId(payload.driverId, "socketError", { message: "error while updating curTripId to null in requestDriverResponse", data: e }); }); } fetchReturnObj(updatedTripRequestObject).then(updatedTripRequestObj => { if ( socket.userId.toString() === updatedTripRequestObj.riderId.toString() ) { console.log( "updatedTripRequestObj.riderId", updatedTripRequestObj.riderId ); SendNotification( updatedTripRequestObj.riderId, updatedTripRequestObj.tripRequestStatus ); SendNotification( updatedTripRequestObj.driver, updatedTripRequestObj.tripRequestStatus ); SocketStore.emitByUserId( updatedTripRequestObj.driverId, "tripRequestUpdated", updatedTripRequestObj ); } else if ( socket.userId.toString() === updatedTripRequestObj.driverId.toString() ) { SocketStore.emitByUserId( updatedTripRequestObj.riderId, "tripRequestUpdated", updatedTripRequestObj ); SendNotification( updatedTripRequestObj.riderId, updatedTripRequestObj.tripRequestStatus ); SendNotification( updatedTripRequestObj.driver, updatedTripRequestObj.tripRequestStatus ); } }); }) .error(e => { // error occured while updating tripRequestObj SocketStore.emitByUserId(payload.riderId, "socketError", e); SocketStore.emitByUserId(payload.driverId, "socketError", e); }); }); // Round robin algorithm for driver dispatch: function roundRobinAsync(nearByDriversDoc, quantum, rider) { console.log(nearByDriversDoc); // returns promise which resolves in success and faliure boolean values // suppose 5 drivers // each driver will be sent request. // expect a response in quantum time. // if response is accept - assign that driver. break process and return // if response is reject - remove driver from the list and select next driver to request from queue // if no response - next driver please. // - no arrival time burst time concept. // - queue structure will be based on database query fetch. return new Promise((resolve, reject) => { const count = 0; const remain = nearByDriversDoc.length; const prom = deferred(); dispatchHandlerAsync( nearByDriversDoc, quantum, remain, count, rider, prom ) .then(result => resolve(result)) .catch(error => reject(error)); }); } function dispatchHandlerAsync( nearByDrivers, quantum, remain, count, rider, prom ) { console.log("here in dispatchHandlerAsync"); if (remain <= 0) { prom.resolve(false); return prom.promise; } promObj[nearByDrivers[count]._id] = deferred(); sendRequestAsync( nearByDrivers[count], quantum, rider, promObj[nearByDrivers[count]._id] ).then( tripRequest => { const response = tripRequest.tripRequestStatus; if (response === "enRoute") { dispatchDriverAsync(tripRequest) .then(() => prom.resolve(true)) .catch(error => prom.reject(error)); getConfig().then(data => { if (data.email.rideAcceptRider) { // sendEmail(tripRequest.riderId, tripRequest, "rideAccept"); } if (data.sms.rideAcceptRider) { // sendSms(tripRequest.riderId, "Your ride request is accepted ."); } }); } else if (response === "rejected") { resetTripRequestAsync(nearByDrivers[count]) // driver rejected so update the database to clear tripRequest made .then(() => { nearByDrivers = removeDriverFromList(nearByDrivers, count); // nearByDrivers.forEach((driver) => console.log(driver.Client connected to socket)); count = 0; remain--; setTimeout(() => { dispatchHandlerAsync( nearByDrivers, quantum, remain, count, rider, prom ); }, 1000); }); } }, () => { console.log("noResponseFromDriver"); nearByDrivers = removeDriverFromList(nearByDrivers, count); count = 0; remain--; setTimeout(() => { dispatchHandlerAsync( nearByDrivers, quantum, remain, count, rider, prom ); }, 1000); } ); return prom.promise; } function sendRequestAsync(driver, timeout, rider, def) { // return tripRequest object which contain response console.log("inside sendRequestAsync", driver.fname); createTripRequestObjAsync(rider, driver) .then(tripRequestObj => { // here for notificatioon to add final SendNotification(driver._id, "New Request"); SocketStore.emitByUserId(driver._id, "requestDriver", tripRequestObj); watchIdObj[tripRequestObj._id] = setInterval(() => { timeout--; if (timeout <= 0) { clearInterval(watchIdObj[tripRequestObj._id]); resetTripRequestAsync(driver) // driver did not respond so update the database to clear tripRequest made. .then(() => { SocketStore.emitByUserId(driver._id, "responseTimedOut"); // clear tripRequest object on driver side // flag = true; def.reject("noResponseFromDriver"); }); } }, 1000); }) .catch(err => console.log("error", err)); return def.promise; } function dispatchDriverAsync(tripRequestObj) { return new Promise(resolve => { TripRequest.findOneAndUpdateAsync( { _id: tripRequestObj._id }, { $set: tripRequestObj }, { new: true } ) .then(updatedTripRequestObject => resolve( fetchReturnObj(updatedTripRequestObject).then( updatedTripRequestObj => { if ( updatedTripRequestObj.tripRequestStatus === "noNearByDriver" ) { updatedTripRequestObj.rider = null; updatedTripRequestObj.driver = null; updatedTripRequestObj.driverId = null; } SocketStore.emitByUserId( tripRequestObj.riderId, "tripRequestUpdated", updatedTripRequestObj ); } ) ) ) .error(e => { SocketStore.emitByUserId(tripRequestObj.driverId, "socketError", e); }); }); } function removeDriverFromList(drivers, index) { // test passed return drivers.slice(0, index).concat(drivers.slice(index + 1)); } function createTripRequestObjAsync(payload, driver) { return new Promise(resolve => { const riderID = payload.rider._id; const srcLocation = payload.tripRequest.srcLoc; const destLocation = payload.tripRequest.destLoc; const pickUpAdrs = payload.tripRequest.pickUpAddress;/// const destAdrs = payload.tripRequest.destAddress; const latDelta = payload.rider.latitudeDelta; const lonDelta = payload.rider.longitudeDelta; const paymentMode = payload.tripRequest.paymentMode; const driverID = driver; const bookTime = payload.tripRequest.bookTime; const tripRequestObj = new TripRequest({ riderId: payload.rider._id, driverId: driverID, tripId: null, srcLoc: srcLocation, destLoc: destLocation, pickUpAddress: pickUpAdrs, destAddress: destAdrs, latitudeDelta: latDelta, longitudeDelta: lonDelta, bookTime:bookTime, paymentMode:paymentMode }); tripRequestObj.saveAsync() .then(savedTripRequest => { console.log("this is simran") UserSchema.findOneAsync({ _id: payload.rider._id }) .then(foundUser => { if (foundUser !== null) { console.log("this is simran here") sendEmail(driver, tripRequestObj, "requestTripForPhotoGrapher") // sendSms(driver,"Hi, "+foundUser.fname+ " has requested for photography at "+payload.tripRequest.destAddress) SendNotification(driver,"Hi, "+foundUser.fname+ " has requested for photography at "+payload.tripRequest.destAddress) } }) // UserSchema.findOneAsync({ _id: driverID }) // .then(userDoc =>{ // const accountSid = 'ACce5efac448dcb11e1269cec0ded3cd45'; // const authToken = '<PASSWORD>'; // const client = require('twilio')(accountSid, authToken); // console.log("debuggg") // const data = client.messages.create({ // body: 'This is the ship that made the Kessel Run in fourteen parsecs?', // from: '+12564488739', // to: userDoc.phoneNo // }) // // .then(message => console.log(message.sid)); // console.log(data.sid); // console.log(userDoc.phoneNo) // }); // savedTripRequest.rider = null; // savedTripRequest.driver = null; // _user["default"].updateAsync({ // $or: [{ // _id: savedTripRequest.riderId // }, { // _id: savedTripRequest.driverId // }] // }, { // $set: { // currTripId: savedTripRequest._id, // currTripState: "tripRequest" // } // }, { // "new": true, // multi: true // }).then(function () { // (0, _transformResponse.fetchReturnObj)(savedTripRequest).then(function (returnObj) { // return resolve(returnObj); // .then(() => { // fetchReturnObj(savedTripRequest).then(returnObj => return resolve(savedTripRequest) // ); // }) // }); // }).error(function (e) { // _socketStore["default"].emitByUserId(riderID, "socketError", { // message: "error while updating curTripId in requestTrip", // data: e // }); // _socketStore["default"].emitByUserId(driverID, "socketError", { // message: "error while updating curTripId in requestTrip", // data: e // }); // }); }).error(function (e) { _socketStore["default"].emitByUserId(riderID, "socketError", e); }); // }); }); } // tripRequestObj // .saveAsync() // .then(function (savedTripRequest) { // savedTripRequest => { // savedTripRequest.rider = null; // savedTripRequest.driver = null; // UserSchema.updateAsync( // { // $or: [ // { _id: savedTripRequest.riderId }, // { _id: savedTripRequest.driverId } // ] // }, // { // $set: { // currTripId: savedTripRequest._id, // currTripState: "tripRequest" // } // }, // { new: true, multi: true } // ) // .then(() => { // fetchReturnObj(savedTripRequest).then(returnObj => // resolve(returnObj) // ); // }) // .error(e => { // SocketStore.emitByUserId(riderID, "socketError", { // message: "error while updating curTripId in requestTrip", // data: e // }); // SocketStore.emitByUserId(driverID, "socketError", { // message: "error while updating curTripId in requestTrip", // data: e // }); // }); // }) // .error(e => { // SocketStore.emitByUserId(riderID, "socketError", e); // }); // }); // } function resetTripRequestAsync(driverObj) { // query to reset tripRequest object for a particular driver in database. return new Promise(resolve => { UserSchema.updateAsync( { $or: [{ _id: driverObj._id }] }, { $set: { currTripId: null, currTripState: null } }, { new: true, multi: true } ) .then(() => resolve()) .error(e => { SocketStore.emitByUserId(driverObj.riderId, "socketError", { message: "error while updating curTripId to null in requestDriverResponse", data: e }); SocketStore.emitByUserId(driverObj.driverId, "socketError", { message: "error while updating curTripId to null in requestDriverResponse", data: e }); }); }); } function checkSocketConnection(id) { const res = SocketStore.getByUserId(id); if (res.success && res.data.length) { return true; } else { return false; } } function nearByDriver(riderId) { console.log(riderId+ "this is rider id"); return new Promise((resolve, reject) => UserSchema.findOneAsync({ _id: riderId, userType: "1" }) .then(userDoc => { if (userDoc) { console.log(userDoc+"user info") // debug hereeeeee return UserSchema.findAsync({ $and: [ { gpsLoc: { $geoWithin: { $centerSphere: [userDoc.gpsLoc, config.radius] } } }, // { gpsLoc: { $geoWithin: { $center: [userDoc.gpsLoc, config.radius] } } }, { currTripId: null, currTripState: null }, { loginStatus: true }, { userType: "2" }, { isAvailable: true } ] }) .then(driverDoc => { if (driverDoc) { // console.log('hree list', driverDoc); return resolve(driverDoc); } else { // console.log('no nearByDriver driver found'); const err = new APIError( "no nearByDriver found", httpStatus.INTERNAL_SERVER_ERROR ); return reject(err); } }) .error(driverErr => { // console.log('error while searching near by driver '); reject(driverErr); }); } else { // console.log('no rider found with the given rider id'); const err = new APIError( "no rider found with the given id", httpStatus.INTERNAL_SERVER_ERROR ); return reject(err); } }) .error(e => { // console.log('error while searching rider '); const err = new APIError( `error while searching user ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); reject(err); }) ); } } export default requestTripHandler; <file_sep>import httpStatus from "http-status"; import APIError from "../helpers/APIError"; import config from "../../config/env"; import TripSchema from "../models/trip"; import TripRequestSchema from "../models/trip-request"; import UserSchema from "../models/user"; function tripDetails(req, res, next) { const limit = req.query.limit ? req.query.limit : config.limit; const pageNo = req.query.pageNo ? req.query.pageNo : 1; const skip = pageNo ? (pageNo - 1) * limit : config.skip; const filter = req.query.filter ? req.query.filter : config.tripFilter; TripSchema.getCount(filter) .then(totalTripRecords => { //eslint-disable-line const returnObj = { success: false, message: "no of trips are zero", data: null, meta: { totalNoOfPages: Math.ceil(totalTripRecords / limit), limit, currPageNo: pageNo, totalRecords: totalTripRecords.length } }; if (totalTripRecords < 1) { returnObj.success = true; returnObj.data = []; returnObj.meta.totalNoOfPages = 0; returnObj.meta.limit = limit; returnObj.meta.currPageNo = 0; returnObj.meta.totalRecords = 0; return res.send(returnObj); } if (skip > totalTripRecords) { const err = new APIError( "Request Page does not exists", httpStatus.NOT_FOUND ); return next(err); } TripSchema.list({ skip, limit, filter }) .then(tripData => { if (tripData.length !== 0) { for (let i = 0; i < tripData.length; i++) { //eslint-disable-line tripData[i] = transformReturnObj(tripData[i]); } returnObj.success = true; returnObj.message = "trip object retrieved"; returnObj.data = tripData; } else { returnObj.success = true; returnObj.message = "no trip details available"; } res.send(returnObj); }) .error(e => { const err = new APIError( `Error occured while retreiving trip object ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); }) .error(e => { const err = new APIError( `Error occured while counting trip object ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); } function getOngoingTripDetails(req, res, next) { addDriverRider() .then(returnObj => { returnObj.success = true; returnObj.message = `no of trips are ${returnObj.data.length}`; returnObj.meta.totalRecords = `${returnObj.data.length}`; res.send(returnObj); }) .catch(err => { next(err); }); } function addDriverRider() { return new Promise((resolve, reject) => { TripSchema.find({ tripStatus: "onTrip" }) .then(ongoingTripRecords => { const returnObj = { success: true, message: "no of trips are zero", data: null, meta: { totalRecords: ongoingTripRecords.length } }; returnObj.data = ongoingTripRecords; const r1 = JSON.parse(JSON.stringify(returnObj)); addRider(r1) .then(responseObj => addDriver(responseObj)) .then(responseObj => resolve(responseObj)) .catch(err => { reject(err); }); }) .catch(err => { reject(err); }); // find catch }); } function getSpecificUserTripDetails(req, res, next) { const userId = req.params.userId; const returnObj = { success: false, message: "user Id is not defined", data: null }; if (userId) { TripSchema.find({ $or: [{ driverId: userId }, { riderId: userId }] }) .then(tripData => { if (tripData) { returnObj.success = true; returnObj.message = "user found and its corresponding trip details"; returnObj.data = tripData; const r1 = JSON.parse(JSON.stringify(returnObj)); addRider(r1) .then(responseObj => addDriver(responseObj)) .then(responseObj => { responseObj.success = true; responseObj.message = `no of trips are ${ responseObj.data.length }`; res.send(responseObj); }) .catch(err => { next(err); }); } else { returnObj.success = false; returnObj.message = "user trip details not found with the given id"; returnObj.data = null; res.send(returnObj); } // res.send(returnObj); }) .catch(err => { next(err); }); } else { res.send(returnObj); } } function getRecentReviewedTripDetails(req, res, next) { TripSchema.find({ tripStatus: "endTrip" }) .then(recentReviewedTripRecords => { const returnObj = { success: true, message: "no of trips are zero", data: null, meta: { totalRecords: recentReviewedTripRecords.length } }; returnObj.data = recentReviewedTripRecords; const r1 = JSON.parse(JSON.stringify(returnObj)); addRider(r1) .then(responseObj => addDriver(responseObj)) .then(responseObj => { responseObj.success = true; responseObj.message = `no of trips are ${responseObj.data.length}`; responseObj.meta.totalRecords = `${responseObj.data.length}`; res.send(responseObj); }) .catch(err => { next(err); }); }) .catch(err => { next(err); }); } function addRider(returnObj) { return new Promise((resolve, reject) => { Promise.all( returnObj.data.map((item, index) => UserSchema.findOneAsync({ _id: item.riderId }).then(result => { returnObj.data[index] = Object.assign({}, returnObj.data[index], { profileUrl: result.profileUrl, riderName: result.fname + result.lname }); return Promise.resolve(returnObj.data[index]); }) ) ) .then(rider => { if (rider) { console.log("Rider created", rider); //eslint-disable-line } return resolve(returnObj); }) .catch(err => { if (err) { console.log("error", err); //eslint-disable-line } return reject(returnObj); }); }); } function addDriver(returnObj) { return new Promise((resolve, reject) => { Promise.all( returnObj.data.map((item, index) => UserSchema.findOneAsync({ _id: item.driverId }).then(result => { returnObj.data[index] = Object.assign({}, returnObj.data[index], { driverName: result.fname + result.lname }); return Promise.resolve(returnObj.data[index]); }) ) ) .then(driver => { if (driver) { console.log("Driver created", driver); //eslint-disable-line } return resolve(returnObj); }) .catch(err => { if (err) { console.log("err", err); //eslint-disable-line } return reject(returnObj); }); }); } function createNewTrip(req, res, next) { const riderId = req.body.riderId; const driverId = req.body.driverId; UserSchema.findAsync({ $or: [ { $and: [{ userType: "rider" }, { _id: riderId }] }, { $and: [{ userType: "driver" }, { _id: driverId }] } ] }) .then(foundUserData => { //eslint-disable-line if (foundUserData.length !== 2) { const err = new APIError( "rider or driver does not exist", httpStatus.BAD_REQUEST ); return next(err); } if ( foundUserData[0].currTripId !== null || foundUserData[1].currTripId !== null ) { let errMsg = ""; if ( foundUserData[0].userType === "rider" && foundUserData[0].currTripId === null ) { errMsg += "Rider is On Trip"; } if ( foundUserData[1].userType === "driver" && foundUserData[1].currTripId === null ) { errMsg += "Driver is On Trip"; } const err = new APIError(errMsg, httpStatus.BAD_REQUEST); return next(err); } const tripObj = new TripSchema({ riderId: req.body.riderId, driverId: req.body.driverId, srcLoc: req.body.srcLoc ? req.body.srcLoc : [1, 2], destLoc: req.body.destLoc ? req.body.destLoc : [3, 4], pickUpAddress: req.body.pickUpAddress, destAddress: req.body.destAddress }); tripObj .saveAsync() .then(newTripObj => { const returnObj = { success: true, message: "trip object created", data: newTripObj, meta: null }; const tripRequest = new TripRequestSchema({ riderId: newTripObj.riderId, driverId: newTripObj.driverId, tripId: newTripObj._id, //eslint-disable-line srcLoc: newTripObj.srcLoc, destLoc: newTripObj.destLoc, pickUpAddress: newTripObj.pickUpAddress, destAddress: newTripObj.destAddress, tripRequestStatus: "completed", tripRequestIssue: "noIssue" }); tripRequest .saveAsync() .then(() => { UserSchema.updateAsync( { $or: [ { _id: newTripObj.riderId }, { _id: newTripObj.driverId } ] }, { $set: { currTripId: newTripObj._id, currTripState: "trip" } }, { multi: true } ) //eslint-disable-line .then(() => { res.send(returnObj); }) .error(e => { const err = new APIError( `Error occured while Updating User Object ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); }) .error(e => { const err = new APIError( `Error occured while Saving Trip Request Object ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); }) .error(e => { const err = new APIError( `Error occured while saving trip object ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); }) .error(e => { const err = new APIError( `Error occured while finding rider or driver ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); } function updateTrip(req, res, next) { const tripId = req.body._id; //eslint-disable-line const tripObj = { riderId: req.body.riderId, driverId: req.body.driverId, srcLoc: req.body.srcLoc ? req.body.srcLoc : [1, 2], destLoc: req.body.destLoc ? req.body.destLoc : [2, 2], pickUpAddress: req.body.pickUpAddress ? req.body.pickUpAddress : "new Dehli", destAddress: req.body.destAddress ? req.body.destAddress : "mumbai", tripAmt: req.body.tripAmt ? req.body.tripAmt : 0, tripIssue: req.body.tripIssue ? req.body.tripIssue : "noIssue", tripStatus: req.body.tripStatus ? req.body.tripStatus : "OnTrip", tripEndTime: req.body.tripEndTime ? req.body.tripEndTime : null, paymentMode: req.body.paymentMode ? req.body.paymentMode : "cash", taxiType: req.body.taxiType ? req.body.taxiType : "taxiMini", riderRatingByDriver: req.body.riderRatingByDriver ? req.body.riderRatingByDriver : 0, driverRatingByRider: req.body.driverRatingByRider ? req.body.driverRatingByRider : 0, riderReviewByDriver: req.body.riderReviewByDriver ? req.body.riderReviewByDriver : null, driverReviewByRider: req.body.driverReviewByRider ? req.body.driverReviewByRider : null, seatBooked: req.body.seatBooked ? req.body.seatBooked : 1 }; TripSchema.findOneAndUpdateAsync( { _id: tripId }, { $set: tripObj }, { new: 1, runValidators: true } ) .then(updatedTripObj => { //eslint-disable-line const returnObj = { success: false, message: "unable to update trip object as trip id provided didnt match", data: null, meta: null }; if (updatedTripObj) { returnObj.success = true; returnObj.message = "trip object updated"; returnObj.data = updatedTripObj; if (updatedTripObj.tripStatus === "endTrip") { UserSchema.updateAsync( { $or: [ { _id: updatedTripObj.riderId }, { _id: updatedTripObj.driverId } ] }, { $set: { currTripId: null, currTripState: null } }, { new: true, multi: true } ) .then(() => res.send(returnObj)) // sending the updated tripObj in the fronted .error(e => { const err = new APIError( `Error occured while updatating User Object ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); return next(err); }); } } else { const err = new APIError( "Trip Id did not matched", httpStatus.BAD_REQUEST ); return next(err); } // res.send(returnObj); }) .error(e => { const err = new APIError( `Error occured while updatating trip object ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); } function loadTripDetails(req, res, next) { const tripId = req.params.tripId; TripSchema.get(tripId) .then(tripData => { const returnObj = { success: true, message: "trip object found", data: transformReturnObj(tripData) }; res.send(returnObj); }) .error(e => next(e)); } function tripRevenueGraph(req, res, next) { let lastYearDate = new Date(); lastYearDate.setDate(1); lastYearDate.setMonth(lastYearDate.getMonth() - 11); lastYearDate = new Date(lastYearDate); const returnObj = { success: false, message: "no of trips avaliable", data: [], lastYearDate }; TripSchema.aggregateAsync([ { $match: { bookingTime: { $gt: lastYearDate } } }, { $project: { year: { $year: "$bookingTime" }, month: { $month: "$bookingTime" }, tripAmt: "$tripAmt", tripStatus: "$tripStatus" } }, { $match: { tripStatus: "endTrip" } }, { $group: { _id: "RevenueGraph", 1: { $sum: { $cond: [{ $eq: ["$month", 1] }, "$tripAmt", 0] } }, 2: { $sum: { $cond: [{ $eq: ["$month", 2] }, "$tripAmt", 0] } }, 3: { $sum: { $cond: [{ $eq: ["$month", 3] }, "$tripAmt", 0] } }, 4: { $sum: { $cond: [{ $eq: ["$month", 4] }, "$tripAmt", 0] } }, 5: { $sum: { $cond: [{ $eq: ["$month", 5] }, "$tripAmt", 0] } }, 6: { $sum: { $cond: [{ $eq: ["$month", 6] }, "$tripAmt", 0] } }, 7: { $sum: { $cond: [{ $eq: ["$month", 7] }, "$tripAmt", 0] } }, 8: { $sum: { $cond: [{ $eq: ["$month", 8] }, "$tripAmt", 0] } }, 9: { $sum: { $cond: [{ $eq: ["$month", 9] }, "$tripAmt", 0] } }, 10: { $sum: { $cond: [{ $eq: ["$month", 10] }, "$tripAmt", 0] } }, 11: { $sum: { $cond: [{ $eq: ["$month", 11] }, "$tripAmt", 0] } }, 12: { $sum: { $cond: [{ $eq: ["$month", 12] }, "$tripAmt", 0] } } } } ]) .then(revenueGraphDocs => { returnObj.success = true; returnObj.message = "revenue graph for the trip"; returnObj.data = revenueGraphDocs; res.send(returnObj); }) .error(e => { const err = new APIError( `Error occured while computing revenue graph ${e}`, httpStatus.INTERNAL_SERVER_ERROR ); next(err); }); } function transformReturnObj(tripData) { if (tripData instanceof Object) { tripData = tripData.toObject(); if (tripData.riderId) { tripData.rider = tripData.riderId; tripData.riderId = tripData.rider._id ? tripData.rider._id : null; //eslint-disable-line } if (tripData.driverId) { tripData.driver = tripData.driverId; tripData.driverId = tripData.driver._id ? tripData.driver._id : null; //eslint-disable-line } } return tripData; } export default { tripDetails, getOngoingTripDetails, getRecentReviewedTripDetails, createNewTrip, updateTrip, loadTripDetails, tripRevenueGraph, getSpecificUserTripDetails }; <file_sep>/* eslint-disable */ import SocketStore from '../../service/socket-store.js'; function dashboardHandler() { // console.log(socket, 'socket in dashboardHandler'); // SocketStore.display(); // SocketStore.emitByUserId( // '5<KEY>', // 'getDriverDetails', // 'test' // ); // const data = { // name: 'admin', // }; console.log(SocketStore); // socket.emit('getDriverDetails', data); // SocketStore.emitByUserId( // '5<KEY>', // 'getDriverDetails', // data // ); // SocketStore.emitByUserId(tripRequestObj.riderId, 'socketError', { message: 'error while updating tripRequestStatus based on distance', data: err }); // SocketStore.emitByUserId(tripRequestObj.driverId, 'socketError', { message: 'error while updating tripRequestStatus based on distance', data: err }); } export default dashboardHandler; <file_sep>import Promise from "bluebird"; import mongoose from "mongoose"; import config from "./config/env"; import app from "./config/express"; import socketServer from "./config/socket-server"; // promisify mongoose Promise.promisifyAll(mongoose); // connect to mongo db mongoose.connect( config.db, { bufferMaxEntries: 0, socketTimeoutMS: 0, keepAlive: true, useUnifiedTopology: true, useCreateIndex: true, useNewUrlParser: true, useFindAndModify: false }, () => { if (config.env === "test") { mongoose.connection.db.dropDatabase(); } } ); mongoose.connection.on("error", () => { throw new Error(`unable to connect to database: ${config.db}`);s }); const debug = require("debug")("Taxi-app-backend-web-dashboard:index"); // starting socket server socketServer.startSocketServer(app); // listen on port config.port app.listen(process.env.PORT || config.port, () => { console.log( `server started on Port: ${config.port} Environment:${config.env}` ); }); export default app; <file_sep>import httpStatus from "http-status"; import jwt from "jsonwebtoken"; import { get } from "lodash"; import APIError from "../helpers/APIError"; import { fetchReturnObj } from "../service/transform-response"; import config from "../../config/env"; import Notification from '../models/notification'; import User from '../models/user'; import sendNotification from "../service/pushNotification"; function createNotification(type, data) { switch (type) { case 'followed': return saveInDB(data); case 'post': return makePostNotificationData(data); } } function makePostNotificationData(notifyData) { User.find({ followings: { $in: [notifyData.userId] } }) .then(result => { let allPromises; const data = result.map(item => { sendNotification(item._id, 'New post by your friend'); return new Notification({ userId: item._id, type: 'post', link: notifyData.postId, toDisplayUser: notifyData.userId, date: Date.now() }) }) saveInDB(data); }) .catch(error => { console.log(error); }) } function saveInDB(data) { if (Array.isArray(data)) { return Notification.insertMany(data); } sendNotification(data.userId, 'You have a new follower'); const notification = new Notification(data); return notification.saveAsync().catch(error => { console.log(error) }); } function getNotification(req, res, next) { let notifications; Notification.find({ userId: req.user._id }) .populate('toDisplayUser', 'fname lname userName profileUrl') .then(result => { notifications = result; Notification.updateMany( { userId: req.user._id }, { hasRead: true } ) .then(value => { return res.send({ success: true, message: 'Your Notification List', data: notifications }) }) }) .catch(error => { return res.send({ success: false, message: 'Failed to fetch your notification list', data: error }) }) } function markNotificationAsRead(req, res, next) { Notification.findOneAndUpdate({ _id: req.params.id }, { hasRead: true }).then(result => { if (!result) { return res.send({ success: false, message: 'Notification does not exist', data: {} }) } return res.send({ success: true, message: 'Marked as read', data: {} }) }).catch(err => { return res.send({ success: false, message: 'Failed to mark as read', data: err }) }) } function deleteNotification(req, res, next) { Notification.findOneAndDelete({ _id: req.params.id }).then(result => { if (!result) { return res.send({ success: false, message: 'Notification does not exist', data: {} }) } return res.send({ success: true, message: 'Notification deleted', data: {} }) }).catch(err => { return res.send({ success: false, message: 'Failed to delete notification', data: err }) }) } export default { createNotification, getNotification, markNotificationAsRead, deleteNotification }; <file_sep>/* eslint-disable */ import ServerConfig from '../models/serverConfig'; //eslint-disable-line import User from '../models/user'; function mobileVerify(req, res, next) { } function emailVerify(req, res, next) { User.findOneAsync({ email: req.query.email }) //eslint-disable-next-line .then(foundUser => { if (foundUser) { const host=req.get('host'); console.log(req.protocol+":/"+req.get('host')); if((req.protocol+"://"+req.get('host'))==("http://"+host)) { console.log("Domain is matched. Information is from Authentic email"); if(req.query.check === foundUser.otp) { User.findOneAndUpdateAsync({ email: req.query.email }, { $set: { emailVerified: true } }, { new: true }) //eslint-disable-line .then((updateUserObj) => { //eslint-disable-line if (updateUserObj) { const returnObj = { success: true, message: 'Email verified', data: {} }; // returnObj.data.user = updateUserObj; returnObj.success = true; return res.send(returnObj); } }) .error((e) => { const err = new APIError(`error in updating user details while login ${e}`, httpStatus.INTERNAL_SERVER_ERROR); next(err); }); console.log("Email is verified"); res.end("<h1>Email is been Successfully verified</h1>"); } else { console.log("Email is not verified"); res.end("<h1>Bad Request</h1>"); } } } }); } export default { mobileVerify, emailVerify }; <file_sep>import mongoose from 'mongoose'; const Schema = mongoose.Schema; /** * AppConfig Schema */ const ServerConfigSchema = new mongoose.Schema({ type: { type: Schema.Types.Mixed }, key: { type: String, required: true, unique: true }, value: { type: Schema.Types.Mixed }, }); export default mongoose.model('ServerConfig', ServerConfigSchema); <file_sep>import httpStatus from "http-status"; import mongoose from "mongoose"; import APIError from "../helpers/APIError"; import UserSchema from "./user"; const debug = require("debug")("Taxi-app-backend-web-dashboard: trip model"); const Schema = mongoose.Schema; const TripSchema = new Schema({ riderId: { type: Schema.Types.ObjectId, ref: "User", default: null }, driverId: { type: Schema.Types.ObjectId, ref: "User", default: null }, srcLoc: { type: [Number], index: "2d" }, destLoc: { type: [Number], index: "2d" }, pickUpAddress: { type: String, default: null }, destAddress: { type: String, default: null }, latitudeDelta: { type: Number, default: 0.0123 }, longitudeDelta: { type: Number, default: 0.0123 }, paymentMode: { type: String, default: "CASH" }, paymentStatus: { type: String, default: null }, receiptUrl: { type: String, default: null }, tripAmt: { type: Number, default: 0 }, tripDist: { type: Number, default: 0 }, bookingTime: { type: Date, default: Date.now }, tripEndTime: { type: Date, default: null }, travelTime: { type: Number, default: 0 }, taxiType: { type: String, default: "TaxiGo" }, riderRatingByDriver: { type: Number, default: 0 }, driverRatingByRider: { type: Number, default: 0 }, riderReviewByDriver: { type: String, default: null }, driverReviewByRider: { type: String, default: null }, seatBooked: { type: Number, default: 0 }, tripStatus: { type: String, default: "onTrip" }, tripIssue: { type: String, default: "noIssue" }, roadMapUrl: { type: String, default: null } }); // TripSchema.path("riderId").validate((riderId, respond) => { // debug(`inside validator with riderId value ->${riderId}`); // return UserSchema.findByIdAsync(riderId).then(riderData => { // if (riderData) { // return respond(true); // } else { // debug(`rider validation failed ${riderData}`); // return respond(false); // } // }); // }, "Invalid Rider Id"); // TripSchema.path("driverId").validate((driverId, respond) => { // debug(`inside validator with driverId value ->${driverId}`); // return UserSchema.findByIdAsync(driverId).then(driverData => { // if (driverData) { // return respond(true); // } else { // debug(`driver validation failed ${driverData}`); // return respond(false); // } // }); // }, "Invalid DriverId"); TripSchema.statics = { /** * List users in descending order of 'createdAt' timestamp. * @param {number} skip - Number of users to be skipped. * @param {number} limit - Limit number of users to be returned. * @returns {Promise<User[]>} */ list({ skip, limit, filter } = {}) { let searchObj = {}; switch (filter) { case "Ongoing": searchObj = {}; searchObj.tripStatus = "onTrip"; break; case "Completed": searchObj = {}; searchObj.tripStatus = "endTrip"; break; default: searchObj = {}; } return this.find(searchObj) .sort({ _id: -1 }) .select("-__v") .skip(skip) .limit(limit) .populate("riderId driverId") .execAsync(); }, get(tripId) { return this.findById(tripId) .populate("riderId driverId") .execAsync() .then(tripObj => { if (tripObj) { return tripObj; } const err = new APIError("No such trip exists!", httpStatus.NOT_FOUND); return Promise.reject(err); }); }, getCount(filter) { let searchObj = {}; switch (filter) { case "Ongoing": searchObj = {}; searchObj.tripStatus = "onTrip"; break; case "Completed": searchObj = {}; searchObj.tripStatus = "endTrip"; break; default: searchObj = {}; } return this.count(searchObj).execAsync(); } }; export default mongoose.model("trip", TripSchema); <file_sep>import mongoose from "mongoose"; const Schema = mongoose.Schema; const PostSchema = new Schema({ userId: { type: Schema.Types.ObjectId, ref: "User", required: true }, tripId: { type: Schema.Types.ObjectId, ref: "trip" }, caption: { type: String, default: null }, imageUrl: { type: String, default: null }, tags: { type: [Schema.Types.ObjectId] }, longAddress: { type: String, default: null }, shortAddress: { type: String, default: null }, loc: { type: [Number], index: "2d" }, postedAt: { type: Date, default: Date.now() } }); export default mongoose.model("post", PostSchema);
d33164ff10f9b8c2ff9a1a63e648d56191eafdf0
[ "JavaScript", "Markdown" ]
39
JavaScript
simranve/klixx_28042020
2d40598646e25e4cb539fa5744fcaa152633c6f1
edf45fa32f1b1f8cd0b12379b7fb273696bbe1c4
refs/heads/master
<repo_name>jimon221/TeethWhiteningSimulator<file_sep>/js/common.js var tooth_list = [ //ホワイトニング前 [ //上 [ {id: 0, color_id: 0,}, {id: 1, color_id: 0,}, {id: 2, color_id: 0,}, {id: 3, color_id: 0,}, {id: 4, color_id: 0,}, {id: 5, color_id: 0,}, {id: 6, color_id: 0,}, {id: 7, color_id: 0,}, ], //下 [ {id: 0, color_id: 0,}, {id: 1, color_id: 0,}, {id: 2, color_id: 0,}, {id: 3, color_id: 0,}, {id: 4, color_id: 0,}, {id: 5, color_id: 0,}, {id: 6, color_id: 0,}, {id: 7, color_id: 0,}, ] ], //ホワイトニング後 [ //上 [ {id: 0, color_id: 0,}, {id: 1, color_id: 0,}, {id: 2, color_id: 0,}, {id: 3, color_id: 0,}, {id: 4, color_id: 0,}, {id: 5, color_id: 0,}, {id: 6, color_id: 0,}, {id: 7, color_id: 0,}, ], //下 [ {id: 0, color_id: 0,}, {id: 1, color_id: 0,}, {id: 2, color_id: 0,}, {id: 3, color_id: 0,}, {id: 4, color_id: 0,}, {id: 5, color_id: 0,}, {id: 6, color_id: 0,}, {id: 7, color_id: 0,}, ] ] ] var color_list = [ {id: 0, name: "W1", code: "#F7F3F4"}, {id: 1, name: "W2", code: "#F6EFE5"}, {id: 2, name: "W3", code: "#F8F3F0"}, {id: 3, name: "B1", code: "#F6EDDC"}, {id: 4, name: "A1", code: "#F3E2C6"}, {id: 5, name: "B2", code: "#F7E7C3"}, {id: 6, name: "D2", code: "#F8ECC6"}, {id: 7, name: "A2", code: "#F7E6CA"}, {id: 8, name: "C1", code: "#EDDDBC"}, {id: 9, name: "C2", code: "#ECD8B5"}, {id: 10, name: "D4", code: "#F2DFB5"}, {id: 11, name: "A3", code: "#F3DDAC"}, {id: 12, name: "D3", code: "#ECD8B5"}, {id: 13, name: "B3", code: "#F3DDAC"}, {id: 14, name: "A3.5", code: "#E8CF97"}, {id: 15, name: "B4", code: "#F1D898"}, {id: 16, name: "C3", code: "#E6D4A2"}, {id: 17, name: "A4", code: "#E6C576"}, {id: 18, name: "C4", code: "#E7C887"}, ] var disp_color_list = [ [0, 1, 2], [4, 7, 11, 14, 17], [3, 5, 13, 15], [8, 9, 16, 18], [6, 12, 10] ] var soeji_list = [4, 3, 2, 1, 1, 2, 3, 4]; var current_color_id = 0; window.onload = function () { make_palette(); make_mihon(); make_disp(); get_color(0); } function make_disp() { try { make_before_disp(); make_after_disp(); } catch (e) { console.log(tooth_list); console.log(e); reset_disp(0); reset_disp(1); } } function make_before_disp() { const before_disp = document.getElementById('before_disp'); var ret = ""; ret = ret + "<table id=\"nav\">"; ret = ret + "<tr>"; soeji_list.forEach(function (soeji) { ret = ret + "<td class='soeji'>" + soeji + "</td>"; }); ret = ret + "</tr>"; ret = ret + "<tr>"; tooth_list[0][0].forEach(function (color) { ret = ret + "<td class='tooth' onclick=\"set_color(0,0," + color.id + ")\" style=\"background: " + color_list[color.color_id].code + "\">" + color_list[color.color_id].name + "</td>"; }); ret = ret + "</tr>"; ret = ret + "<tr>"; tooth_list[0][1].forEach(function (color) { ret = ret + "<td class='tooth' onclick=\"set_color(0,1," + color.id + ")\" style=\"background: " + color_list[color.color_id].code + "\">" + color_list[color.color_id].name + "</td>"; }); ret = ret + "</tr>"; ret = ret + "<tr>"; soeji_list.forEach(function (soeji) { ret = ret + "<td class='soeji'>" + soeji + "</td>"; }); ret = ret + "</tr>"; ret = ret + "</table>"; before_disp.innerHTML = ret; } function make_after_disp() { const after_disp = document.getElementById('after_disp'); var ret = ""; ret = ret + "<table id=\"nav\">"; ret = ret + "<tr>"; soeji_list.forEach(function (soeji) { ret = ret + "<td class='soeji'>" + soeji + "</td>"; }); ret = ret + "</tr>"; ret = ret + "<tr>"; tooth_list[1][0].forEach(function (color) { ret = ret + "<td class='tooth' onclick=\"set_color(1,0," + color.id + ")\" style=\"background: " + color_list[color.color_id].code + "\">" + color_list[color.color_id].name + "</td>"; }); ret = ret + "</tr>"; ret = ret + "<tr>"; tooth_list[1][1].forEach(function (color) { ret = ret + "<td class='tooth' onclick=\"set_color(1,1," + color.id + ")\" style=\"background: " + color_list[color.color_id].code + "\">" + color_list[color.color_id].name + "</td>"; }); ret = ret + "<tr>"; soeji_list.forEach(function (soeji) { ret = ret + "<td class='soeji'>" + soeji + "</td>"; }); ret = ret + "</tr>"; ret = ret + "</table>"; after_disp.innerHTML = ret; } function sync_disp() { tooth_list[0][0].forEach(function (color) { tooth_list[1][0][color.id].color_id = color.color_id; }); tooth_list[0][1].forEach(function (color) { tooth_list[1][1][color.id].color_id = color.color_id; }); make_after_disp(); } function make_palette() { const palette = document.getElementById('palette'); var ret = ""; ret = ret + "<table id=\"nav\">"; disp_color_list.forEach(function (temp_list) { ret = ret + "<tr>"; temp_list.forEach(function (color_id) { color = color_list[color_id]; ret = ret + "<td class='tooth' id='palette_" + color.id + "' onclick='get_color(" + color.id + ")' style='background:" + color.code + "'>" + color.name + "</td>"; }); ret = ret + "</tr>"; }); ret = ret + "</table>"; palette.innerHTML = ret; } function make_mihon() { const palette = document.getElementById('mihon'); var ret = ""; ret = ret + "<table id=\"nav\">"; color_list.forEach(function (color) { ret = ret + "<td class='tooth' id='mihon_" + color.id + "' onclick='get_color(" + color.id + ")' style='background:" + color.code + "'>" + color.name + "</td>"; }); ret = ret + "</table>"; palette.innerHTML = ret; } function up_disp() { tooth_list[1][0].forEach(function (color) { tooth_list[1][0][color.id].color_id += 1; if (tooth_list[1][0][color.id].color_id == color_list.length) { tooth_list[1][0][color.id].color_id -= 1; } }); tooth_list[1][1].forEach(function (color) { tooth_list[1][1][color.id].color_id += 1; if (tooth_list[1][1][color.id].color_id == color_list.length) { tooth_list[1][1][color.id].color_id -= 1; } }); make_after_disp(); } function down_disp() { tooth_list[1][0].forEach(function (color) { tooth_list[1][0][color.id].color_id -= 1; if (tooth_list[1][0][color.id].color_id < 0) { tooth_list[1][0][color.id].color_id += 1; } }); tooth_list[1][1].forEach(function (color) { tooth_list[1][1][color.id].color_id -= 1; if (tooth_list[1][1][color.id].color_id < 0) { tooth_list[1][1][color.id].color_id += 1; } }); make_after_disp(); } function set_color(zengo, jouge, id) { select_color = tooth_list[zengo][jouge][id]; select_color.color_id = current_color_id; make_disp(); } function get_color(id) { current_color_id = id; reset_paret(); const palette = document.getElementById('palette_' + id); palette.style.outline = "1px solid #d76787"; palette.style.outlineOffset = "-1px"; const mihon = document.getElementById('mihon_' + id); mihon.style.outline = "1px solid #d76787"; mihon.style.outlineOffset = "-1px"; } function reset_disp(zengo) { tooth_list[zengo][0].forEach(function (select_color) { select_color.color_id = 0; }); tooth_list[zengo][1].forEach(function (select_color) { select_color.color_id = 0; }); if (zengo == 0) { make_before_disp(); } else { make_after_disp(); } } function reset_paret() { color_list.forEach(function (color) { const palette = document.getElementById('palette_' + color.id); palette.style.outline = ""; palette.style.outlineOffset = ""; const mihon = document.getElementById('mihon_' + color.id); mihon.style.outline = ""; mihon.style.outlineOffset = ""; }); } function save_disp() { var ret = ""; tooth_list.forEach(function (temp_list) { temp_list.forEach(function (temp_list2) { temp_list2.forEach(function (tooth) { ret = "" + ret + tooth.color_id.toString(color_list.length); }); }); }); document.getElementById("pass").value = ret; } function load_disp() { var password = document.getElementById("pass").value; var count = 0; tooth_list.forEach(function (temp_list) { temp_list.forEach(function (temp_list2) { temp_list2.forEach(function (tooth) { tooth.color_id = parseInt(password.charAt(count), color_list.length); count++; }); }); }); make_disp(); }<file_sep>/readme.md https://jimon221.github.io/TeethWhiteningSimulator/
874affc43f95c46c288926efde7d094ef196049e
[ "JavaScript", "Markdown" ]
2
JavaScript
jimon221/TeethWhiteningSimulator
06bf078cf23e40a21d1c97ef6badf829864ef2ae
db22832fa7b9a29a3305c2ae2b8440a21d85dbb9
refs/heads/master
<repo_name>etomanon/blog<file_sep>/client/src/redux/initialState.ts import { initialPost, PostState } from "./post/reducers"; export const initialState = { post: initialPost }; export interface AppState { post: Readonly<PostState>; } <file_sep>/client/src/ky/ky.ts import kyDefault from "ky"; const apiUrl = `http://localhost:8080`; const ky = kyDefault.extend({ prefixUrl: `${apiUrl}/api/` }); export default ky; <file_sep>/client/src/components/text/styled/TextHeader.ts import styled from "styled-components"; import { display, DisplayProps, space, SpaceProps, textAlign, TextAlignProps } from "styled-system"; // eslint-disable-next-line export const TextHeader = styled.div< DisplayProps & SpaceProps & TextAlignProps >` display: inline-block; font-size: 2.2rem; ${display} ${space}; ${textAlign}; `; <file_sep>/client/src/theme/theme.ts export const theme = { colors: { primary: "#4B367C", primary75: "rgba(75, 54, 124, .75)", primary50: "rgba(75, 54, 124, .5)", primary25: "rgba(75, 54, 124, .25)", error: "#E21212", text: "#333", greyLight: "#D3D3D3", black: "#000" }, breakpoints: ["40em", "76em"], fontSizes: ["1.2rem", "1.4rem", "1.6rem", "2rem", "2.4rem", "3.2rem"], space: [0, "0.4rem", "0.8rem", "1.6rem", "3.2rem"] }; export type Theme = typeof theme; <file_sep>/server/api/routes.js module.exports = function(app) { const controller = require('./controller'); app.route('/api/post') .get(controller.getAll) .post(controller.create); app.route("/api/post/:id") .get(controller.get) .delete(controller.delete); app.route("/api/categories") .get(controller.getCategories); };<file_sep>/client/src/types/post.ts export interface PostProps { id: string; title: string; categories: string[]; content: string; dateCreated: number; } <file_sep>/client/src/modules/postCreate/_formik.ts import * as Yup from "yup"; export const ValidationSchema = Yup.object().shape({ id: Yup.string().notRequired(), title: Yup.string() .max(64, "Max 64 characters") .required("Required"), categories: Yup.array() .of<string>(Yup.string()) .min(1, "Add at least one category"), content: Yup.string() .max(1500, "Max 1500 characters") .required("Required") }); export const initialValues = { title: "", categories: [], content: "" }; export type FormValues = Yup.InferType<typeof ValidationSchema>; <file_sep>/client/src/redux/reducerRoot.ts import { combineReducers } from "redux"; import { reducerPost } from "./post/reducers"; export const reducerRoot = combineReducers({ post: reducerPost }); <file_sep>/client/src/utils/_category.ts import ky from "../ky/ky"; export const categoriesLoad = () => ky.get("categories").json<string[]>(); <file_sep>/client/src/components/wrapper/styled/Wrapper.ts import styled from "styled-components"; import { space, SpaceProps } from "styled-system"; export const WrapperRouter = styled.main<SpaceProps>` max-width: 85em; width: 100%; flex: 1; ${space} `; export const WrapperApp = styled.div<SpaceProps>` display: flex; flex-direction: column; min-height: 100vh; `; <file_sep>/client/src/components/post/styled/Post.ts import styled from "styled-components"; import { Delete } from "styled-icons/material/Delete"; interface ShowProps { show: boolean; } export const PostDelete = styled(Delete)<ShowProps>` position: absolute; right: 0; top: 1rem; color: ${({ theme }) => theme.colors.greyLight}; height: 3rem; opacity: ${({ show }) => (show ? 1 : 0)}; transition: opacity 0.2s ease-in, color 0.2s ease-in; &:hover { color: ${({ theme }) => theme.colors.error}; } @media (hover: none) { color: ${({ theme }) => theme.colors.error}; } `; export const PostLine = styled.div<ShowProps>` width: ${({ show }) => (show ? "100%" : "0")}; left: 0; bottom: -4px; height: 2px; background: ${({ theme }) => theme.colors.primary}; transition: 0.2s ease-in width; `; export const PostWrapper = styled.div` position: relative; display: flex; flex-direction: column; padding: 1rem 1.4rem; margin-bottom: 4px; width: 100%; cursor: pointer; &::after { content: ""; position: absolute; } &:hover { ${PostDelete} { opacity: 1; } ${PostLine} { width: 100%; } } `; <file_sep>/client/src/components/control/styled/Button.ts import styled from "styled-components"; import { width, WidthProps } from "styled-system"; export const Button = styled.button<WidthProps>` padding: 1rem 1.5rem; font-size: ${({ theme }) => theme.fontSizes[3]}; text-transform: uppercase; letter-spacing: 2px; background: ${({ theme }) => theme.colors.primary}; border: 2px solid ${({ theme }) => theme.colors.primary}; color: #fff; border-radius: 4px; cursor: pointer; transition: all 0.2s ease-in; &:focus { outline: none; box-shadow: none; } &:active { border-style: solid; } &:hover { color: ${({ theme }) => theme.colors.primary}; background: #fff; } ${width} `; <file_sep>/client/src/components/control/styled/Link.ts import styled from "styled-components"; import { space, SpaceProps } from "styled-system"; import { Link as RouterLink } from "react-router-dom"; export const Link = styled(RouterLink)<SpaceProps>` color: ${({ theme }) => theme.colors.primary}; font-size: ${({ theme }) => theme.fontSizes[0]}; font-weight: 700; text-transform: uppercase; text-decoration: none; &:hover { text-decoration: underline; } ${space} `; <file_sep>/server/api/controller.js const low = require("lowdb"); // const uuidv1 = require("uuid/v1"); const db = low("api/db.json"); exports.getAll = function(req, res) { const posts = db .get("posts") .sortBy("createdDate") .reverse() .value(); res.send(posts); }; exports.create = function(req, res) { if (!req.body.title) { res.status(422).send("'title' field must be present in json"); } else { const written = db .get("posts") .push({ id: req.body.id, title: req.body.title, categories: req.body.categories, content: req.body.content, dateCreated: new Date().getTime() }) .last() .write(); res.send(written); } }; exports.get = function(req, res) { const id = req.params.id; if (!id) { res.status(422).send("'id' must be present in params"); } else { const post = db.get("posts").value({ id: id }); if (post.length === 0) { res.status(404).send("id not found, nothing to delete"); } else { res.send(); } } }; exports.delete = function(req, res) { const id = req.params.id; if (!id) { res.status(422).send("'id' must be present in params"); } else { const deleted = db .get("posts") .remove({ id: id }) .write(); if (deleted.length === 0) { res.status(404).send("id not found, nothing to delete"); } else { res.send(); } } }; exports.getCategories = function(req, res) { const posts = db .get("posts") .map("categories") .flatten() .union() .value(); res.send(posts); }; <file_sep>/client/src/components/loader/styled/Loader.ts import styled from "styled-components"; export const LoaderWrapper = styled.div` > div { position: fixed; height: 5px; display: block; width: 100%; background: ${({ theme }) => theme.colors.primary}; margin: 0; top: 0; left: 0; overflow: hidden; z-index: 100; > div { background: #fff; &:before { content: ""; position: absolute; background-color: inherit; top: 0; left: 0; bottom: 0; height: 100%; will-change: left, right; animation: animate 2s cubic-bezier(0.65, 0.815, 0.735, 0.395) infinite; } &:after { content: ""; position: absolute; background-color: inherit; top: 0; left: 0; bottom: 0; height: 100%; will-change: left, right; animation: animate-short 2s cubic-bezier(0.165, 0.84, 0.44, 1) infinite; animation-delay: 1.15s; } } } @keyframes animate { 0% { left: -35%; right: 100%; } 60% { left: 100%; right: -90%; } 100% { left: 100%; right: -90%; } } @keyframes animate-short { 0% { left: -200%; right: 100%; } 60% { left: 107%; right: -8%; } 100% { left: 107%; right: -8%; } } `; <file_sep>/client/src/hook/useCategories.ts import { useState, useEffect } from "react"; import { useSnackbar } from "notistack"; import { Option } from "../types/select"; import { PostProps } from "../types/post"; import { categoriesLoad } from "../utils/_category"; // load categories for Formik Select // pass posts to update categories if posts change export const useCategories = (posts?: PostProps[]) => { const { enqueueSnackbar } = useSnackbar(); const [categories, setCategories] = useState<Option[] | undefined>([]); useEffect(() => { categoriesLoad() .then(categories => { const options = categories.map(c => ({ label: c, value: c })); if (options.length === 0) { setCategories(undefined); } else { setCategories(options); } }) .catch(() => enqueueSnackbar("Could not load categories. Please try again", { variant: "error" }) ); }, [enqueueSnackbar, posts]); return categories; }; <file_sep>/client/src/redux/post/reducers.ts import { ActionType, getType } from "typesafe-actions"; import { Reducer } from "redux"; import { pending, rejected, StateCommon } from "../utils/common"; import { PostProps } from "../../types/post"; import * as actions from "./actions"; export interface PostState extends StateCommon { posts: PostProps[]; } export const initialPost: PostState = { posts: [], pending: false, error: false }; export const reducerPost: Reducer<PostState, PostActions> = ( state = initialPost, action ) => { switch (action.type) { case getType(actions.postGetAsync.request): case getType(actions.postDeleteAsync.request): return pending(state); case getType(actions.postDeleteAsync.failure): case getType(actions.postGetAsync.failure): return rejected(state); case getType(actions.postGetAsync.success): return { ...state, posts: action.payload, pending: false }; case getType(actions.postDeleteAsync.success): return { ...state, posts: state.posts.filter(p => p.id !== action.payload), pending: false }; default: return state; } }; export type PostActions = ActionType<typeof actions>; <file_sep>/client/src/components/formik/styled/Formik.ts import styled, { css } from "styled-components"; const baseStyles = css` padding: 1rem 1.5rem; background: #fff; width: 100%; border: 2px solid transparent; outline: none; border-bottom: 2px solid ${({ theme }) => theme.colors.primary}; &:focus { outline: none; border: 2px solid ${({ theme }) => theme.colors.primary}; border-radius: 4px; } `; export const Input = styled.input` ${baseStyles} `; export const TextArea = styled.textarea` ${baseStyles} resize: none; padding: 1rem; border: 2px solid ${({ theme }) => theme.colors.greyLight}; border-radius: 4px; &:focus { border: 2px solid ${({ theme }) => theme.colors.primary}; } `; <file_sep>/README.md This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app). # Server start * `cd server` * `yarn install` * `yarn start` * started on localhost:8080 # Client start * `cd client` * `yarn install` * `yarn start` * started on localhost:3000<file_sep>/client/src/redux/post/actions.ts import { createAsyncAction } from "typesafe-actions"; import { Dispatch } from "redux"; import ky from "../../ky/ky"; import { PostProps } from "../../types/post"; export const postGetAsync = createAsyncAction( "POST_GET_REQUEST", "POST_GET_SUCCESS", "POST_GET_FAILURE" )<void, PostProps[], void>(); const postGetApi = async (): Promise<PostProps[] | null> => { try { const post = await ky.get("post").json<PostProps[]>(); return post; } catch (error) { console.log(error); return null; } }; // get all posts export const postGet = () => async (dispatch: Dispatch) => { dispatch(postGetAsync.request()); // Get data const post = await postGetApi(); if (post) { dispatch(postGetAsync.success(post)); } else { dispatch(postGetAsync.failure()); } }; const postDeleteApi = async (id: string): Promise<boolean> => { try { await ky.delete(`post/${id}`); return true; } catch (error) { console.log(error); return false; } }; export const postDeleteAsync = createAsyncAction( "POST_DELETE_REQUEST", "POST_DELETE_SUCCESS", "POST_DELETE_FAILURE" )<string, string, void>(); // delete single post export const postDelete = (id: string) => async (dispatch: Dispatch) => { dispatch(postDeleteAsync.request(id)); const postDeleted = await postDeleteApi(id); if (postDeleted) { dispatch(postDeleteAsync.success(id)); } else { dispatch(postDeleteAsync.failure()); } }; <file_sep>/client/src/modules/home/_utils.ts export type ViewsProps = "Normal" | "Category"; export const views: ViewsProps[] = ["Normal", "Category"]; <file_sep>/client/src/redux/post/selectors.ts import { AppState } from "../initialState"; export const selectorPost = (state: AppState) => state.post; <file_sep>/client/src/theme/global.ts import { createGlobalStyle } from "styled-components"; export const GlobalStyles = createGlobalStyle` html { font-size: 62.5%; } html, body { margin: 0; padding: 0; text-rendering: optimizeLegibility; font-family: 'Montserrat', 'Roboto', sans-serif; -webkit-font-smoothing: antialiased; overflow-x: hidden; color: ${({ theme }) => theme.colors.text} } body { font-size: 1.6rem; } .MuiSnackbarContent-message, textarea, input { font-family: 'Montserrat', 'Roboto', sans-serif; font-size: 1.6rem; } h1, h2, h3, h4, h5, h6 { line-height: 1; } ul { margin-top: 0; margin-bottom: 0; } ::placeholder { color: ${({ theme }) => theme.colors.text}; font-family: 'Montserrat', 'Roboto', sans-serif; } `; <file_sep>/client/src/components/header/styled/Header.ts import styled from "styled-components"; import { space, SpaceProps, flexDirection, FlexDirectionProps } from "styled-system"; export const HeaderWrapper = styled.header<SpaceProps & FlexDirectionProps>` display: flex; align-items: center; justify-content: center; flex-wrap: wrap; width: 100%; color: ${({ theme }) => theme.colors.primary}; ${space} ${flexDirection} `; export const Logo = styled.img` display: flex; `; <file_sep>/client/src/components/control/styled/NavLink.ts import styled from "styled-components"; import { NavLink as ReactNavLink } from "react-router-dom"; import { space, SpaceProps } from "styled-system"; export const NavLink = styled(ReactNavLink).attrs({ activeClassName: "active-navlink" })<SpaceProps>` color: ${({ theme }) => theme.colors.primary}; text-decoration: none; &.${p => p.activeClassName} { font-weight: 500; } padding-bottom: 0.5rem; border-bottom: 1px solid transparent; ${space}; &:hover { border-bottom: 1px solid ${({ theme }) => theme.colors.primary}; } `; <file_sep>/client/src/components/category/styled/Category.ts import styled from "styled-components"; import { space, SpaceProps } from "styled-system"; export const Category = styled.div<SpaceProps>` border: ${({ theme }) => `2px solid ${theme.colors.primary}`}; border-radius: 4px; padding: 0.5rem 1rem; ${space}; `; <file_sep>/client/src/components/animation/styled/Animation.ts import styled from "styled-components"; interface FadeProps { state: string; } export const FadeIn = styled.div<FadeProps>` width: 100%; transition: ease-in 0.2s; opacity: ${({ state }) => (state === "entered" ? 1 : 0)}; transform: ${({ state }) => state === "entered" ? "translate(0, 0)" : `translate(0, -2rem)`}; display: ${({ state }) => (state === "exited" ? "none" : "block")}; `;
2842089ebcd2f348e7fb8eb72af703d9c565d35f
[ "JavaScript", "TypeScript", "Markdown" ]
27
TypeScript
etomanon/blog
311869666a6538d598f57ff349d1e03e3875bb04
0ea36b4d34fb83a6fd8324a5ebe14bf87270b9b7
refs/heads/main
<file_sep>using System; using System.Collections; using System.Collections.Generic; using TMPro; using UnityEngine; public class Movement : MonoBehaviour { [SerializeField] private Camera playerCamera; [SerializeField] private Transform playerBody; [SerializeField] private float mouseSensitivity = 100f; [SerializeField] private CharacterController characterController; [SerializeField] private float movementSpeed = 12f; [SerializeField] private float InteractionRange = 1.8f; [SerializeField] private TextMeshProUGUI messageText; [SerializeField] private GameObject messagePanel; public ObjectInteractable focus; public MultipleInteracionConroller focusVariable; private int rayLayerMask; private float xRotation = 0f; [SerializeField] private MoveableObject moveableObject; private Animator animator; private const string animBoolName = "isOpen_Obj_"; private PauseMenu pauseMenu; void Start() { Cursor.lockState = CursorLockMode.Locked; LayerMask iRayLM = LayerMask.NameToLayer("InteractRaycast"); rayLayerMask = 1 << iRayLM.value; pauseMenu = GameObject.Find("EventSystem").GetComponent<PauseMenu>(); } void Update() { MouseLook(); MoventControl(); RaycastInteraction(); } private void RaycastInteraction() { RaycastHit hit; Vector3 rayOrigin = playerCamera.ViewportToWorldPoint(new Vector3(0.5f, 0.5f, 0.5f)); if (Physics.Raycast(rayOrigin, playerCamera.transform.forward, out hit, InteractionRange, rayLayerMask)) { focus = hit.collider.GetComponent<ObjectInteractable>(); focusVariable = hit.collider.GetComponent<MultipleInteracionConroller>(); moveableObject = hit.collider.GetComponent<MoveableObject>(); if (moveableObject != null) { animator = hit.collider.GetComponent<Animator>(); } if (focus != null) { if (focus.allowInteract) { messagePanel.SetActive(true); bool isOpen = false; string animBoolNameNum = "true"; if (moveableObject != null) { animBoolNameNum = animBoolName + moveableObject.objectNumber.ToString(); isOpen = animator.GetBool(animBoolNameNum); if (isOpen) { messageText.text = "(E) Cerrar"; } else { messageText.text = "(E) Abrir"; } } else { messageText.text = focus.GetActionName() + " (E)"; } if ((Input.GetKeyDown(KeyCode.E)) && !pauseMenu.GameIsPaused && !pauseMenu.SeenRecomendations) { focus.PerformActions(); if (moveableObject != null) { animator.enabled = true; animator.SetBool(animBoolNameNum, !isOpen); } } } } else if (focusVariable != null) { if (focusVariable.allowInteract) { messagePanel.SetActive(true); messageText.text = focusVariable.GetActionName() + " (E)"; if ((Input.GetKeyDown(KeyCode.E)) && !pauseMenu.GameIsPaused && !pauseMenu.SeenRecomendations) { focusVariable.PerformActions(); } } } } else { messagePanel.SetActive(false); messageText.text = ""; } } private void MoventControl() { float moveX = Input.GetAxis("Horizontal"); float moveY = Input.GetAxis("Vertical"); Vector3 move = transform.right * moveX + transform.forward * moveY + transform.up * 0; characterController.Move(move * movementSpeed * Time.deltaTime); } private void MouseLook() { float mouseX = Input.GetAxis("Mouse X") * mouseSensitivity * Time.deltaTime; float mouseY = Input.GetAxis("Mouse Y") * mouseSensitivity * Time.deltaTime; xRotation -= mouseY; xRotation = Mathf.Clamp(xRotation, -90f, 90f); playerCamera.transform.localRotation = Quaternion.Euler(xRotation, 0f, 0f); playerBody.Rotate(Vector3.up * mouseX); } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; using TMPro; public class CurrentStatus : MonoBehaviour { [SerializeField] private StateController stateController; [SerializeField] private GameObject energyText; [SerializeField] private GameObject satietyText; [SerializeField] private GameObject thirstText; [SerializeField] private GameObject funText; [SerializeField] private GameObject stressText; [SerializeField] private GameObject happinessText; [SerializeField] private GameObject weightText; [SerializeField] private GameObject brakefatCheck; [SerializeField] private GameObject bathCheck; [SerializeField] private GameObject workCheck; [SerializeField] private GameObject cleanCheck; [SerializeField] private GameObject eatCheck; [SerializeField] private GameObject dinnerCheck; private TimeController timeController; // Start is called before the first frame update void Start() { stateController = GameObject.Find("Player").GetComponent<StateController>(); timeController = GameObject.Find("EventSystem").GetComponent<TimeController>(); } // Update is called once per frame void Update() { if (!stateController.bath.boolValue && stateController.hygiene.value <= 30) { bathCheck.SetActive(true); } else { bathCheck.SetActive(false); } if ((timeController.hourCounter >= 19 && timeController.hourCounter < 22) && stateController.dinner.boolValue) { dinnerCheck.SetActive(true); } else { dinnerCheck.SetActive(false); } if ((timeController.hourCounter >= 13 && timeController.hourCounter < 16) && stateController.eat.boolValue) { eatCheck.SetActive(true); } else { eatCheck.SetActive(false); } if ((timeController.hourCounter >= 8 && timeController.hourCounter < 11) && stateController.brakefast.boolValue) { brakefatCheck.SetActive(true); } else { brakefatCheck.SetActive(false); } if ((timeController.hourCounter >= 9 && timeController.hourCounter < 15) && stateController.work.boolValue) { workCheck.SetActive(true); } else { workCheck.SetActive(false); } if (stateController.weigth.value >= 85) { weightText.SetActive(true); } else { weightText.SetActive(false); } if (stateController.happines.value <= 60) { happinessText.SetActive(true); } else { happinessText.SetActive(false); } if (stateController.stress.value >= 40) { stressText.SetActive(true); } else { stressText.SetActive(false); } if (stateController.fun.value <= 60) { funText.SetActive(true); } else { funText.SetActive(false); } if (stateController.hygiene.value <= 30) { cleanCheck.SetActive(true); } else { cleanCheck.SetActive(false); } if (stateController.energy.value <= 50) { energyText.SetActive(true); } else { energyText.SetActive(false); } if (stateController.satiety.value <= 60) { satietyText.SetActive(true); } else { satietyText.SetActive(false); } if (stateController.thirst.value <= 60) { thirstText.SetActive(true); } else { thirstText.SetActive(false); } } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class IntractableObject { private string gameName { get; set; } private string Action { get; set; } public IntractableObject(string Name) { gameName = Name; switch (Name) { case "Bed": Action = "Dormir"; break; case "Basket": Action = "Tirar"; break; case "Computer": Action = "Trabajar"; break; default: break; } } public string getName() { return gameName; } public string getAction() { return Action; } } <file_sep>using System; using System.Collections; using System.Collections.Generic; using TMPro; using UnityEngine; public class TimeController : MonoBehaviour { [SerializeField] private int timeSpeed; [SerializeField] private float totalSeconds = 85400; [SerializeField] private TextMeshProUGUI hourText; [SerializeField] private TextMeshProUGUI dayText; private PassiveController passive; public int dayCounter { get; private set; } public int hourCounter { get; private set; } private string[] days = { "Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado", "Domingo" }; private int minutesCounter; private readonly int secondsDay = 84600; private readonly int secondsHour = 3600; public GameObject Sun; private float degrees; private int hourPrev; void Start() { hourPrev = hourCounter; passive = GameObject.Find("Player").GetComponent<PassiveController>(); } // Update is called once per frame void Update() { CountAndModifyTime(); if (totalSeconds >= 590000) { passive.GameOver("win"); } CalculateDegrees(); MoveSun(); ChangeText(); } public int GetHour() { return hourCounter; } public void AddHours(int hours) { totalSeconds += hours * secondsHour; } public void AddHalfHour() { totalSeconds += secondsHour / 2; } private void ChangeText() { if (hourText != null) hourText.text = ("Hora ") + hourCounter.ToString() + (" : ") + minutesCounter.ToString(); if (dayText != null) dayText.text = "Día: " + days[dayCounter]; } private void MoveSun() { if (Sun != null) { Sun.transform.localEulerAngles = new Vector3(degrees, -90f, 0f); } } private void CalculateDegrees() { degrees = (totalSeconds / 240) - 120f; } private void CountAndModifyTime() { totalSeconds += Time.deltaTime * timeSpeed; dayCounter = (int)(totalSeconds / secondsDay); hourCounter = (int)(Mathf.Floor((totalSeconds - (dayCounter * secondsDay)) / secondsHour)); int diferenceBetweenHours; if (hourCounter != hourPrev) { diferenceBetweenHours = DiferenceHours(hourCounter); if (dayCounter == 0 && hourCounter <= 7) { hourPrev = hourCounter; return; } passive.PerformActionByHour(hourCounter); hourPrev = hourCounter; } minutesCounter = (int)(Mathf.Floor(totalSeconds - (dayCounter * secondsDay) - (hourCounter * secondsHour)) / 60); } public int DiferenceHours(int hourCounter) { if (hourCounter > hourPrev) { return (hourCounter - hourPrev); } else { passive.CleanStatus(); return (hourCounter - hourPrev) + 24; } } } <file_sep>using System; using System.Collections; using System.Collections.Generic; using UnityEngine; using UnityEngine.Video; using UnityEngine.Audio; public class PassiveController : MonoBehaviour { private StateController stateController; [SerializeField] private GameController gameController; private int hungerPenalization = 0; private int hungerTime = 0; private int thirstTime = 0; ObjectInteractable currentFocus; private TimeController timeController; private int noHealtyFoodDay = 0; private int foodDay = 0; private VideoPlayer video; private AudioSource spotify; private int stresCounter; private GameOverAnalisis gameOverAnalisis; // Start is called before the first frame update void Start() { stateController = gameObject.GetComponent<StateController>(); gameController = GameObject.Find("EventSystem").GetComponent<GameController>(); timeController = GameObject.Find("EventSystem").GetComponent<TimeController>(); video = GameObject.Find("TVCanvas").GetComponent<VideoPlayer>(); spotify = GameObject.Find("Spotify").GetComponent<AudioSource>(); gameOverAnalisis = gameObject.GetComponent<GameOverAnalisis>(); } // Update is called once per frame void Update() { StartCoroutine(CheckForInstnatPassive()); } public void PerfromModification(ObjectInteractable focus) { currentFocus = focus; if (focus.doBath) { Bath(); } else if (focus.doEat) { Eat(); } else if (focus.doClean) { Clean(); } else if (focus.doSleep) { Sleep(); } else if (focus.doWork) { Work(); } else if (focus.doACall) { Call(); } else if (focus.doExercise) { Exercise(); } else if (focus.turnOnTV) { VideoClip videoClip = video.GetComponent<AudioRandom>().GetVideoSource(); if (video.isPlaying) { video.Stop(); } else { video.clip = videoClip; video.Play(); GeneralMod(); } } else if (focus.turnOnStereo) { AudioClip audioClip = spotify.GetComponent<AudioRandom>().GetAudioSource(); if (spotify.isPlaying) { spotify.Stop(); } else { spotify.clip = audioClip; spotify.Play(); GeneralMod(); } } else { GeneralMod(); } currentFocus = null; } private void Exercise() { stateController.energy.ModifyValue(currentFocus.energyModification - hungerPenalization); stateController.stress.ModifyValue(currentFocus.stressModification); if (stateController.weigth.value + currentFocus.weightModification >= 74) { stateController.weigth.ModifyValue(currentFocus.weightModification); } timeController.AddHours(currentFocus.hoursSpent); gameController.RunFade(); } private void Call() { if (timeController.hourCounter >= 16 && timeController.hourCounter <= 23) { stateController.energy.ModifyValue(currentFocus.energyModification - hungerPenalization); stateController.social.ModifyValue(currentFocus.socialModification); timeController.AddHours(currentFocus.hoursSpent); gameController.RunFade(); } } private void GeneralMod() { stateController.energy.ModifyValue(currentFocus.energyModification - hungerPenalization); stateController.social.ModifyValue(currentFocus.socialModification); AddWeigthForExtraSatitey(); stateController.satiety.ModifyValue(currentFocus.satietyModification); stateController.thirst.ModifyValue(currentFocus.thirstModification); stateController.hygiene.ModifyValue(currentFocus.hygieneModification); stateController.fun.ModifyValue(currentFocus.funModification); stateController.stress.ModifyValue(currentFocus.stressModification); stateController.happines.ModifyValue(currentFocus.happinessModification); if (stateController.weigth.value+currentFocus.weightModification>=74) { stateController.weigth.ModifyValue(currentFocus.weightModification); } if (currentFocus.isFood) { foodDay++; if (currentFocus.notHealty) { noHealtyFoodDay++; } CheckWeightIncressing(); } timeController.AddHours(currentFocus.hoursSpent); } private void Work() { if ((timeController.hourCounter >= 9 && timeController.hourCounter < 15) && !stateController.work.boolValue) { int late = timeController.hourCounter - 9; int workingHours = 15 - timeController.hourCounter; int stressAdded = (workingHours / 4); int prevsocial = stateController.social.value; stateController.energy.ModifyValue(currentFocus.energyModification - hungerPenalization); stateController.social.ModifyValue(prevsocial + currentFocus.socialModification); stateController.stress.ModifyValue(10 * late + 10 * stressAdded); stateController.fun.ModifyValue(currentFocus.funModification); stateController.work.ModifyValue(true); timeController.AddHours(workingHours); gameController.RunFade(); } else { stateController.energy.ModifyValue(-10 - hungerPenalization); stateController.fun.ModifyValue(40); stateController.social.ModifyValue(30); stateController.stress.ModifyValue(-20); timeController.AddHours(3); } } private void Clean() { stateController.fun.ModifyValue(currentFocus.funModification); stateController.energy.ModifyValue(currentFocus.energyModification - hungerPenalization); stateController.hygiene.ModifyValue(currentFocus.hygieneModification); stateController.clean.ModifyValue(true); timeController.AddHours(currentFocus.hoursSpent); gameController.RunFade(); } private void Eat() { if ((timeController.hourCounter >= 8 && timeController.hourCounter <= 11) && !stateController.brakefast.boolValue) { stateController.brakefast.ModifyValue(true); } else if ((timeController.hourCounter > 13 && timeController.hourCounter <= 16) && !stateController.eat.boolValue) { stateController.eat.ModifyValue(true); } else if ((timeController.hourCounter > 19 && timeController.hourCounter <= 22) && !stateController.dinner.boolValue) { stateController.dinner.ModifyValue(true); } else { AddWeigthForExtraSatitey(); } foodDay++; CheckWeightIncressing(); stateController.energy.ModifyValue(currentFocus.energyModification - hungerPenalization); stateController.satiety.ModifyValue(currentFocus.satietyModification); timeController.AddHalfHour(); gameController.RunFade(); } private void AddWeigthForExtraSatitey() { if (stateController.satiety.value + currentFocus.satietyModification > stateController.satiety.maxValue) { Debug.Log("enter to satiety"); stateController.weigth.ModifyValue(((stateController.satiety.value + currentFocus.satietyModification) - stateController.satiety.maxValue) / 20); }; } private void CheckWeightIncressing() { if (foodDay == 5) { stateController.weigth.ModifyValue(1); foodDay = 0; } if (noHealtyFoodDay == 3) { stateController.weigth.ModifyValue(1); noHealtyFoodDay = 0; } } private void Bath() { if (!stateController.bath.boolValue) { stateController.bath.ModifyValue(true); stateController.energy.ModifyValue(currentFocus.energyModification - hungerPenalization); stateController.hygiene.ModifyValue(currentFocus.hygieneModification); timeController.AddHours(currentFocus.hoursSpent); gameController.RunFade(); } } private void Sleep() { if (stateController.energy.value <= 40) { int prevsocial = stateController.social.value; int prevFun = stateController.social.value; stateController.takignANap.ModifyValue(true); stateController.sleep.ModifyValue(true); if (stateController.weigth.value >= 80) { stateController.energy.ModifyMaxValue(80); } else { stateController.energy.ModifyMaxValue(100); } if (stateController.stress.value >= 80) { GameOver("stress"); } stateController.energy.ModifyValue(100); stateController.stress.ModifyValue(-20); stateController.hygiene.ModifyValue(-100); gameController.RunFade(); timeController.AddHours(8); stateController.takignANap.ModifyValue(false); } } IEnumerator CheckForInstnatPassive() { stateController.happines.ModifyValue((stateController.fun.value + stateController.social.value) / 2); if (stateController.satiety.value <= 50) { hungerPenalization = 5; } else { hungerPenalization = 0; } if (stateController.energy.value == 0) { Sleep(); yield return new WaitForSeconds(2); } yield return null; } public void GameOver(string reason) { Time.timeScale = 0f; gameOverAnalisis.GameOverReason(reason); } public void PerformActionByHour(int hour) { Debug.Log("TIME DAY"+timeController.dayCounter); if (hour==23) { CleanStatus(); } if (hour >= 15&& stateController.work.boolValue==false && timeController.dayCounter < 5) { GameOver("work"); } if (stateController.weigth.value > 95) { GameOver("fat"); } if (stateController.stress.value>=90) { stresCounter++; } else { stresCounter = 0; } if (stresCounter>=5) { GameOver("stress"); } if (stateController.social.value <= 60 && !stateController.takignANap.boolValue) { stateController.stress.ModifyValue(10); } if (stateController.fun.value <= 40 && !stateController.takignANap.boolValue) { stateController.stress.ModifyValue(10); } if (!stateController.takignANap.boolValue) { stateController.fun.ModifyValue(-10); stateController.energy.ModifyValue(-5); } if (hour == 23 && stateController.hygiene.value <= 50 && (!stateController.takignANap.boolValue)) { stateController.stress.ModifyValue(10); } if (stateController.satiety.value <= 20) { hungerTime++; } else { hungerTime = 0; } if (stateController.thirst.value <= 40) { thirstTime++; } else { thirstTime = 0; } if (thirstTime >= 12 ) { GameOver("hid"); } if ( hungerTime >= 12) { GameOver("hunger"); } stateController.social.ModifyValue(-5); stateController.satiety.ModifyValue(-10); stateController.thirst.ModifyValue(-10); if (hour==23&&timeController.dayCounter==6) { GameOver("win"); } } public void CleanStatus() { stateController.brakefast.ModifyValue(false); stateController.bath.ModifyValue(false); stateController.work.ModifyValue(false); stateController.clean.ModifyValue(false); stateController.eat.ModifyValue(false); stateController.dinner.ModifyValue(false); stateController.sleep.ModifyValue(false); noHealtyFoodDay = 0; foodDay = 0; } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class ObjectInteractable : MonoBehaviour { /// <summary> /// ATRIBUTOS GENERALES /// </summary> [SerializeField] private float radius = 2f; [SerializeField] private string actionName; [SerializeField] private PassiveController passiveController; private GameObject player; private TimeController timeController; public bool allowInteract { get; private set; } public string nameObject { get; private set; } /// <summary> /// ATRIBUTOS MULTIPLES /// </summary> [SerializeField] private int[] initialHours; [SerializeField] private int[] finalalHours; [SerializeField] private string[] actionsName; /// <summary> /// ACCIONES /// </summary> [SerializeField] public bool doBath; [SerializeField] public bool doWork; [SerializeField] public bool doClean; [SerializeField] public bool doEat; [SerializeField] public bool doSleep; [SerializeField] public bool doACall; [SerializeField] public bool doExercise; [SerializeField] public int energyModification; [SerializeField] public int socialModification; [SerializeField] public int satietyModification; [SerializeField] public int thirstModification; [SerializeField] public int hygieneModification; [SerializeField] public int funModification; [SerializeField] public int stressModification; [SerializeField] public int happinessModification; [SerializeField] public int weightModification; [SerializeField] public int hoursSpent; [SerializeField] public bool isFood; [SerializeField] public bool notHealty; [SerializeField] public bool turnOnTV; [SerializeField] public bool turnOnStereo; // Start is called before the first frame update void Start() { player = GameObject.Find("Player"); nameObject = gameObject.name; passiveController = player.GetComponent <PassiveController>(); timeController = GameObject.Find("EventSystem").GetComponent<TimeController>(); } // Update is called once per frame void Update() { float distance = Vector3.Distance(player.GetComponent<Transform>().position, transform.position); if (distance <= radius) { allowInteract = true; } else { allowInteract = false; } } public void PerformActions() { passiveController.PerfromModification(gameObject.GetComponent<ObjectInteractable>()); } private void OnDrawGizmosSelected() { Gizmos.color = Color.yellow; Gizmos.DrawWireSphere(transform.position, radius); } public string GetActionName() { if (actionsName.Length > 0) { for (int i = 0; i < actionsName.Length; i++) { if (timeController.hourCounter >= initialHours[i] && timeController.hourCounter <= finalalHours[i]) { return actionsName[i]; } return actionName; } } return actionName; } } <file_sep>using System.Collections; using System.Collections.Generic; using TMPro; using UnityEngine; using UnityEngine.UI; public class SatatusController : MonoBehaviour { [SerializeField] private TextMeshProUGUI energyText; [SerializeField] private TextMeshProUGUI socialText; [SerializeField] private TextMeshProUGUI satietyText; [SerializeField] private TextMeshProUGUI thristText; [SerializeField] private TextMeshProUGUI hygineText; [SerializeField] private TextMeshProUGUI funText; [SerializeField] private TextMeshProUGUI stressText; [SerializeField] private TextMeshProUGUI happinessText; [SerializeField] private TextMeshProUGUI weightText; [SerializeField] private Slider energyBar; [SerializeField] private Slider socialBar; [SerializeField] private Slider satietyBar; [SerializeField] private Slider thristBar; [SerializeField] private Slider hygineBar; [SerializeField] private Slider funBar; [SerializeField] private Slider stressBar; [SerializeField] private Slider happinessBar; [SerializeField] private GameObject BrakefatCheck; [SerializeField] private GameObject BathCheck; [SerializeField] private GameObject WorkCheck; [SerializeField] private GameObject CleanCheck; [SerializeField] private GameObject EatCheck; [SerializeField] private GameObject DinnerCheck; [SerializeField] private GameObject SleepChake; [SerializeField] private ModifiedClock clock; FullSate state = new FullSate(); void FixedUpdate() { BrakefatCheck.SetActive(state.brakefast); BathCheck.SetActive(state.bath); WorkCheck.SetActive(state.work); CleanCheck.SetActive(state.clean); EatCheck.SetActive(state.eat); DinnerCheck.SetActive(state.dinner); SleepChake.SetActive(state.sleep); energyText.text = state.GetEnergy().currentValue.ToString() + "%"; socialText.text = state.GetSocial().currentValue.ToString() + "%"; satietyText.text = state.GetSatiety().currentValue.ToString() + "%"; thristText.text = state.GetThirst().currentValue.ToString() + "%"; hygineText.text = state.GetHygiene().currentValue.ToString() + "%"; funText.text = state.GetFun().currentValue.ToString() + "%"; stressText.text = state.GetStress().currentValue.ToString() + "%"; happinessText.text = state.GetHappiness().currentValue.ToString() + "%"; weightText.text = state.GetWeight().currentValue.ToString() + "Kg"; energyBar.value = state.GetEnergy().currentValue; socialBar.value = state.GetSocial().currentValue; satietyBar.value = state.GetSatiety().currentValue; thristBar.value = state.GetThirst().currentValue; hygineBar.value = state.GetHygiene().currentValue; funBar.value = state.GetFun().currentValue; stressBar.value = state.GetStress().currentValue; happinessBar.value = state.GetHappiness().currentValue; } public FullSate GetState() { return state; } public ModifiedClock GetClock() { return clock; } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class FullSate { [SerializeField] private State energy = new State(100); [SerializeField] private State social = new State(100); [SerializeField] private State satiety = new State(100); [SerializeField] private State thirst = new State(100); [SerializeField] private State hygiene = new State(100); [SerializeField] private State fun = new State(100); [SerializeField] private State stress = new State(100,true); [SerializeField] private State happiness = new State(100); [SerializeField] private State weight = new State(80); public bool brakefast { get; private set; } public bool bath { get; private set; } public bool work { get; private set; } public bool clean { get; private set; } public bool eat { get; private set; } public bool dinner { get; private set; } public bool sleep { get; private set; } public FullSate() { brakefast = false; bath = false; work = false; clean = false; eat = false; dinner = false; sleep = false; } public State GetEnergy() { return energy; } public State GetSocial() { return social; } public State GetSatiety() { return satiety; } public State GetThirst() { return thirst; } public State GetHygiene() { return hygiene; } public State GetFun() { return fun; } public State GetStress() { return stress; } public State GetHappiness() { return happiness; } public State GetWeight() { return weight; } public void Brakefast() { brakefast = !brakefast; } public void Bath() { bath = !bath; } public void Work() { work = !work; } public void Clean() { clean = !clean; } public void Eat() { eat = !eat; } public void Dinner() { dinner = !dinner; } public void Sleep() { sleep = !sleep; } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class StateModifyerByInteraction : MonoBehaviour { private StateController stateController; [SerializeField] private int energyModification; [SerializeField] private int socialModification; [SerializeField] private int satietyModification; [SerializeField] private int thirstModification; [SerializeField] private int hygieneModification; [SerializeField] private int funModification; [SerializeField] private int stressModification; [SerializeField] private int happinessModification; [SerializeField] private int weightModification; // Start is called before the first frame update void Start() { stateController = GameObject.Find("Player").GetComponent<StateController>(); } // Update is called once per frame void Update() { } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class ActionsModifier : MonoBehaviour { [SerializeField] private bool doBath; [SerializeField] private bool doWokr; [SerializeField] private bool doClean; [SerializeField] private bool doEat; [SerializeField] private bool doSleep; [SerializeField] private int energyModification; [SerializeField] private int socialModification; [SerializeField] private int satietyModification; [SerializeField] private int thirstModification; [SerializeField] private int hygieneModification; [SerializeField] private int funModification; [SerializeField] private int stressModification; [SerializeField] private int happinessModification; [SerializeField] private int weightModification; [SerializeField] private int hoursSpent; [SerializeField] private bool itIsFun; [SerializeField] private bool notHealty; private int debuffStress = 0; private StateController stateController; [SerializeField] private GameController gameController; [SerializeField] private Passives passives; private TimeController timeController; public int passiveDebuffToEnergy { get; private set; } = 0; public void SetDebuffToEnergy(int debuff) { passiveDebuffToEnergy = debuff; } void Start() { if (gameController == null) { gameController = GameObject.Find("EventSystem").GetComponent<GameController>(); } stateController = GameObject.Find("Player").GetComponent<StateController>(); timeController = GameObject.Find("EventSystem").GetComponent<TimeController>(); passives = GameObject.Find("EventSystem").GetComponent<Passives>(); } public void PerformStateModifications() { if (itIsFun) { debuffStress = 10; } else { debuffStress = 0; } stateController.energy.ModifyValue(energyModification+passiveDebuffToEnergy); stateController.social.ModifyValue(socialModification); stateController.satiety.ModifyValue(satietyModification); stateController.thirst.ModifyValue(thirstModification); stateController.hygiene.ModifyValue(hygieneModification); stateController.fun.ModifyValue(funModification); stateController.stress.ModifyValue(stressModification-debuffStress); stateController.happines.ModifyValue(happinessModification); stateController.weigth.ModifyValue(weightModification); timeController.AddHours(hoursSpent); } public void ActivateAction() { if (doBath) { if (!stateController.bath.boolValue) { Bath(); } } else if (doWokr) { if (stateController.energy.value >= 30) { Work(); } } else if (doClean) { Clean(); } else if (doEat) { if (timeController.hourCounter >= 8 && timeController.hourCounter <= 11) { Eat(); stateController.brakefast.ModifyValue(true); } else if (timeController.hourCounter > 13 && timeController.hourCounter <= 16) { Eat(); stateController.eat.ModifyValue(true); } else if (timeController.hourCounter > 19 && timeController.hourCounter <= 22) { Eat(); stateController.dinner.ModifyValue(true); } else { Eat(); } } else if (doSleep) { // Solo si el jugador tiene 40 o menos puntos de energía podrá dormir voluntariamente sin importar la hora. if (stateController.energy.value <= 40) { Sleep(); } } else { PerformStateModifications(); return; } } public void Bath() { //Bañarse genera 50 puntos de higiene, solo se puede hacer 1 vez cada 24hrs/juego (significativa 1 hora) consume 10 de energía y resta un punto de estrés. -interactuar en regadera- hygieneModification = 50; energyModification = -10; stressModification = -10; hoursSpent = 1; gameController.RunFade(); stateController.bath.ModifyValue(true); PerformStateModifications(); } public void Eat() { //Desayunar recargara 60 puntos de saciedad, recarga 10 de energía, consume 30 minutos/juego (significativa) -Interactuar con refrigerador- satietyModification = 60; energyModification = 10; hoursSpent = 1; passives.MealTook(notHealty); if (stateController.satiety.value+satietyModification>stateController.satiety.maxValue) { int exesiveEat = (stateController.satiety.value + satietyModification) - stateController.satiety.maxValue; for (int i = exesiveEat; i >0;) { weightModification += 1; i -= 5; } } gameController.RunFade(); Debug.Log("state eat "+stateController.weigth.value); PerformStateModifications(); Debug.Log("state eat after " + stateController.weigth.value); } public void Clean() { //Limpiar sumara 50 puntos al estatus de higiene, restara 30 de energía y restara 10 de diversión (significativa hora y media) – interactuar con objetos de limpieza en cuarto del lavado- hygieneModification = 50; energyModification = -30; funModification = -10; hoursSpent = 2; gameController.RunFade(); stateController.clean.ModifyValue(true); PerformStateModifications(); } public void Work() { if (timeController.hourCounter > 9) { int lateWorkTime = timeController.hourCounter - 9; stressModification = lateWorkTime * 10; } //Trabajar restara 20 puntos al estatus de diversión, restara 30 de energía y otorgara 10 de social. funModification = -20; energyModification = -30; socialModification = 10; //Trabajar restara 20 puntos al estatus de diversión, restara 30 de energía y otorgara 10 de social. //El trabajo inicia a las 9:00 am y termina a las 3:00 pm de lunes a viernes, se puede llegar tarde al trabajo, por cada hora de retraso se añadirá un punto de estrés adicional, sin importar si llega tarde la hora de salida siempre sera a las 2:00 pm(significativa) - interactuar con computadora- hoursSpent = 14 - timeController.hourCounter; //El trabajo inicia a las 9:00 am y termina a las 3:00 pm de lunes a viernes, se puede llegar tarde al trabajo, por cada hora de retraso se añadirá un punto de estrés adicional, sin importar si llega tarde la hora de salida siempre sera a las 2:00 pm (significativa) -interactuar con computadora- //Por cada 3 horas de trabajo se sumara un punto de estres stressModification = hoursSpent / 3; ///Por cada 3 horas de trabajo se sumara un punto de estres gameController.RunFade(); stateController.work.ModifyValue(true); PerformStateModifications(); //Mientras se realiza una actividad social, se duerme o se trabaja no se perderán puntos del estatus social passives.SetLastHour(passives.lastHour + hoursSpent); } public void Sleep() { //Dormir siempre consumirá 8hrs / juego del día y recargara el máximo de energía stateController.takignANap.ModifyValue(true); stateController.hygiene.ModifyValue(-100); energyModification = 100; hoursSpent = 8; funModification = (stateController.fun.value + stateController.social.value) / 2; gameController.RunFade(); stateController.sleep.ModifyValue(true); PerformStateModifications(); //Mientras se realiza una actividad social, se duerme o se trabaja no se perderán puntos del estatus social passives.SetLastHour(passives.lastHour + hoursSpent); if (stateController.weigth.value>80) { stateController.energy.ModifyMaxValue(80); } else { stateController.energy.ModifyMaxValue(100); } stateController.takignANap.ModifyValue(false); } } <file_sep>using UnityEngine; using System.Collections; public class MoveableObject : MonoBehaviour { public int objectNumber = 1; } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class MakeFade : MonoBehaviour { public Animator animator; // Update is called once per frame public Actions playerActions; public void ApplyTransition() { animator.SetTrigger("Fade"); } } <file_sep>using System.Collections; using System.Collections.Generic; using TMPro; using UnityEngine; public class InteractableConfig : MonoBehaviour { private string nameObject; [SerializeField] private string actionName; [SerializeField] private float radius = 2f; public bool allowInteract { get; private set; } private GameObject player; private TextMeshProUGUI messageText; private GameObject messagePanel; private ActionsModifier actionsModifier; void Start() { nameObject = gameObject.name; player = GameObject.Find("Player"); actionsModifier = gameObject.GetComponent<ActionsModifier>(); } // Update is called once per frame private void Update() { float distance = Vector3.Distance(player.GetComponent<Transform>().position, transform.position); if (distance <= radius) { allowInteract = true; } else { allowInteract = false; } } public void PerformActions() { if (actionsModifier!=null) { actionsModifier.ActivateAction(); } } public string GetName() { return nameObject; } public string GetActionName() { return actionName; } private void OnDrawGizmosSelected() { Gizmos.color = Color.yellow; Gizmos.DrawWireSphere(transform.position, radius); } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class GoToLinkUnique : MonoBehaviour { public string Link; public void OpenLink() { Application.OpenURL(Link); } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; using UnityEngine.UI; public class Checkmark : MonoBehaviour { private GameObject CheckmarkImage; //GameObject CheckmarkImageStatus = GameObject.Find("Checkmark"); // Start is called before the first frame update void Start() { CheckmarkImage = GameObject.Find("CheckmarkImageB"); //.gameObject.SetActive(true); } // Update is called once per frame void Update() { if (Input.GetKeyDown("g")) CheckmarkImage.SetActive(true); if (Input.GetKeyDown("f")) CheckmarkImage.SetActive(false); //CheckmarkImage.gameObject.SetActive(true); } } <file_sep>using System; using System.Collections; using System.Collections.Generic; using UnityEngine; using UnityEngine.SceneManagement; public class PauseMenu : MonoBehaviour { [SerializeField] public bool GameIsPaused { get;private set; } = false; [SerializeField] public bool SeenRecomendations { get; private set; } = false; public GameObject PauseMenuUI; public GameObject PalyerMenu; public GameObject RecomendationsMenu; public GameObject GameOverMenu; // Update is called once per frame void Update() { if (Input.GetKeyDown(KeyCode.Escape)&&!SeenRecomendations) { if (GameIsPaused) { Resume(); } else { Pause(); } } if (Input.GetKeyDown(KeyCode.R)&&!GameIsPaused) { if (SeenRecomendations) { HideRecomendations(); } else { ShowRecomendations(); } } } private void HideRecomendations() { Cursor.lockState = CursorLockMode.Locked; Cursor.visible = false; RecomendationsMenu.SetActive(false); PalyerMenu.SetActive(true); Time.timeScale = 1f; SeenRecomendations = false; } private void ShowRecomendations() { Cursor.lockState = CursorLockMode.None; Cursor.visible = true; RecomendationsMenu.SetActive(true); PalyerMenu.SetActive(false); Time.timeScale = 0f; SeenRecomendations = true; } public void GameOver() { Cursor.lockState = CursorLockMode.None; Cursor.visible = true; GameOverMenu.SetActive(true); PalyerMenu.SetActive(false); Time.timeScale = 0f; } public void Resume() { Cursor.lockState = CursorLockMode.Locked; Cursor.visible = false; PauseMenuUI.SetActive(false); PalyerMenu.SetActive(true); Time.timeScale = 1f; GameIsPaused = false; } void Pause() { Cursor.lockState = CursorLockMode.None; Cursor.visible = true; PauseMenuUI.SetActive(true); PalyerMenu.SetActive(false); Time.timeScale = 0f; GameIsPaused = true; } public void QuitGame() { Application.Quit(); } public void LoadMenu() { Time.timeScale = 1f; SceneManager.LoadScene(0); } public void LoadCredits() { Time.timeScale = 1f; SceneManager.LoadScene(2); } } <file_sep>using System; using System.Collections; using System.Collections.Generic; using UnityEngine; public class Passives : MonoBehaviour { ActionsModifier actionsModifier; private StateController stateController; private TimeController timeController; private int hoursWithHunger = 0; private int hoursWithThirst = 0; public int lastHour { get; private set; } public int notHealtyMeal { get; private set; } = 0; // Start is called before the first frame update void Start() { stateController = GameObject.Find("Player").GetComponent<StateController>(); actionsModifier = gameObject.GetComponent<ActionsModifier>(); timeController = gameObject.GetComponent<TimeController>(); lastHour = timeController.GetHour(); } // Update is called once per frame void Update() { int diference = timeController.hourCounter - lastHour; StartCoroutine(CheckForDebuff()); StartCoroutine(PassivePerHour(diference)); StartCoroutine(PassiveNoEnegy()); StartCoroutine(CheckForDeath()); StartCoroutine(CheckForHygiene(diference)); SetLastHour(lastHour + diference); } IEnumerator CheckForHygiene(int diference) { if (diference != 0&&timeController.hourCounter==0) { stateController.stress.ModifyValue(10); } yield return null; } IEnumerator CheckForDebuff() { if (stateController.satiety.value < 50 && !stateController.takignANap.boolValue) { actionsModifier.SetDebuffToEnergy(5); } else { actionsModifier.SetDebuffToEnergy(0); } yield return null; } IEnumerator CheckForDeath() { if ( hoursWithHunger>=12|| hoursWithThirst>12|| (stateController.stress.value >= 80 && stateController.takignANap.boolValue) ) { Debug.Log("YOU ARE DEAD"); } yield return null; } IEnumerator PassivePerHour(int diference) { //Cada hora/juego el jugador pierde 10 puntos de social. if (diference != 0 && (timeController.dayCounter != 0 || timeController.hourCounter > 7)) { for (int i = 0; i < diference; i++) { stateController.satiety.ModifyValue(-10); stateController.thirst.ModifyValue(-10); if (stateController.satiety.value <= 20) { hoursWithHunger++; } else { hoursWithHunger = 0; } if (stateController.thirst.value<40) { hoursWithThirst++; } else { hoursWithThirst = 0; } } if (!stateController.takignANap.boolValue) { for (int i = 0; i < diference; i++) { stateController.social.ModifyValue(-10); if (stateController.fun.value<=40) { stateController.stress.ModifyValue(-10); } stateController.fun.ModifyValue(-10); if (stateController.social.value <= 60) { stateController.stress.ModifyValue(10); } } yield return new WaitForSeconds(2); } } yield return null; } IEnumerator PassiveNoEnegy() { //Al llegar la energía a 0 el jugador se quedara dormido 8 horas / juego if (stateController.energy.value <= 0 && !stateController.takignANap.boolValue) { actionsModifier.Sleep(); yield return new WaitForSeconds(2); } yield return null; } public void MealTook(bool noHealty) { if (noHealty) { notHealtyMeal += 1; } return; } public void SetLastHour(int hour) { if (hour > 23) { lastHour = hour - 24; } else { lastHour = hour; } } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; using UnityEngine.UI; public class Clock : MonoBehaviour { [Tooltip("Initial time in seconds")] public int initialTime; //Set range at 30f so the time goes 1 hr == 2 min in real life [Tooltip("Clock time scale")] [Range(30f, 20000.0f)] public float timeScale = 1f; private Text myText; private float frameTimeWithTimeScale = 0f; private float timeInSecondsToShow = 0f; private int days = 1; // Start is called before the first frame update void Start() { myText = GetComponent<Text>(); timeInSecondsToShow = initialTime; updateClock(initialTime); } // Update is called once per frame void Update() { //time of each frame related to the time scale frameTimeWithTimeScale = Time.deltaTime * timeScale; //saves time elapsed to show it in the clock timeInSecondsToShow += frameTimeWithTimeScale; updateClock(timeInSecondsToShow); } public void updateClock(float timeInSeconds) { int hours = 0; int minutes = 0; int seconds = 0; string clockText; //Make sure time is not a negative if (timeInSeconds < 0) timeInSeconds = 0; // //calculate hours, minutes and seconds hours = (int)timeInSeconds / 3600; minutes = (int)(timeInSeconds - (hours * 3600)) / 60; seconds = (int)timeInSeconds % 60; if (hours > 23) { hours = 0; timeInSecondsToShow = 0f; days += 1; } //full clock string (to see minutes and seconds) clockText = "Hr: " + hours.ToString("00") + ":" + minutes.ToString("00") + ":" + seconds.ToString("00") + "\n" + "Day: " + days.ToString(); //Print clock string //clockText = "Hr: " + hours.ToString("00") + "\n" + "Day: " + days.ToString(); //Update text myText.text = clockText; } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class DayNight : MonoBehaviour { //Variable //Start with min at 2040 to make the sun appear at 7am //Set the timeSpeed at 1 to make a day = 48mins if the conditions asks for min >= 2880 public float min = 2040; public float degrees; public float timeSpeed = 1; // Update is called once per frame void Update() { //1 day = 48 min min += timeSpeed * Time.deltaTime;//2 sec = 1 min if (min >= 2880)//2880min = 1 dia { min = 0; } //360° / 2880 --> 1° = 0.125min degrees = min / 8; this.transform.localEulerAngles = new Vector3(degrees, -90f, 0f); } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class GoToLink : MonoBehaviour { public string BansheesVeilContact; public string LinkedInContact; public string FacebookContact; public void OpenBansheesVeil() { Application.OpenURL(BansheesVeilContact); } public void OpenFacebook() { Application.OpenURL(FacebookContact); } public void OpenLinkedIn() { Application.OpenURL(LinkedInContact); } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class Interactable : MonoBehaviour { [SerializeField] private float radius = 2f; public bool allowInteract = false; private GameObject player; private SatatusController playerStatus; private FullSate currentState; [SerializeField] private int energy = 0; [SerializeField] private int social = 0; [SerializeField] private int satiety = 0; [SerializeField] private int thirst = 0; [SerializeField] private int hygiene = 0; [SerializeField] private int fun = 0; [SerializeField] private int stress = 0; [SerializeField] private int happiness = 0; [SerializeField] private int weight = 0; private ModifiedClock clock; public bool brakefast; public bool bath; public bool work; public bool clean; public bool eat; public bool dinner; public bool sleep; public float timeCost; private void Start() { player = GameObject.FindGameObjectWithTag("Player"); playerStatus = player.GetComponent<SatatusController>(); currentState = playerStatus.GetState(); clock = playerStatus.GetClock(); } private void Update() { float distance = Vector3.Distance(player.GetComponent<Transform>().position, transform.position); if (distance <= radius) { allowInteract = true; } else { allowInteract = false; } } public void DoInteraction() { currentState.GetEnergy().ModifyState(energy); currentState.GetSocial().ModifyState(social); currentState.GetSatiety().ModifyState(satiety); currentState.GetThirst().ModifyState(thirst); currentState.GetHygiene().ModifyState(hygiene); currentState.GetFun().ModifyState(fun); currentState.GetStress().ModifyState(stress); currentState.GetHappiness().ModifyState(happiness); currentState.GetWeight().ModifyState(weight); clock.IncressTime(timeCost*3600); } private void OnDrawGizmosSelected() { Gizmos.color = Color.yellow; Gizmos.DrawWireSphere(transform.position, radius); } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; using UnityEngine.Video; public class AudioRandom : MonoBehaviour { // Start is called before the first frame update [SerializeField]private AudioClip[] audioList; [SerializeField] private VideoClip[] videoList; void Start() { } // Update is called once per frame void Update() { } public AudioClip GetAudioSource() { return audioList[GetRandomNumber(audioList.Length)]; } public VideoClip GetVideoSource() { return videoList[GetRandomNumber(videoList.Length)]; } private int GetRandomNumber(int lenght) { return Random.Range(0, lenght); } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; using TMPro; public class State { public int maxValue { get; private set; } public int currentValue { get; private set; } public State(int maxValue) { this.maxValue = maxValue; currentValue = maxValue; } public State(int maxValue,bool inverse) { this.maxValue = maxValue; if (inverse) { currentValue = 0; } } public void ModifyState(int modification) { int newValue = currentValue + modification; if (newValue>maxValue) { currentValue = maxValue; }else if (newValue < 0) { currentValue = 0; } else { currentValue = newValue; } } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class StateModel { public int maxValue { get; private set; } = 0; public int minValue { get; private set; } = 0; public int value { get; private set; } public bool boolValue { get; private set; } public StateModel(int maxValue, bool isMinVale, int value) { if (isMinVale) { this.minValue = maxValue; } else { this.maxValue = maxValue; } this.value = value; } public StateModel(bool value) { boolValue = value; } public void ModifyValue(bool newValue) { boolValue = newValue; } public void ModifyValue(int newValue) { if (value + newValue > maxValue && maxValue != 0) { value = maxValue; } else if (value + newValue < 0) { value = 0; } else { value += newValue; } } public void ModifyMaxValue(int newValue) { maxValue = newValue; if (value > newValue) { value = newValue; } } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class Actions : MonoBehaviour { [SerializeField] private int hoursSleeping = 8; [SerializeField] private int hoursWorking = 6; public bool isSleeping { get; private set; } public bool isWorking { get; private set; } public FullSate currentState; public MakeFade transition; public ModifiedClock clock; private int previousHour; private int continousHunger = 0; private bool isDead; private void Start() { currentState = gameObject.GetComponent<SatatusController>().GetState(); previousHour = clock.GetHour(); } private void Update() { LoseSocialPasive(); GetStressPassive(); LoseSatityPassive(); ChekHunger(); CheckDeathCondition(); FitHour(); } private void ApplyHungerPenalizatio() { if (currentState.GetSatiety().currentValue<50) { currentState.GetEnergy().ModifyState(-5); } } private void ChekHunger() { if (currentState.GetSatiety().currentValue <= 20&& (previousHour != clock.GetHour())) { continousHunger++; } else { continousHunger = 0; } } private void CheckDeathCondition() { if (continousHunger >= 20) { isDead = true; } if (isDead) { GameOver(); } } private void GameOver() { Debug.Log("shoud game over screen"); } private void LoseSatityPassive() { if ((previousHour != clock.GetHour())) { currentState.GetSatiety().ModifyState(-10 * (clock.GetHour() - previousHour)); } } private void LoseSatityActive(int cuantity) { currentState.GetSatiety().ModifyState(-10 * cuantity); } private void GetStressPassive() { if ((currentState.GetSocial().currentValue <= 60) && (previousHour != clock.GetHour())) { currentState.GetStress().ModifyState(10 * (clock.GetHour() - previousHour)); } } private void LoseSocialPasive() { if ((previousHour != clock.GetHour())) { currentState.GetSocial().ModifyState(-10 * (clock.GetHour() - previousHour)); } } private void FitHour() { previousHour = clock.GetHour(); } public void Sleep() { isSleeping = true; transition.ApplyTransition(); clock.IncressTime(hoursSleeping * 3600); LoseSatityActive(hoursSleeping); FitHour(); if (!currentState.sleep) { currentState.Sleep(); } currentState.GetEnergy().ModifyState(100); isSleeping = false; Debug.Log("should apper a leter for some seconds that said that you felt asleep"); } public void Work() { /* previousHour = clock.GetHour() + hoursWorking; */ Debug.Log("Hello World working"); isWorking = true; transition.ApplyTransition(); clock.IncressTime(hoursWorking * 3600); FitHour(); ApplyHungerPenalizatio(); if (!currentState.work) { currentState.Work(); } isWorking = false; } } <file_sep>using System.Collections; using System.Collections.Generic; using TMPro; using UnityEngine; using UnityEngine.UI; public class StateController : MonoBehaviour { public StateModel energy = new StateModel(100, false, 100); public StateModel social = new StateModel(100 ,false,100); public StateModel satiety = new StateModel(100,false, 100); public StateModel thirst = new StateModel(100,false, 100); public StateModel hygiene = new StateModel(100, false, 0); public StateModel fun = new StateModel(100, false, 100); public StateModel stress = new StateModel(100, false, 0); public StateModel happines = new StateModel(100, false, 100); public StateModel weigth = new StateModel(74,true,74); public StateModel brakefast = new StateModel(false); public StateModel bath = new StateModel(false); public StateModel work = new StateModel(false); public StateModel clean = new StateModel(false); public StateModel eat = new StateModel(false); public StateModel dinner = new StateModel(false); public StateModel sleep = new StateModel(false); public StateModel takignANap = new StateModel(false); [SerializeField] private TextMeshProUGUI energyText; [SerializeField] private TextMeshProUGUI socialText; [SerializeField] private TextMeshProUGUI satietyText; [SerializeField] private TextMeshProUGUI thirstText; [SerializeField] private TextMeshProUGUI hygineText; [SerializeField] private TextMeshProUGUI funText; [SerializeField] private TextMeshProUGUI stressText; [SerializeField] private TextMeshProUGUI happinessText; [SerializeField] private TextMeshProUGUI weightText; [SerializeField] private Slider energyBar; [SerializeField] private Slider socialBar; [SerializeField] private Slider satietyBar; [SerializeField] private Slider thirstBar; [SerializeField] private Slider hygieneBar; [SerializeField] private Slider funBar; [SerializeField] private Slider stressBar; [SerializeField] private Slider happinessBar; [SerializeField] private GameObject brakefatCheck; [SerializeField] private GameObject bathCheck; [SerializeField] private GameObject workCheck; [SerializeField] private GameObject cleanCheck; [SerializeField] private GameObject eatCheck; [SerializeField] private GameObject dinnerCheck; [SerializeField] private GameObject sleepCheck; [SerializeField] private GameObject viewEnegy; [SerializeField] private GameObject viewHunger; [SerializeField] private GameObject viewThirst; [SerializeField] private GameObject viewHygene; [SerializeField] private GameObject viewStress; [SerializeField] private GameObject viewSocial; private void Update() { if (social.value <= 20) { viewSocial.SetActive(true); } else { viewSocial.SetActive(false); } if (stress.value >= 80) { viewStress.SetActive(true); } else { viewStress.SetActive(false); } if (hygiene.value <= 20) { viewHygene.SetActive(true); } else { viewHygene.SetActive(false); } if (thirst.value <= 20) { viewThirst.SetActive(true); } else { viewThirst.SetActive(false); } if (satiety.value <= 20) { viewHunger.SetActive(true); } else { viewHunger.SetActive(false); } if (energy.value <= 20) { viewEnegy.SetActive(true); } else { viewEnegy.SetActive(false); } energyText.text = energy.value.ToString(); socialText.text = social.value.ToString(); satietyText.text = satiety.value.ToString(); thirstText.text = thirst.value.ToString(); hygineText.text = hygiene.value.ToString(); funText.text = fun.value.ToString(); stressText.text = stress.value.ToString(); happinessText.text = happines.value.ToString(); weightText.text = weigth.value.ToString(); energyBar.value = energy.value; socialBar.value = social.value; satietyBar.value = satiety.value; thirstBar.value = thirst.value; hygieneBar.value = hygiene.value; funBar.value = fun.value; stressBar.value = stress.value; happinessBar.value = happines.value; brakefatCheck.SetActive(brakefast.boolValue); bathCheck.SetActive(bath.boolValue); workCheck.SetActive(work.boolValue); cleanCheck.SetActive(clean.boolValue); eatCheck.SetActive(eat.boolValue); dinnerCheck.SetActive(dinner.boolValue); sleepCheck.SetActive(sleep.boolValue); } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class GameOverAnalisis : MonoBehaviour { [SerializeField] private GameObject hungerText; [SerializeField] private GameObject hidratationText; [SerializeField] private GameObject stressText; [SerializeField] private GameObject fatText; [SerializeField] private GameObject workText; [SerializeField] private GameObject winText; [SerializeField] private PauseMenu pauseMenu; // Start is called before the first frame update private string reason=""; void Start() { } public void GameOverReason(string reason) { this.reason = reason; pauseMenu.GameOver(); } // Update is called once per frame void Update() { if (reason == "win") { winText.SetActive(true); } else { winText.SetActive(false); } if (reason=="work") { workText.SetActive(true); } else { workText.SetActive(false); } if (reason == "fat") { fatText.SetActive(true); } else { fatText.SetActive(false); } if (reason == "stress") { stressText.SetActive(true); } else { stressText.SetActive(false); } if (reason == "hid") { hidratationText.SetActive(true); } else { hidratationText.SetActive(false); } if (reason == "hunger") { hungerText.SetActive(true); } else { hungerText.SetActive(false); } } } <file_sep>using UnityEngine; using UnityEngine.UI; using System.Collections; public class GameController : MonoBehaviour { // the image you want to fade, assign in inspector [SerializeField] private Image blackOutImage; [SerializeField] private GameObject imageGameObject; private void Start() { RunFade(); } public void RunFade() { // fades the image out when you click imageGameObject.SetActive(true); StartCoroutine(FadeImage(true)); } IEnumerator FadeImage(bool fadeAway) { // fade from opaque to transparent if (fadeAway) { // loop over 1 second backwards for (float i = 2; i >= 0; i -= Time.deltaTime) { // set color with i as alpha blackOutImage.color = new Color(0, 0, 0, i); yield return null; } } // fade from transparent to opaque else { // loop over 1 second for (float i = 0; i <= 2; i += Time.deltaTime) { // set color with i as alpha blackOutImage.color = new Color(0, 0, 0, i); yield return null; } } imageGameObject.SetActive(false); } }<file_sep>using System; using System.Collections; using System.Collections.Generic; using UnityEngine; using TMPro; public class PlayerMovement : MonoBehaviour { [SerializeField] private float mouseSensitivity = 100f; [SerializeField] private float movementSpeed = 12f; [SerializeField] private float InteractionRange = 1.8f; [SerializeField] private GameObject messagePanel; [SerializeField] private TextMeshProUGUI interactionText; public Interactable focus; private CharacterController controller; private Transform playerBody; private Camera mainCamera; private float xRotation = 0f; private int rayLayerMask; private SatatusController currentStatus; private Actions playerActions; public List<Transform> interactablesData = new List<Transform>(); private List<IntractableObject> interctablesObjects = new List<IntractableObject>(); void Start() { Cursor.lockState = CursorLockMode.Locked; playerBody = GetComponent<Transform>(); controller = GetComponent<CharacterController>(); mainCamera = Camera.main; LayerMask iRayLM = LayerMask.NameToLayer("InteractRaycast"); rayLayerMask = 1 << iRayLM.value; playerActions = gameObject.GetComponent<Actions>(); currentStatus = gameObject.GetComponent<SatatusController>(); foreach (var item in interactablesData) { interctablesObjects.Add(new IntractableObject(item.name)); } } void Update() { MouseLook(); MoventControl(); InteractionControl(); } private void FixedUpdate() { } private void InteractionControl() { RaycastHit hit; Vector3 rayOrigin = mainCamera.ViewportToWorldPoint(new Vector3(0.5f, 0.5f, 0.5f)); int currentEnergy = currentStatus.GetState().GetEnergy().currentValue; if (Physics.Raycast(rayOrigin, mainCamera.transform.forward, out hit, InteractionRange, rayLayerMask)) { focus = hit.collider.GetComponent<Interactable>(); if (focus != null) { foreach (var item in interctablesObjects) { if ((item.getName() == focus.name) && focus.allowInteract) { messagePanel.SetActive(true); interactionText.text = item.getAction() + " (E)"; } } if (Input.GetKeyDown(KeyCode.E) && focus.allowInteract) { switch (focus.name) { case "Bed": if (currentEnergy <= 40) { playerActions.Sleep(); } break; case "Computer": if (!currentStatus.GetState().work) { playerActions.Work(); } break; default: focus.DoInteraction(); break; } if (focus.name == "Bed") { } else { } } } } else { messagePanel.SetActive(false); interactionText.text = ""; } } private void MoventControl() { float moveX = Input.GetAxis("Horizontal"); float moveY = Input.GetAxis("Vertical"); Vector3 move = transform.right * moveX + transform.forward * moveY; controller.Move(move * movementSpeed * Time.deltaTime); } private void MouseLook() { float mouseX = Input.GetAxis("Mouse X") * mouseSensitivity * Time.deltaTime; float mouseY = Input.GetAxis("Mouse Y") * mouseSensitivity * Time.deltaTime; xRotation -= mouseY; xRotation = Mathf.Clamp(xRotation, -90f, 90f); mainCamera.transform.localRotation = Quaternion.Euler(xRotation, 0f, 0f); playerBody.Rotate(Vector3.up * mouseX); } } <file_sep>using System.Collections; using System.Collections.Generic; using TMPro; using UnityEngine; public class ModifiedClock : MonoBehaviour { //seconds in a day 86400 public int timeSpeed = 1; private float totalSecondsIngame = 0; public TextMeshProUGUI secondShow; public TextMeshProUGUI dayText; private int dayCounter; private int hourCounter; private int minutesCounter; private int secondsCounter; private float degrees; public GameObject Sun; private string[] dayName = { "Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado", "Domingo" }; // Start is called before the first frame update void Start() { } // Update is called once per frame void Update() { int totalsec; totalSecondsIngame += Time.deltaTime * timeSpeed; dayCounter = (int)totalSecondsIngame / 84600; hourCounter = (int)(totalSecondsIngame - (dayCounter * 86400)) / 3600; minutesCounter = (int)(totalSecondsIngame - (hourCounter * 3600)) / 60; secondsCounter = (int)totalSecondsIngame % 60; totalsec = (int)totalSecondsIngame; degrees = (totalSecondsIngame / 240) - 120f; Sun.transform.localEulerAngles = new Vector3(degrees, -90f, 0f); secondShow.text = ("Hora ") + hourCounter.ToString() + (" : ") + minutesCounter.ToString(); dayText.text = "Día: " + dayName[dayCounter].ToString(); } public float GetSeconds() { return totalSecondsIngame*Time.deltaTime; } public int GetDay (){ return dayCounter; } public int GetHour() { return hourCounter; } public int GetMinute() { return minutesCounter; } public void IncressTime(float timeAdded) { totalSecondsIngame += Time.deltaTime * timeAdded; } } <file_sep>using System.Collections; using System.Collections.Generic; using TMPro; using UnityEngine; public class MultipleInteracionConroller : MonoBehaviour { [SerializeField] private float radius = 2f; [SerializeField] private string defaultAction; [SerializeField] private int[] initialHours; [SerializeField] private int[] finalalHours; [SerializeField] private string[] actionsName; public bool allowInteract { get; private set; } private string nameObject; private GameObject player; private TextMeshProUGUI messageText; private GameObject messagePanel; private ActionsModifier actionsModifier; private TimeController timeController; void Start() { timeController = GameObject.Find("EventSystem").GetComponent<TimeController>(); nameObject = gameObject.name; player = GameObject.Find("Player"); actionsModifier = gameObject.GetComponent<ActionsModifier>(); } // Update is called once per frame void Update() { float distance = Vector3.Distance(player.GetComponent<Transform>().position, transform.position); if (distance <= radius) { allowInteract = true; } else { allowInteract = false; } } public void PerformActions() { if (actionsModifier != null) { Debug.Log("performing action"); actionsModifier.ActivateAction(); } } public string GetActionName() { if (actionsName.Length > 0) { for (int i = 0; i < actionsName.Length; i++) { if (timeController.hourCounter >= initialHours[i] && timeController.hourCounter <= finalalHours[i]) { return actionsName[i]; } else { return defaultAction; } } } return defaultAction; } private void OnDrawGizmosSelected() { Gizmos.color = Color.yellow; Gizmos.DrawWireSphere(transform.position, radius); } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class MecanicsController : MonoBehaviour { Actions actionsThePlayer ; FullSate currentState ; // Start is called before the first frame update void Start() { actionsThePlayer =gameObject.GetComponent<Actions>(); currentState = GetComponent<SatatusController>().GetState(); } // Update is called once per frame void Update() { if (currentState.GetEnergy().currentValue<=0&&!actionsThePlayer.isSleeping) { Debug.Log("the player should sleep"); actionsThePlayer.Sleep(); } } }
756aae6614387a2fee36e921a2d277049b3a42ac
[ "C#" ]
32
C#
DemisRincon/Stay-at-home-Code
f636e7f8a5cb343b2a3fee1f7451f1bdeb5e3e5f
f6454c041689fa9ade4bf4016a4fe1a1d21f6493
refs/heads/master
<repo_name>vacary/inria-admm<file_sep>/S-update/ADMM_Master_scipy_inside/ADMM/Solver/Tolerance/iter_totaltolerance.py MAXITER = 1000 ABSTOL = 1e-06 #scaling, otherwise 1e-03 RELTOL = 1e-04 <file_sep>/S-update/ADMM_Master_scipy_inside/Master_time.py ######################################################### #################### MASTER FUNCTION #################### ######################################################### import ADMM def master(solver, problem, rho_method): STRING = 'ADMM.' + solver + '("' + problem + '", "' + rho_method + '")' return eval(STRING) ######################################################### ###################### IMPORT DATA ###################### ######################################################### #Import all the problems hdf5 import os all_problems_all = os.listdir("ADMM/Data/box_stacks/") all_problems_all.sort() all_problems = [all_problems_all[13]] #Import all the solvers #all_solvers = ['cp_N', 'cp_R', 'cp_RR', 'vp_N_He', 'vp_R_He', 'vp_RR_He', 'vp_N_Spectral', 'vp_R_Spectral', 'vp_RR_Spectral', 'vp_N_Wohlberg', 'vp_R_Wohlberg', 'vp_RR_Wohlberg'] all_solvers = ['vp_RR_He'] #all_solvers = ['cp_RR'] ######################################################### ######################### CODE ########################## ######################################################### #Import librearies import numpy as np import pickle #Definition of list dict_master = [] #rho_optimal = ['acary','dicairano','ghadimi','normal'] rho_optimal = ['dicairano'] #rho_optimal = ['acary'] #Time problem/solver for each_problem in all_problems: print('---' + each_problem + '---') dict_problem = [] for each_solver in all_solvers: dict_solver = {'problem': each_problem, 'solver': each_solver} for each_rho in rho_optimal: print(each_solver + ': ' + each_rho) timing = master(each_solver, each_problem, each_rho) #try: # timing = master(each_solver, each_problem, each_rho) #except: # timing = np.nan #NaN dict_solver[each_rho+' (time)'] = timing dict_problem.append(dict_solver) dict_master.append(dict_problem) #Save the data pickle.dump(dict_master, open("time_solver.p", "wb")) <file_sep>/ADMM_Master_scipy_inside/ADMM/Solver/Tolerance/stop_criterion.py import numpy as np from scipy.sparse import csr_matrix from scipy.sparse import csc_matrix def stopcriterion(A,A_T,v,u,b,xi,r,s,r_norm,s_norm,p,n,ABSTOL,RELTOL,rho,k): pri_evalf = np.array([np.linalg.norm(csr_matrix.dot(A,v[k+1])),np.linalg.norm(u[k+1]),np.linalg.norm(b)]) eps_pri = np.sqrt(p)*ABSTOL + RELTOL*np.amax(pri_evalf) dual_evalf = rho * csc_matrix.dot(A_T,xi[k+1]) eps_dual = np.sqrt(n)*ABSTOL + RELTOL*np.linalg.norm(dual_evalf) r_norm.append(np.linalg.norm(r[k+1])) s_norm.append(np.linalg.norm(s[k+1])) if r_norm[k+1]<=eps_pri and s_norm[k+1]<=eps_dual: return 'break' <file_sep>/ADMM_Master_scipy_inside/ADMM/Solver/Rho/Varying/Spectral_relaxed.py ################################# ## Spectral parameter - update ## ################################# import numpy as np from scipy.sparse import csr_matrix eps_corr = 0.2 mod = 2 #varying every 'mod' iterations def penalty(A,Av,u,w,b,xi_hat,ratio,rG,xiG,rho,v,k): if k % mod == 0: step = -k * (mod-1)/mod #Set up of needed constants rG.append(Av - u[k] + w + b) #rG[k+1] xiG.append(ratio * (xi_hat[k] + rG[k+1+step])) #xiG[k+1] #Set up of new variables Dlambda = rho[k]*xiG[k+1+step] - rho[k+step]*xiG[k+step] DH = csr_matrix.dot(A, v[k+1] - np.squeeze(v[k])) DG = - u[k+1] + u[k] #Definitions of inner products Dlambda_dot = np.dot(np.transpose(Dlambda),Dlambda) DH_dot = np.dot(np.transpose(DH),DH) DG_dot = np.dot(np.transpose(DG),DG) DH_Dlambda_dot = np.dot(np.transpose(DH),Dlambda) DG_Dlambda_dot = np.dot(np.transpose(DG),Dlambda) #Definitions of norms Dlambda_norm = np.linalg.norm(Dlambda) DH_norm = np.linalg.norm(DH) DG_norm = np.linalg.norm(DG) #Definition of alfa and beta SD/MG alfa_SD = Dlambda_dot / DH_Dlambda_dot alfa_MG = DH_Dlambda_dot / DH_dot beta_SD = Dlambda_dot / DG_Dlambda_dot beta_MG = DG_Dlambda_dot / DG_dot #Election of alfa and beta hat if 2.0*alfa_MG > alfa_SD: alfa_hat = alfa_MG else: alfa_hat = alfa_SD - alfa_MG/2.0 if 2.0*beta_MG > beta_SD: beta_hat = beta_MG else: beta_hat = beta_SD - beta_MG/2.0 #Correlations alfa_corr = DH_Dlambda_dot / (DH_norm * Dlambda_norm) beta_corr = DG_Dlambda_dot / (DG_norm * Dlambda_norm) #Penalty parameter update if alfa_corr > eps_corr and beta_corr > eps_corr: rhos = np.sqrt(alfa_hat*beta_hat) elif alfa_corr > eps_corr and beta_corr <= eps_corr: rhos = alfa_hat elif alfa_corr <= eps_corr and beta_corr > eps_corr: rhos = beta_hat else: rhos = rho[k] return rhos else: return rho[k] <file_sep>/ADMM_Master_scipy_inside/ADMM/vp_RR_He.py ''' % Solves the following problem via ADMM: % % minimize (1/2)*v'*M*v + f'*v + indicator(u) % subject to u = Av + b ''' def vp_RR_He(problem_data, rho_method): ###################### ## IMPORT LIBRARIES ## ###################### #Math libraries import numpy as np from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import linalg #Timing import time #Import data from Data.read_fclib import * #Plot residuals from Solver.ADMM_iteration.Numerics.plot import * #Initial penalty parameter import Solver.Rho.Optimal #Max iterations and kind of tolerance from Solver.Tolerance.iter_totaltolerance import * #Acceleration from Solver.Acceleration.plusr_vp import * #Varying penalty parameter from Solver.Rho.Varying.He import * #b = Es matrix from Data.Es_matrix import * #Db = DEs matrix (derivative) from Data.DEs_matrix import * #Projection onto second order cone from Solver.ADMM_iteration.Numerics.projection import * ################################## ############# REQUIRE ############ ################################## start = time.clock() problem = hdf5_file(problem_data) M = problem.M.tocsc() f = problem.f A = csc_matrix.transpose(problem.H.tocsc()) A_T = csr_matrix.transpose(A) w = problem.w mu = problem.mu #Dimensions (normal,tangential,tangential) dim1 = 3 dim2 = np.shape(w)[0] #Problem size n = np.shape(M)[0] p = np.shape(w)[0] b = [Es_matrix(w,mu,np.zeros([p,]))] ################################# ############# SET-UP ############ ################################# #Set-up of vectors v = [np.zeros([n,])] u = [np.zeros([p,])] #this is u tilde, but in the notation of the paper is used as hat [np.zeros([10,0])] u_hat = [np.zeros([p,])] #u_hat[0] #in the notation of the paper this used with a underline xi = [np.zeros([p,])] xi_hat = [np.zeros([p,])] r = [np.zeros([p,])] #primal residual s = [np.zeros([p,])] #dual residual r_norm = [0] s_norm = [0] tau = [1] #over-relaxation e = [] #restart rho = [] #Optimal penalty parameter rho_string = 'Solver.Rho.Optimal.' + rho_method + '(A,M,A_T)' rh = eval(rho_string) rho.append(rh) #rho[0] #Plot rho_plot = [] b_plot = [] u_bin_plot = [] xi_bin_plot = [] siconos_plot = [] ################ ## ITERATIONS ## ################ for k in range(MAXITER): print k #Super LU factorization of M + rho * dot(M_T,M) if rho[k] != rho[k-1] or k == 0: P = M + rho[k] * csc_matrix.dot(A_T,A) LU = linalg.splu(P) LU_old = LU else: LU = LU_old ################ ## v - update ## ################ RHS = -f + rho[k] * csc_matrix.dot(A_T, -w - b[k] - xi_hat[k] + u_hat[k]) v.append(LU.solve(RHS)) #v[k+1] ################ ## b - update ## ################ Av = csr_matrix.dot(A,v[k+1]) b.append(Es_matrix(w,mu,Av + w)) ################ ## u - update ## ################ vector = Av + xi_hat[k] + w + b[k+1] u.append(projection(vector,mu,dim1,dim2)) #u[k+1] ######################## ## residuals - update ## ######################## s.append(rho[k] * csc_matrix.dot(A_T,(u[k+1]-u_hat[k]))) #s[k+1] r.append(Av - u[k+1] + w + b[k+1]) #r[k+1] ################# ## xi - update ## ################# ratio = rho[k-1]/rho[k] #update of dual scaled variable with new rho xi.append(ratio*(xi_hat[k] + r[k+1])) #xi[k+1] ################################### ## accelerated ADMM with restart ## ################################### plusr(tau,u,u_hat,xi,xi_hat,k,e,rho,ratio) ################################ ## penalty parameter - update ## ################################ r_norm.append(np.linalg.norm(r[k+1])) s_norm.append(np.linalg.norm(s[k+1])) rho.append(penalty(rho[k],r_norm[k+1],s_norm[k+1])) #################### ## stop criterion ## #################### pri_evalf = np.amax(np.array([np.linalg.norm(csr_matrix.dot(A,v[k+1])),np.linalg.norm(u[k+1]),np.linalg.norm(w + b[k+1])])) eps_pri = np.sqrt(p)*ABSTOL + RELTOL*pri_evalf dual_evalf = np.linalg.norm(rho[k] * csc_matrix.dot(A_T,xi[k+1])) eps_dual = np.sqrt(n)*ABSTOL + RELTOL*dual_evalf R = -rho[k]*xi[k+1] N1 = csc_matrix.dot(M, v[k+1]) - csc_matrix.dot(A_T, R) + f N2 = u[k+1] - projection(u[k+1] - R, mu, dim1, dim2) N1_norm = np.linalg.norm(N1) N2_norm = np.linalg.norm(N2) siconos_plot.append(np.sqrt( N1_norm**2 + N2_norm**2 )) if r_norm[k+1]<=eps_pri and s_norm[k+1]<=eps_dual: for element in range(len(u)): #Relative velocity u_proj = projection(u[element],mu,dim1,dim2) u_proj_contact = np.split(u_proj,dim2/dim1) u_contact = np.split(u[element],dim2/dim1) u_count = 0.0 for contact in range(dim2/dim1): if np.allclose(u_contact[contact], u_proj_contact[contact], rtol=0.1, atol=0.0): u_count += 1.0 u_bin = 100 * u_count / (dim2/dim1) u_bin_plot.append(u_bin) #Reaction xi_proj = projection(-1.0 * xi[element],1/mu,dim1,dim2) xi_proj_contact = np.split(xi_proj,dim2/dim1) xi_contact = np.split(-1.0 * xi[element],dim2/dim1) xi_count = 0.0 for contact in range(dim2/dim1): if np.allclose(xi_contact[contact], xi_proj_contact[contact], rtol=0.1, atol=0.0): xi_count += 1.0 xi_bin = 100 * xi_count / (dim2/dim1) xi_bin_plot.append(xi_bin) for element in range(len(r_norm)): rho_plot.append(rho[element]) b_plot.append(np.linalg.norm(b[element])) #R = -rho[k]*xi[k+1] #N1 = csc_matrix.dot(M, v[k+1]) - csc_matrix.dot(A_T, R) + f #N2 = u[k+1] - projection(u[k+1] - R, mu, dim1, dim2) #N1_norm = np.linalg.norm(N1) #N2_norm = np.linalg.norm(N2) #print np.sqrt( N1_norm**2 + N2_norm**2 ) print b_plot[-1] print b[-1][:3] print b[-1][-3:] break b_per_contact_j1 = np.split(b[k+1],dim2/dim1) b_per_contact_j0 = np.split(b[k],dim2/dim1) count = 0 for j in range(dim2/dim1): if np.linalg.norm(b_per_contact_j1[j] - b_per_contact_j0[j]) / np.linalg.norm(b_per_contact_j0[j]) > 1e-03: count += 1 if k == MAXITER-1: #count < 1 for element in range(len(u)): #Relative velocity u_proj = projection(u[element],mu,dim1,dim2) u_proj_contact = np.split(u_proj,dim2/dim1) u_contact = np.split(u[element],dim2/dim1) u_count = 0.0 for contact in range(dim2/dim1): if np.allclose(u_contact[contact], u_proj_contact[contact], rtol=0.1, atol=0.0): u_count += 1.0 u_bin = 100 * u_count / (dim2/dim1) u_bin_plot.append(u_bin) #Reaction xi_proj = projection(-1.0 * xi[element],1/mu,dim1,dim2) xi_proj_contact = np.split(xi_proj,dim2/dim1) xi_contact = np.split(-1.0 * xi[element],dim2/dim1) xi_count = 0.0 for contact in range(dim2/dim1): if np.allclose(xi_contact[contact], xi_proj_contact[contact], rtol=0.1, atol=0.0): xi_count += 1.0 xi_bin = 100 * xi_count / (dim2/dim1) xi_bin_plot.append(xi_bin) for element in range(len(r_norm)): rho_plot.append(rho[element]) b_plot.append(np.linalg.norm(b[element])) #R = -rho[k]*xi[k+1] #N1 = csc_matrix.dot(M, v[k+1]) - csc_matrix.dot(A_T, R) + f #N2 = u[k+1] - projection(u[k+1] - R, mu, dim1, dim2) #N1_norm = np.linalg.norm(N1) #N2_norm = np.linalg.norm(N2) #print np.sqrt( N1_norm**2 + N2_norm**2 ) print b_plot[-1] print b[-1][:3] print b[-1][-3:] break #end rutine end = time.clock() #################### ## REPORTING DATA ## #################### f, axarr = plt.subplots(5, sharex=True) f.suptitle('Internal update with vp_RR_He (Di Cairano)') axarr[0].semilogy(b_plot) axarr[0].set(ylabel='||Phi(s)||') axarr[1].plot(rho_plot) axarr[1].set(ylabel='Rho') axarr[2].semilogy(r_norm, label='||r||') axarr[2].semilogy(s_norm, label='||s||') axarr[2].legend() axarr[2].set(ylabel='Residuals') axarr[3].semilogy(siconos_plot) axarr[3].set(ylabel='SICONOS error') axarr[4].plot(u_bin_plot, label='u in K*') axarr[4].plot(xi_bin_plot, label='-xi in K') axarr[4].legend() axarr[4].set(xlabel='Iteration', ylabel='Projection (%)') plt.show() plt.show() #print b[-1] #print np.linalg.norm(b[-1]) #plotit(r,b,start,end,'With acceleration / Without restarting for '+problem_data+' for rho: '+rho_method) #plotit(r,s,start,end,'Internal update with vp_RR_He (Di Cairano)') time = end - start print 'Total time: ', time return time <file_sep>/ADMM_Master_scipy_inside/ADMM/Data/DEs_matrix.py import numpy as np from scipy.sparse import csc_matrix def DEs_matrix(w,mu,u,H_T): dim1 = 3 dim2 = w.shape[0] u_per_contact = np.split(u,dim2/dim1) H_T_per_contact = np.split(H_T,dim2/dim1) for i in range(dim2/dim1): if i == 0: C1 = mu[i]/np.linalg.norm(u_per_contact[i][1:]) C2 = np.dot(np.array([1,0,0]),u_per_contact[i]) C3 = np.dot(np.array([[0,0,0],[0,1,0],[0,0,1]]), H_T_per_contact[i]) DE_ = C1*C2*C3 else: C1 = mu[i]/np.linalg.norm(u_per_contact[i][1:]) C2 = np.dot(np.array([1,0,0]),u_per_contact[i]) C3 = np.dot(np.array([[0,0,0],[0,1,0],[0,0,1]]), H_T_per_contact[i]) DE_ = np.concatenate((DE_,C1*C2*C3)) DE = np.transpose(DE_) return csc_matrix(DE) <file_sep>/ADMM_Master_scipy_outside/ADMM/Solver/Tolerance/iter_totaltolerance.py MAXITER = 10000 ABSTOL = 1e-06 #scaling, otherwise 1e-03 RELTOL = 1e-04 <file_sep>/ADMM_Master_scipy_outside/ADMM/vp_RR_He (copy).py ''' % Solves the following problem via ADMM: % % minimize (1/2)*v'*M*v + f'*v + indicator(u) % subject to u = Av + b ''' def vp_RR_He(problem_data, rho_method): ###################### ## IMPORT LIBRARIES ## ###################### #Math libraries import numpy as np from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import linalg #Timing import time #Import data from Data.read_fclib import * #Plot residuals from Solver.ADMM_iteration.Numerics.plot import * #Initial penalty parameter import Solver.Rho.Optimal #Max iterations and kind of tolerance from Solver.Tolerance.iter_totaltolerance import * #Acceleration from Solver.Acceleration.plusr_vp import * #Varying penalty parameter from Solver.Rho.Varying.He import * #b = Es matrix from Data.Es_matrix import * #Projection onto second order cone from Solver.ADMM_iteration.Numerics.projection import * ################################## ############# REQUIRE ############ ################################## start = time.clock() problem = hdf5_file(problem_data) M = problem.M.tocsc() f = problem.f A = csc_matrix.transpose(problem.H.tocsc()) A_T = csr_matrix.transpose(A) w = problem.w mu = problem.mu #Dimensions (normal,tangential,tangential) dim1 = 3 dim2 = np.shape(w)[0] #Problem size n = np.shape(M)[0] p = np.shape(w)[0] b = [1/linalg.norm(A,'fro') * Es_matrix(w,mu,np.zeros([p,])) / np.linalg.norm(Es_matrix(w,mu,np.ones([p,])))] ################################# ############# SET-UP ############ ################################# #Set-up of vectors v = [np.zeros([n,])] u = [np.zeros([p,])] #this is u tilde, but in the notation of the paper is used as hat [np.zeros([10,0])] u_hat = [np.zeros([p,])] #u_hat[0] #in the notation of the paper this used with a underline xi = [np.zeros([p,])] xi_hat = [np.zeros([p,])] r = [np.zeros([p,])] #primal residual s = [np.zeros([p,])] #dual residual r_norm = [0] s_norm = [0] tau = [1] #over-relaxation e = [] #restart rho = [] #Optimal penalty parameter rho_string = 'Solver.Rho.Optimal.' + rho_method + '(A,M,A_T)' rh = eval(rho_string) rho.append(rh) #rho[0] ################ ## ITERATIONS ## ################ for j in range(15): print j len_u = len(u)-1 for k in range(len_u,MAXITER): #Super LU factorization of M + rho * dot(M_T,M) if k == 0: #rho[k] != rho[k-1] or P = M + rho[k] * csc_matrix.dot(A_T,A) LU = linalg.splu(P) LU_old = LU else: LU = LU_old #rho[k] != rho[k-1] or ################ ## v - update ## ################ RHS = -f + rho[k] * csc_matrix.dot(A_T, -w - b[j] - xi_hat[k] + u_hat[k]) v.append(LU.solve(RHS)) #v[k+1] #P = M + rho[k] * csc_matrix.dot(A_T,A) #RHS = -f + rho[k] * csc_matrix.dot(A_T, -w - b[j] - xi_hat[k] + u_hat[k]) #v.append(linalg.spsolve(P,RHS)) #v[k+1] #P = M + rho[k] * csc_matrix.dot(A_T,A) #LU = linalg.factorized(P) #RHS = -f + rho[k] * csc_matrix.dot(A_T, -w - b[j] - xi_hat[k] + u_hat[k]) #v.append(LU(RHS)) #v[k+1] ################ ## u - update ## ################ Av = csr_matrix.dot(A,v[k+1]) vector = Av + xi_hat[k] + w + b[j] u.append(projection(vector,mu,dim1,dim2)) #u[k+1] ######################## ## residuals - update ## ######################## s.append(rho[k] * csc_matrix.dot(A_T,(u[k+1]-u_hat[k]))) #s[k+1] r.append(Av - u[k+1] + w + b[j]) #r[k+1] ################# ## xi - update ## ################# ratio = rho[k-1]/rho[k] #update of dual scaled variable with new rho xi.append(ratio*(xi_hat[k] + r[k+1])) #xi[k+1] if j!=0 and k!=0 and j < 2: #from mpl_toolkits.mplot3d import Axes3D #soa = np.array([np.concatenate((xi[k+1][:3],np.array([0,0,0]))).tolist()]) #np.concatenate((xi[k][:3],np.array([0,0,0]))).tolist() #print rho[k]*xi[k+1][:3] * 1e19 print u[k+1][:3] #X, Y, Z, U, V, W = zip(*soa) #fig = plt.figure() #ax = fig.add_subplot(111, projection='3d') #ax.quiver(X, Y, Z, U, V, W) #ax.set_xlim([-1, 1]) #ax.set_ylim([-1, 1]) #ax.set_zlim([-1, 1]) #plt.show() #plotit(xi[-2:], xi[-2:], start, 5.0,'External update with vp_RR_He (Di Cairano)') #print Av[-3:] - u[k+1][-3:] + w[-3:] ################################### ## accelerated ADMM with restart ## ################################### plusr(tau,u,u_hat,xi,xi_hat,k,e,rho,ratio) ################################ ## penalty parameter - update ## ################################ r_norm.append(np.linalg.norm(r[k+1])) s_norm.append(np.linalg.norm(s[k+1])) rho.append(penalty(rho[k],r_norm[k+1],s_norm[k+1])) #rho.append(0.125) #################### ## stop criterion ## #################### pri_evalf = np.amax(np.array([np.linalg.norm(csr_matrix.dot(A,v[k+1])),np.linalg.norm(u[k+1]),np.linalg.norm(w + b[j])])) eps_pri = np.sqrt(p)*ABSTOL + RELTOL*pri_evalf dual_evalf = np.linalg.norm(rho[k] * csc_matrix.dot(A_T,xi[k+1])) eps_dual = np.sqrt(n)*ABSTOL + RELTOL*dual_evalf if r_norm[k+1]<=eps_pri and s_norm[k+1]<=eps_dual: R = rho[k]*xi[k+1] N1 = csc_matrix.dot(M, v[k+1]) - csc_matrix.dot(A_T, R) + f N2 = R - projection(R - u[k+1], 1/mu, dim1, dim2) N1_norm = np.linalg.norm(N1) N2_norm = np.linalg.norm(N2) print np.sqrt( N1_norm**2 + N2_norm**2 ) #print rho[k]*xi[k+1] #plotit(r[len_u:], xi[len_u:], start, 5.0,'External update with vp_RR_He (Di Cairano)') break #end rutine #b(s) stop criterion b.append(Es_matrix(w,mu,Av + w)) if j == 0: pass else: b_per_contact_j1 = np.split(b[j+1],dim2/dim1) b_per_contact_j0 = np.split(b[j],dim2/dim1) count = 0 for i in range(dim2/dim1): if np.linalg.norm(b_per_contact_j1[i] - b_per_contact_j0[i]) / np.linalg.norm(b_per_contact_j0[i]) > 1e-03: count += 1 if count < 1: #orthogonal = np.dot(u[-1],rho[-2]*xi[-1]) #print orthogonal break v.append(np.zeros([n,])) u.append(np.zeros([p,])) u_hat.append(np.zeros([p,])) xi.append(np.zeros([p,])) xi_hat.append(np.zeros([p,])) r.append(np.zeros([p,])) #primal residual s.append(np.zeros([p,])) #dual residual r_norm.append(0) s_norm.append(0) tau.append(1) #over-relaxation e.append(np.nan) #restart rho.append(rho[-1]) end = time.clock() time = end - start #################### ## REPORTING DATA ## #################### print P.nnz print np.shape(P) f, axarr = plt.subplots(2, sharex=True) f.suptitle('Sharing X axis') axarr[0].plot(rho, label='rho') axarr[1].semilogy(r_norm, label='||r||') axarr[1].semilogy(s_norm, label='||s||') plt.show() #plt.semilogy(r_norm, label='||r||') #plt.hold(True) #plt.semilogy(rho, label='||s||') #plt.hold(True) #plt.ylabel('Residuals') #plt.xlabel('Iteration') #plt.text(len(r)/2,np.log(np.amax(S)+np.amax(R))/10,'N_iter = '+str(len(r)-1)) #plt.text(len(r)/2,np.log(np.amax(S)+np.amax(R))/100,'Total time = '+str((end-start)*10**3)+' ms') #plt.text(len(r)/2,np.log(np.amax(S)+np.amax(R))/1000,'Time_per_iter = '+str(((end-start)/(len(r)-1))*10**3)+' ms') #plt.title('External update with vp_RR_He (Di Cairano)') #plt.legend() plt.show() #print 'Total time: ',time return time #print b[-1] #print np.linalg.norm(b[-1]) #plotit(b,s,start,end,'With acceleration / Without restarting for '+problem_data+' for rho: '+rho_method) #plotit(r, rho, start,end,'External update with vp_RR_He (Di Cairano)') <file_sep>/S-update/ADMM_Master_scipy_outside/ADMM/cp_RR.py ''' % Solves the following problem via ADMM: % % minimize (1/2)*v'*M*v + f'*v + indicator(u) % subject to u = Av + b ''' def cp_RR(problem_data, rho_method): ###################### ## IMPORT LIBRARIES ## ###################### #Math libraries import numpy as np from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import linalg #Timing import time #Import data from Data.read_fclib import * #Plot residuals from Solver.ADMM_iteration.Numerics.plot import * #Initial penalty parameter import Solver.Rho.Optimal #Max iterations and kind of tolerance from Solver.Tolerance.iter_totaltolerance import * #Acceleration from Solver.Acceleration.plusr import * #b = Es matrix from Data.Es_matrix import * #Projection onto second order cone from Solver.ADMM_iteration.Numerics.projection import * ##################################################### ############# TERMS / NOT A FUNCTION YET ############ ##################################################### start = time.clock() problem = hdf5_file(problem_data) M = problem.M.tocsc() f = problem.f A = csc_matrix.transpose(problem.H.tocsc()) A_T = csr_matrix.transpose(A) w = problem.w mu = problem.mu #Dimensions (normal,tangential,tangential) dim1 = 3 dim2 = np.shape(w)[0] #Problem size n = np.shape(M)[0] p = np.shape(w)[0] b = [1/linalg.norm(A,'fro') * Es_matrix(w,mu,np.zeros([p,])) / np.linalg.norm(Es_matrix(w,mu,np.ones([p,])))] ################################# ############# SET-UP ############ ################################# #Set-up of vectors v = [np.zeros([n,])] u = [np.zeros([p,])] #this is u tilde, but in the notation of the paper is used as hat [np.zeros([10,0])] u_hat = [np.zeros([p,])] #u_hat[0] #in the notation of the paper this used with a underline xi = [np.zeros([p,])] xi_hat = [np.zeros([p,])] r = [np.zeros([p,])] #primal residual s = [np.zeros([p,])] #dual residual r_norm = [0] s_norm = [0] tau = [1] #over-relaxation e = [] #restart #Optimal penalty parameter rho_string = 'Solver.Rho.Optimal.' + rho_method + '(A,M,A_T)' rho = eval(rho_string) #Plot r_plot = [] s_plot = [] b_plot = [] u_bin_plot = [] xi_bin_plot = [] ######################################## ## TERMS COMMON TO ALL THE ITERATIONS ## ######################################## #Super LU factorization of M + rho * dot(M_T,M) P = M + rho * csc_matrix.dot(A_T,A) LU = linalg.splu(P) ################ ## ITERATIONS ## ################ for j in range(20): print j len_u = len(u)-1 for k in range(len_u,MAXITER): ################ ## v - update ## ################ RHS = -f + rho * csc_matrix.dot(A_T, -w - b[j] - xi_hat[k] + u_hat[k]) v.append(LU.solve(RHS)) #v[k+1] ################ ## u - update ## ################ Av = csr_matrix.dot(A,v[k+1]) vector = Av + xi_hat[k] + w + b[j] u.append(projection(vector,mu,dim1,dim2)) #u[k+1] ######################## ## residuals - update ## ######################## s.append(rho * csc_matrix.dot(A_T,(u[k+1]-u_hat[k]))) #s[k+1] r.append(Av - u[k+1] + w + b[j]) #r[k+1] ################# ## xi - update ## ################# xi.append(xi_hat[k] + r[k+1]) #xi[k+1] ################################### ## accelerated ADMM with restart ## ################################### plusr(tau,u,u_hat,xi,xi_hat,k,e,rho) #################### ## stop criterion ## #################### pri_evalf = np.amax(np.array([np.linalg.norm(csr_matrix.dot(A,v[k+1])),np.linalg.norm(u[k+1]),np.linalg.norm(w + b[j])])) eps_pri = np.sqrt(p)*ABSTOL + RELTOL*pri_evalf dual_evalf = np.linalg.norm(rho * csc_matrix.dot(A_T,xi[k+1])) eps_dual = np.sqrt(n)*ABSTOL + RELTOL*dual_evalf r_norm.append(np.linalg.norm(r[k+1])) s_norm.append(np.linalg.norm(s[k+1])) if r_norm[k+1]<=eps_pri and s_norm[k+1]<=eps_dual: for element in range(len(u)): #Relative velocity u_proj = projection(u[element],mu,dim1,dim2) u_proj_contact = np.split(u_proj,dim2/dim1) u_contact = np.split(u[element],dim2/dim1) u_count = 0.0 for contact in range(dim2/dim1): if np.array_equiv(u_contact[contact], u_proj_contact[contact]): u_count += 1.0 u_bin = 100 * u_count / (dim2/dim1) u_bin_plot.append(u_bin) #Reaction xi_proj = projection(xi[element],1/mu,dim1,dim2) xi_proj_contact = np.split(xi_proj,dim2/dim1) xi_contact = np.split(xi[element],dim2/dim1) xi_count = 0.0 for contact in range(dim2/dim1): if np.array_equiv(xi_contact[contact], xi_proj_contact[contact]): xi_count += 1.0 xi_bin = 100 * xi_count / (dim2/dim1) xi_bin_plot.append(xi_bin) for element in range(len(r_norm)): r_plot.append(r_norm[element]) s_plot.append(s_norm[element]) b_plot.append(np.linalg.norm(b[j])) #print 'First contact' #print rho*xi[k+1][:3] #uy = projection(rho*xi[k+1],1/mu,dim1,dim2) #print uy[:3] #print 'Last contact' #print rho*xi[k+1][-3:] #print uy[-3:] #print u[k+1] #R = rho*xi[k+1] #N1 = csc_matrix.dot(M, v[k+1]) - csc_matrix.dot(A_T, R) + f #N2 = R - projection(R - u[k+1], 1/mu, dim1, dim2) #N1_norm = np.linalg.norm(N1) #N2_norm = np.linalg.norm(N2) #print np.sqrt( N1_norm**2 + N2_norm**2 ) break #b(s) stop criterion b.append(Es_matrix(w,mu,Av + w)) if j == 0: pass else: b_per_contact_j1 = np.split(b[j+1],dim2/dim1) b_per_contact_j0 = np.split(b[j],dim2/dim1) count = 0 for i in range(dim2/dim1): if np.linalg.norm(b_per_contact_j1[i] - b_per_contact_j0[i]) / np.linalg.norm(b_per_contact_j0[i]) > 1e-03: count += 1 if count < 1: break v = [np.zeros([n,])] u = [np.zeros([p,])] #this is u tilde, but in the notation of the paper is used as hat [np.zeros([10,0])] u_hat = [np.zeros([p,])] #u_hat[0] #in the notation of the paper this used with a underline xi = [np.zeros([p,])] xi_hat = [np.zeros([p,])] r = [np.zeros([p,])] #primal residual s = [np.zeros([p,])] #dual residual r_norm = [0] s_norm = [0] tau = [1] #over-relaxation e = [] #restart end = time.clock() #################### ## REPORTING DATA ## #################### f, axarr = plt.subplots(4, sharex=True) f.suptitle('External update with cp_RR (Acary)') axarr[0].semilogy(b_plot) axarr[0].set(ylabel='||Phi(s)||') axarr[1].axhline(y = rho) axarr[1].set(ylabel='Rho') axarr[2].semilogy(r_plot, label='||r||') axarr[2].semilogy(s_plot, label='||s||') axarr[2].set(ylabel='Residuals') axarr[3].plot(u_bin_plot, label='u in K*') axarr[3].plot(xi_bin_plot, label='xi in K') axarr[3].legend() axarr[3].set(xlabel='Iteration', ylabel='Projection (%)') plt.show() #plotit(r,b,start,end,'With acceleration / With restarting for '+problem_data+' for rho: '+rho_method) time = end - start print 'Total time: ', time return time <file_sep>/ADMM_Master_scipy/ADMM/Solver/Rho/Optimal/nicolas.py import numpy as np def nicolas(A,M,A_T): M_norm1 = np.linalg.norm(M,1) A_norm1 = np.linalg.norm(A,1) optimal = (np.sqrt(M_norm1) / A_norm1) return optimal <file_sep>/S-update/ADMM_Master_scipy_outside/ADMM/Solver/Rho/Optimal/__init__.py from acary import * from dicairano import * from ghadimi import * from normal import * from nicolas import * <file_sep>/ADMM_Master_scipy_inside/ADMM/Solver/Rho/Optimal/normal.py import numpy as np def normal(A,M,A_T): return 1.0 <file_sep>/ADMM_Master_scipy_outside/ADMM/Solver/Rho/Optimal/acary.py import numpy as np def acary(A,M,A_T): M_norm1 = np.linalg.norm(M.toarray(),1) A_norm1 = np.linalg.norm(A.toarray(),1) return M_norm1/A_norm1 <file_sep>/ADMM_Master_scipy_inside/ADMM/Solver/Rho/Varying/He.py #Penalty parameter def penalty(rho,r_norm,s_norm): mu = 10.0 factor = 2.0 if r_norm > mu * s_norm: return rho*factor elif s_norm > mu * r_norm: return rho/factor else: return rho <file_sep>/ADMM_Master_scipy_outside/ADMM/Solver/Rho/Varying/Wohlberg.py import numpy as np def penalty(rho,r_norm,s_norm,mu): mu = 10.0 factormax = 100.0 ratiosqrt = np.sqrt(r_norm / s_norm) if 1.0 <= ratiosqrt and ratiosqrt < factormax: factor = ratiosqrt elif 1.0/factormax < ratiosqrt and ratiosqrt < 1.0: factor = 1.0/ratiosqrt else: factor = factormax if r_norm > mu * s_norm: rhos = rho*factor elif s_norm > mu * r_norm: rhos = rho/factor else: rhos = rho return rhos <file_sep>/ADMM_Master_scipy_inside/Master_plot.py ######################################################### ###################### IMPORT DATA ###################### ######################################################### import pickle performance_profile = pickle.load( open( "performance_profile.p", "rb" ) ) ######################################################### ######################### CODE ########################## ######################################################### #Import librearies import numpy as np import matplotlib.pyplot as plt #Definition of list color = ['#9ACD32','#FFFF00','#40E0D0','#FF6347','#A0522D','#FA8072','#FFA500','#808000','#000080','#006400','#0000FF','#000000'] #['yellowgreen','yellow','violet','turquoise','tomato','sienna','salmon','orange','olive','navy','darkgreen','blue','black'] tau_ratio = np.arange(1.0,11.0,0.01) all_solvers = ['cp_N', 'cp_R', 'cp_RR', 'vp_N_He', 'vp_R_He', 'vp_RR_He', 'vp_N_Spectral', 'vp_R_Spectral', 'vp_RR_Spectral', 'vp_N_Wohlberg', 'vp_R_Wohlberg', 'vp_RR_Wohlberg'] rho_optimal = ['acary', 'dicairano', 'ghadimi', 'normal'] #Plot for each_rho_time in range(len(rho_optimal)): for s in range(len(all_solvers)): plt.plot(tau_ratio, performance_profile[s][each_rho_time], color[s], label = all_solvers[s]) plt.hold(True) plt.ylabel('Performance') plt.xlabel('Tau') plt.title('Performance profiles for '+rho_optimal[each_rho_time]) plt.legend() plt.show() <file_sep>/S-update/ADMM_Master_scipy_inside/ADMM/Solver/Rho/Optimal/dicairano.py import numpy as np from scipy.sparse import linalg def dicairano(A,M,A_T): eig,eig_vec = linalg.eigs(M) eigmax = np.absolute( np.amax(eig) ) eigmin = np.absolute( np.min(eig[np.nonzero(eig)]) ) return np.sqrt(eigmax*eigmin) <file_sep>/S-update/ADMM_Master_scipy_inside/ADMM/Solver/Rho/Optimal/ghadimi.py import numpy as np from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import linalg def ghadimi(A,M,A_T): DUAL = csr_matrix.dot(csr_matrix.dot(A,linalg.inv(M)),A_T) eig,eig_vect = linalg.eigs(DUAL) #sparse eigmax = np.absolute(np.amax(eig)) eigmin = np.absolute(np.min(eig[np.nonzero(eig)])) return 1 / np.sqrt(eigmax*eigmin) <file_sep>/S-update/ADMM_Master_scipy/ADMM/Solver/Acceleration/minusr.py ###################################### ## accelerated ADMM without restart ## ###################################### import numpy as np def minusr(tau,u,u_hat,xi,xi_hat,k): tau.append(0.5 * (1 + np.sqrt(1 + 4 * np.square(tau[k])))) #tau[k+1] alpha = (tau[k] - 1) / tau[k+1] u_hat.append(u[k+1] + alpha * (u[k+1] - u[k])) #u_hat[k+1] xi_hat.append(xi[k+1] + alpha * (xi[k+1] - xi[k])) #xi_hat[k+1] <file_sep>/ADMM_Master_scipy/ADMM/Data/Es_matrix.py import numpy as np def Es_matrix(w,mu,u): dim1 = 3 dim2 = w.shape[0] E_ = np.array([]) u_per_contact = np.split(u,dim2/dim1) for i in range(dim2/dim1): E_ = np.concatenate((E_,np.array([1,0,0])*mu[i]*np.linalg.norm(u_per_contact[i][1:]))) E = E_[:,np.newaxis] return np.squeeze(E) <file_sep>/ADMM_Master_scipy_inside/ADMM/Solver/ADMM_iteration/Numerics/projection.py import numpy as np def projection(vector,mu,dim1,dim2): vector_per_contact = np.split(vector,dim2/dim1) projected = np.array([]) for i in range(dim2/dim1): mui = mu[i] x1 = vector_per_contact[i][0] normx2 = np.linalg.norm(vector_per_contact[i][1:]) if normx2 <= (-mui)*x1: projected = np.concatenate((projected,np.zeros([dim1,]))) elif normx2 <= (1/mui)*x1: projected = np.concatenate((projected,vector_per_contact[i])) else: x2 = vector_per_contact[i][1:] projected = np.concatenate((projected,(mui**2)/(1+mui**2) * (x1 + (1/mui)*normx2) * np.concatenate((np.array([1]),(1/mui)*x2*(1/normx2))))) return projected <file_sep>/README.md ADMM for Frictional Contact ======= ## Introduction Implementation of friction model as a parametric quadratic optimization problem with second-order cone constraints coupled with a fixed point equation. See paper in this [link](https://hal.inria.fr/inria-00495734). --- ## Table of Contents - [ADMM](https://github.com/molinavergara24/inria-admm#ADMM) - [Data](https://github.com/molinavergara24/inria-admm#Data) - [Solver](https://github.com/molinavergara24/inria-admm#Solver) - [Backup_time](https://github.com/molinavergara24/inria-admm#Backup_time) - [Master_*](https://github.com/molinavergara24/inria-admm#Master_*) --- ## ADMM ADMM implementation. ### Data Test examples. ### Solver Libraries and functions of ADMM implementation. ## Master_* Performance profile: time -> ratio -> performance -> plot. <file_sep>/ADMM_Master_scipy_outside/ADMM/Solver/ADMM_iteration/Numerics/plot.py import numpy as np import matplotlib.pyplot as plt def plotit(r,s,start,end,title): R = [np.linalg.norm(k) for k in r] S = [np.linalg.norm(k) for k in s] plt.semilogy(R, label='||r||') plt.hold(True) plt.semilogy(S, label='||s||') plt.hold(True) plt.ylabel('Residuals') plt.xlabel('Iteration') #plt.text(len(r)/2,np.log(np.amax(S)+np.amax(R))/10,'N_iter = '+str(len(r)-1)) #plt.text(len(r)/2,np.log(np.amax(S)+np.amax(R))/100,'Total time = '+str((end-start)*10**3)+' ms') #plt.text(len(r)/2,np.log(np.amax(S)+np.amax(R))/1000,'Time_per_iter = '+str(((end-start)/(len(r)-1))*10**3)+' ms') plt.title(title) plt.legend() plt.show() ''' R = [np.linalg.norm(k) for k in r] plt.semilogy(R) plt.hold(True) plt.ylabel('||Phi(s)||') plt.xlabel('Iteration') plt.title('External update with vp_RR_He (Di Cairano)') plt.show() '''
d04fe5b0691e7c7942b03960135fa42c5afb9152
[ "Markdown", "Python" ]
23
Python
vacary/inria-admm
b4b6ba4b97d4b73791203415bdf49d5033847158
a1ef8bca7328b7a3015e8f457c0035a43695f57d
refs/heads/master
<repo_name>marissa-lc/Peer-One<file_sep>/README.md # Peer-One ## Description A peer to peer tutoring app where users can post requests for help with coding-related questions and receive help from their peers. The app uses randomly generated usernames to keep posts anonymous so that users remain in a safe, judgement-free environment. ### Technologies Used 1. MySQL 2. Express 3. Express-Handlebars 4. Custom ORM 5. WordPOS 6. UIKit ### Website Components 1. Login Page - New users can click a link to proceed with a sign up process 2. Skill request page - Post a help request, or - Specify what skills can provide help with 3. Feed - Post help request button - Update strengths/skills button - Posts are displayed in cards with anonymous username and post body as well as a chat button and a post answer button - Logout icon ## User Experience ![Login](https://i.imgur.com/XdR5KJd.jpg) ![Email Password](https://i.imgur.com/VxWud3V.jpg) ![Username](https://i.imgur.com/rAQdnqh.jpg) ![Strengths](https://i.imgur.com/QWyaii2.jpg) ![Skills Page](https://i.imgur.com/nhZWuSv.jpg) ![Feed](https://i.imgur.com/HUraqRA.jpg) ![Help Request](https://i.imgur.com/RCHyPOE.jpg) ![Add Strengths](https://i.imgur.com/tRmXsCF.jpg) ![Post Answer](https://i.imgur.com/q6plAU1.jpg) <file_sep>/config/passport.js const passport = require("passport"); const LocalStrategy = require("passport-local").Strategy; var db = require("../models"); // Telling passport we want to use a Local Strategy. In other words, we want login with a username/email and password passport.use(new LocalStrategy( { usernameField: "email" }, function (email, password, cb) { db.user.validateLogin({ email: email, password: <PASSWORD> }, function (err, user) { if (err) { return cb(err); } if (!user) { return cb(null, false); } if (user.password !== password) { return cb(null, false); } return cb(null, user); }); })); // Just consider this part boilerplate needed to make it all work passport.serializeUser(function (user, cb) { cb(null, user); }); passport.deserializeUser(function (obj, cb) { cb(null, obj); }); // Exporting our configured passport module.exports = passport;<file_sep>/public/assets/js/signup.js $(document).ready(function () { // Variables for form items const usernameSpan = $("#username-span"); const emailField = $("#email-field"); const passwordField = $("#password-field"); const signupBtn = $("#signup-btn"); // Get a random username getRandomUsername(function (randomUsername) { $(usernameSpan).text(randomUsername); }); // Event handler for button to get another random name $("#username-btn").on("click", function (event) { event.preventDefault(); getRandomUsername(function (randomUsername) { $(usernameSpan).text(randomUsername); }); }); // Event handler for signup button click $(signupBtn).on("click", function (event) { event.preventDefault(); // Save and validate form entries const username = $(usernameSpan).text(); const email = $(emailField).val().trim(); const password = $(passwordField).val().trim(); if (email.length > 0) { if (password.length > 0) { // Create object for AJAX POST request const newUser = { username: username, email: email, password: <PASSWORD> }; // Make the AJAX POST request $.ajax("http://localhost:8080/api/signup", { method: "POST", data: newUser }) .then(function (response) { console.log(response); window.location.replace("login"); }) .catch(function () { alert("Server error. Couldn't create user."); }); } else { alert("Please enter a valid password."); } } else { alert("Please enter a valid email address."); } }); }); function getRandomUsername(callback) { $.get("http://localhost:8080/api/namegen") .then(function (response) { callback(response); }); }<file_sep>/db/seed.sql use peer_up_db; insert into users (username, email, password) values ('MossyVarietypack29', '<EMAIL>', '<PASSWORD>'), ('GlisteningJambalaya04', '<EMAIL>', 'pass'), ('DeepenedPlasma3', '<EMAIL>', 'pass'); insert into skills (subject) values ('HTML'),('CSS'),('JavaScript'),('jQuery'),('APIs'),('Git'),('Node'),('Express'),('MySQL'),('React'),('NoSQL'); insert into posts (body, user_id, skill_id, reply_to_id) values ('Need help with SQL', 1, 1, NULL), ('Need help with Node', 2, 2, NULL), ('Need help with jQuery', 3, 3, NULL), ('Hey, I can help with that!', 3, NULL, 1); <file_sep>/controllers/namegen.js const Wordpos = require("wordpos"); const getNames = function (callback) { const wordpos = new Wordpos(); const names = []; // Start recursive execution of function wordRecurse(); // Define a recursieve function. It gets a random name from the name generator API and adds it to the names array // If the array has fewer than five names in it, the function calls itself, getting another random name and again adding it to the array // Once the array has five names in it, the if statement evaluates to false and the function sends the response and exits function wordRecurse() { wordpos.randAdjective(function (adj) { wordpos.randNoun(function (noun) { const username = adj.toString().charAt(0).toUpperCase() + adj.toString().slice(1) + noun.toString().charAt(0).toUpperCase() + noun.toString().slice(1) + (Math.floor(Math.random() * 100)).toString(); names.push({ name: username }); if (names.length < 5) { return wordRecurse(); } return callback(names); }); }); } }; module.exports = getNames;<file_sep>/public/assets/js/feed.js // const newPost = $(".new-post"); const saveResponse = $(".save"); // const skillDropdown = $("#skill-dropdown"); // Populate dropdown list with skills // skillDropdown.empty(); // skillDropdown.append($("<option>choose one...</option>")); // getSkills(function (skills) { // skills.forEach(skill => { // const newOption = $(`<option value="${skill.id}">${skill.subject}</option>`); // skillDropdown.append(newOption); // }); // }); // newPost.on("click", function (event) { // // Get user info // getUserInfo(function (user) { // // Populate the JSON object // var addPost = { // userId: user.id, // skillId: $("#skill-dropdown:selected").text(), // body: $(".new-body").val().trim() // } // console.log(addPost.body); // $.ajax("/api/posts", { // type: "POST", // data: addPost // }).then(function () { // // Reload the page to get the updated list // window.location.replace("http://localhost:8080/feed"); // }); // }); // }); // saveResponse.on("click", function(event) { // var addResponse = { // userId: 1, // replyToId: 1, // body: $(".response-body").val().trim() // }; // console.log (addResponse.body); // $.ajax("/api/posts", { // type: "POST", // data: addResponse // }).then(function() { // // Reload the page to get the updated list // window.location.replace("http://localhost:8080/feed"); // }); // }); <file_sep>/models/answer.js const Query = require("../config/query"); const answer = { findForPost: function (postId, cb) { const query = new Query(); query.select( [ "replies.ID", "username", "replies.body" ]) .from( { name: "posts", as: "replies" } ) .innerJoin("posts", "posts.ID", "replies.reply_to_id") .innerJoin("users", "users.ID", "replies.user_id") .whereEqual("posts.ID", postId) .go(function(err, result) { if (err) { return cb(err); } cb(null, result); }); } }; module.exports = answer;<file_sep>/controllers/api-routes.js const Wordpos = require("wordpos"); const passport = require("../config/passport"); const db = require("../models"); module.exports = function (app) { // Using the passport.authenticate middleware with our local strategy. // If the user has valid login credentials, send them to the members page. // Otherwise the user will be sent an error app.post("/api/login", passport.authenticate("local"), function (req, res) { res.json(req.user); }); // Sign up a new user app.post("/api/signup", function (req, res) { db.user.create({ username: req.body.username, email: req.body.email, password: <PASSWORD> }, function (err) { if (err) { return res.send(err); } res.redirect("/login"); }); }); // Route for logging user out app.get("/api/logout", function (req, res) { req.logout(); res.redirect("/"); }); // Route for getting some data about our user to be used client side app.get("/api/user_info", function (req, res) { if (!req.user) { // The user is not logged in. Return an empty object return res.json({}); } // Otherwise send back the user's username, email and id res.json({ id: req.user.ID, username: req.user.username, email: req.user.email }); }); app.get("/api/skills", function (req, res) { db.skill.findAll(function (err, result) { if (err) { return res.status(401).send(err); } res.json(result); }); }); app.get("/api/answers/:postId", function (req, res) { db.answer.findForPost(req.params.postId, function (err, result) { if (err) { res.status(401).send(err); } res.json(result); }); }); app.post("/api/answers/:id", function ({ body }, res) { db.post.add({ userId: req.body.userId, skillId: null, body: req.body.body, replyToId: req.body.replyToId }, function (err) { if (err) { return res.status(401).send(err); } res.status(200).send(req.body); }); }); app.get("/api/posts", function (req, res) { db.post.findAll(function (err, result) { if (err) { return res.status(401).send(err); } res.json(result); }); }); app.post("/api/posts", function (req, res) { db.post.add({ userId: req.body.userId, skillId: req.body.skillId, body: req.body.body }, function (err) { if (err) { console.log(err); return res.status(401).send(err); } res.status(200).send(req.body); }); }); app.get("/api/namegen", function (req, res) { const getNames = require("./namegen.js"); getNames(function(names) { return res.json(names); }); }); }; <file_sep>/models/index.js module.exports = { skill: require("./skill"), post: require("./post"), answer: require("./answer"), user: require("./user") }; <file_sep>/db/schema.sql drop database if exists peer_up_db; create database peer_up_db; use peer_up_db; create table users ( ID int not null AUTO_INCREMENT, username varchar(255), email varchar(255), password varchar(255), primary key(ID) ); create table skills ( ID int not null AUTO_INCREMENT, subject varchar(255), primary key (ID) ); create table posts ( ID int not null AUTO_INCREMENT, body text, user_id int not null, skill_id int, reply_to_id int, primary key(ID), foreign key(user_id) references users(ID), foreign key(skill_id) references skills(ID), foreign key(reply_to_id) references posts(ID) ); <file_sep>/public/assets/js/api.js // Functions to talk to the back end API // Base URL for API calls. Will need to be changed when put into production const baseUrl = "http://localhost:8080"; // POST calls // Log in the user function logIn(credentials, callback) { $.ajax(baseUrl + "/api/login", { method: "POST", data: { email: credentials.email, password: <PASSWORD> } }) .then(function (response) { if (!response) { return callback(new Error("Login failed.")); } callback(null, response); }) .catch(function (err) { callback(err); }); } // Sign up a new user function signUp(userInfo, callback) { $.ajax(baseUrl + "/api/signup", { method: "POST", data: { username: userInfo.username, email: userInfo.email, password: <PASSWORD> } }) .then(function (response) { if (!response) { return callback(new Error("Signup failed.")); } callback(null, response); }) .catch(function (err) { callback(err); }); } // Add a new post function addPost(post, callback) { $.ajax(baseUrl + "/api/posts", { method: "POST", data: post }) .then(function (response) { callback(null, response); }) .catch(function (err) { callback(err); }); } // Add a new answer function addAnswer(answer, callback) { addPost(answer, callback); } // GET Calls function getRandomUsername(callback) { $.get(baseUrl + "/api/namegen") .then(function (response) { callback(null, response); }) .catch(function (err) { callback(err); }); } // Get info about currently logged in user function getUserInfo(callback) { $.get(baseUrl + "/api/user_info") .then(function (response) { callback(null, response); }) .catch(function (err) { callback(err); }); } // Log out the current user function logOut(callback) { $.get(baseUrl + "/api/logout", function (response) { callback(null, response); }) .catch(function (err) { callback(err); }); } // Get list of available skills function getSkills(callback) { $.get(baseUrl + "/api/skills") .then(function (response) { callback(null, response); }) .catch(function (err) { callback(err); }); } // Get all posts function getPosts(callback) { $.get(baseUrl + "/api/posts") .then(function (response) { callback(null, response); }) .catch(function (err) { callback(err); }); } // Get the answers (replies) to a specified post function getAnswers(postId, callback) { $.get(baseUrl + "/api/answers/" + postId, { async: false }) .then(function (response) { callback(null, response); }) .catch(function (err) { callback(err); }); } ///// End GET and POST Calls ///// // Calling functions as necessary // Login const login = $(".login"); login.on("click", function(){ logIn(credentials, callback); }); // Signup a new user if( !signUp) { const signUp = $("#signup-btn"); signUp.on("click", function() { signUp(userInfo, callback); }); } // Post from Skills page const post = $(".login-post"); post.on("click", function() { addPost(post, callback); }) // New post from feed const newPost = $(".new-post"); newPost.on("click", function() { addPost(post, callback); }); // Logout const logout = $(".logout-icon"); logout.on("click", function() { logOut(callback); }) <file_sep>/public/assets/js/skill.js const strengths = $(".strength"); strengths.on("click", function (event) { event.preventDefault(); $(this).attr("style", "background-color: #F2F2F2;"); });<file_sep>/config/query.js const connection = require("../config/connection"); class Query { constructor() { this.command = { insertData: { text: "", table: "", fields: [], values: [] }, updateData: { text: "", table: "" }, setData: { text: "", field: "", value: "" }, deleteData: { text: "", table: "" }, selectData: { text: "", fields: [] }, fromData: { text: "", table: {} }, joinDataArray: [], whereData: { text: "", field: "", value: "" }, whereAdditionalDataArray: [], groupByData: { text: "", fields: [] }, orderByData: { text: "", fields: [] }, limitData: { text: "", count: 0 } }; } insert(table, fields, values) { this.command.insertData.text = "INSERT INTO ?? "; this.command.insertData.table = table; if (Array.isArray(fields)) { this.command.insertData.text += " (" + this.stringArrayToList(fields, "??") + ")\n"; this.command.insertData.fields = fields; } else { this.command.insertData.text += " (??)\n"; this.command.insertData.fields = [fields]; } if (Array.isArray(values)) { this.command.insertData.text += "VALUES (" + this.stringArrayToList(values, "?") + ")\n"; this.command.insertData.values = values; } else { this.command.insertData.text += "VALUES (?)\n"; this.command.insertData.values = [values]; } return this; } select(fields) { if (Array.isArray(fields)) { this.command.selectData.text = "SELECT " + this.stringArrayToList(fields, "??") + "\n"; this.command.selectData.fields = fields; } else { this.command.selectData.text = "SELECT ??\n"; this.command.selectData.fields = [fields]; } return this; } from(tableName) { if (typeof tableName === "object" && Object.keys(tableName).length === 2) { this.command.fromData.text = "FROM ?? AS ??\n"; this.command.fromData.table = tableName[Object.keys(tableName)[0]]; this.command.fromData.alias = tableName[Object.keys(tableName)[1]]; } else { this.command.fromData.text = "FROM ??\n"; this.command.fromData.table = tableName; this.command.fromData.alias = ""; } return this; } innerJoin(table, leftKey, rightKey) { if (typeof table === "object" && Object.keys(table).length === 2) { this.command.joinDataArray.push({ text: "INNER JOIN ?? AS ?? ON ?? = ??\n", table: table[Object.keys(table)[0]], alias: table[Object.keys(table)[1]], leftKey: leftKey, rightKey: rightKey }); } else { this.command.joinDataArray.push({ text: "INNER JOIN ?? ON ?? = ??\n", table: table, alias: "", leftKey: leftKey, rightKey: rightKey }); } return this; } whereEqual(field, value) { this.command.whereData.text = "WHERE ?? = ?\n"; this.command.whereData.field = field; this.command.whereData.value = value; return this; } whereNull(field) { this.command.whereData.text = "WHERE ?? IS NULL\n"; this.command.whereData.field = field; this.command.whereData.value = ""; return this; } limit(count) { this.command.limitData.text = "LIMIT " + count.toString() + "\n"; this.command.limitData.count = count; return this; } go(cb) { let commandText = this.command.insertData.text + this.command.selectData.text + this.command.fromData.text; this.command.joinDataArray.forEach(joinDataItem => { commandText += joinDataItem.text; }); commandText += this.command.whereData.text; this.command.whereAdditionalDataArray.forEach(whereDataItem => { commandText += whereDataItem.text; }); commandText += this.command.limitData.text; const queryParams = []; (this.command.insertData.table !== "") ? queryParams.push(this.command.insertData.table) : null; (this.command.insertData.fields.length > 0) ? queryParams.push(...this.command.insertData.fields) : null; (this.command.insertData.values.length > 0) ? queryParams.push(...this.command.insertData.values) : null; (this.command.selectData.fields.length > 0) ? queryParams.push(...this.command.selectData.fields) : null; (this.command.fromData.table !== "") ? queryParams.push(this.command.fromData.table) : null; (this.command.fromData.alias !== "") ? queryParams.push(this.command.fromData.alias) : null; this.command.joinDataArray.forEach(joinDataItem => { queryParams.push(joinDataItem.table); (joinDataItem.alias !== "") ? queryParams.push(joinDataItem.alias) : null; queryParams.push(joinDataItem.leftKey, joinDataItem.rightKey ); }); (this.command.whereData.field !== "") ? queryParams.push(this.command.whereData.field) : null; (this.command.whereData.value !== "") ? queryParams.push(this.command.whereData.value) : null; this.command.whereAdditionalDataArray.forEach(whereDataItem => { queryParams.push( whereDataItem.field, whereDataItem.value ); }); (this.command.limitData.count > 0) ? queryParams.push(this.command.limitData.count) : null; const query = connection.query(commandText, queryParams, (err, result) => { // console.log(query.sql); if (err) { return cb(err); } cb(null, result); }); } stringArrayToList(arr, str) { if (arr.length === 0) { throw ("arr must contain at least one element."); } let text = ""; for (let i = 1; i < arr.length; i++) { text += str + ", "; } return text + str; } } module.exports = Query;<file_sep>/controllers/html-routes.js const path = require("path"); const db = require("../models"); // Require some data models for use in dynamic rendering of HTML content using handlebars const post = require("../models/post"); const skill = require("../models/skill"); // Middleware for restricting pages based on authentication const isAuthenticated = require("../config/middleware/isAuthenticated"); module.exports = function (app) { app.get("/", function (req, res) { res.render("index"); }); app.get("/username", function (req, res) { const getNames = require("./namegen"); getNames(function (names) { res.render("username", { names: names }); }); }); app.get("/strengths", function (req, res) { db.skill.findAll(function (err, response) { if (err) { return res.status(401); } res.render("strengths", { skills: response }); }); }); app.get("/signup", function (req, res) { res.render("signup"); }); app.get("/answer", function (req, res) { res.render("answer"); }); app.get("/login", function (req, res ) { // If the user already has an account send them to the members page if (req.user) { res.redirect("/skills"); } else { res.render("index"); } }); // If a user who is not logged in tries to access this route they will be redirected to the signup page app.get("/feed", isAuthenticated, function (req, res) { db.post.findAll(function (err, result) { if (err) { return res.status(401); } res.render("feed", { posts: result }); }); }); app.get("/skills", isAuthenticated, function (req, res) { db.skill.findAll(function (err, response) { if (err) { return res.status(401); } res.render("skills", { skills: response }); }); }); };
1aff8a01ce4fb77e76f2356bbb22845d628f80aa
[ "Markdown", "SQL", "JavaScript" ]
14
Markdown
marissa-lc/Peer-One
5100d67b50787a5deda5ecb0fde29fe6d446b5cd
9b819d966d7433fbd40dd4080145817d29268c20
refs/heads/main
<repo_name>BarreraDLucas/ParcialIntegrador<file_sep>/transaccion.py import uuid import json class Transaccion(json.JSONEncoder): def __init__(self, dni_cliente, tipo_movimiento, monto_movimiento, estado, nombre_comercio): self.transaccion_id = str(uuid.uuid4()) self.dni_cliente = dni_cliente self.tipo_movimiento = tipo_movimiento self.monto_movimiento = int(monto_movimiento) self.estado = estado self.nombre_comercio = nombre_comercio def toJSON(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) def crear_archivo(self, transaccion): archivo = open(f'./data/{self.transaccion_id}.json', "w") archivo.write(str(transaccion.toJSON())) archivo.close() def comprobacion(self): if self.monto_movimiento < 100000: return ("El movimiento no requiere comprobacion") else: return ("Se debe solicitar documentación que requiera la justificacion del movimiento") <file_sep>/test_creacion_archivo.py from transaccion import Transaccion import json def test_creacion_archivo(): tsc_a = Transaccion(dni_cliente=45990339, tipo_movimiento="CONSUMO", monto_movimiento=2000, estado="RECHAZADO" , nombre_comercio="MUSIMUNDO") tsc_a.crear_archivo(tsc_a) tsc_b = Transaccion(45990339, "CONSUMO", 2000, "APROBADO", "MUSIMUNDO") tsc_b.crear_archivo(tsc_b) tsc_c = Transaccion(30949303, "CASH_IN", 50000, "APROBADO", "PAGOFACIL") tsc_c.crear_archivo(tsc_c) def test_monto_movimiento(): transaccion_a = Transaccion(dni_cliente=45990339, tipo_movimiento="CONSUMO", monto_movimiento=200000, estado="APROBADO", nombre_comercio="DISCO") print(transaccion_a.comprobacion()) def test_json_movimiento(): transaccion_a = Transaccion(30949303, 'CASH_IN', 500, 'APROBADO', 'PAGOFACIL') movimiento_to_dict = json.loads(transaccion_a.toJSON()) print(movimiento_to_dict.keys()) print(movimiento_to_dict.items()) tipomovimiento = movimiento_to_dict.get('tipo_movimiento') print(tipomovimiento) test_monto_movimiento() test_json_movimiento() test_creacion_archivo()
e10b01ac0562ed0226774f8bcdc86274e19ae21e
[ "Python" ]
2
Python
BarreraDLucas/ParcialIntegrador
55bd8b1a1f0b22a770a03d48d3d7b089ed41fcaa
82d1804387fd54bb52affac1552f78e7eed18d12
refs/heads/master
<repo_name>wendy-sun-07/hw05-road-generation<file_sep>/src/lsystem/Road.ts import {vec3, vec2, mat3, vec4, quat, glMatrix} from 'gl-matrix'; import Turtle from './turtle'; class Road { } <file_sep>/src/lsystem/DrawingRule.ts import { vec3, mat4, quat } from 'gl-matrix'; import Turtle from "./turtle"; export default class DrawingRule { lsys: Turtle = new Turtle(vec3.fromValues(0, 0, 0), vec3.fromValues(0, 1, 0), quat.fromValues(0, 0, 0, 1)); // Pass in a drawing function constructor(turtle: Turtle) { this.lsys = turtle; } draw(rand : number, currentChar : string) : any { // Get a random number // let rand = Math.random(); // List of possible drawing rules thus far: // F: move forward a certain distance and draw (e.g. 10 pixels) // +: turn left 30 degrees // -: turn right 30 degrees // [: push turtle // ]: pop turtle if (currentChar == "F") { return this.lsys.moveForward(); } else if (currentChar == "+") { return this.lsys.rotateLeft(); } else if (currentChar == "-") { return this.lsys.rotateRight(); } } } <file_sep>/src/lsystem/ExpansionRule.ts import { vec3 } from 'gl-matrix'; export default class ExpansionRule { constructor() { } expand(rand : number, currentChar : string) : string { // Get a random number // let rand = Math.random(); if (currentChar == "F"){ if (rand < 0.80) { // should keep moving forward return "FFFFFF"; } else { // should rotate some way and keep going this way return "F[+FFF"; } } } } <file_sep>/src/lsystem/Turtle.ts import {vec3, mat4, quat} from 'gl-matrix'; export default class Turtle { position: vec3 = vec3.create(); direction: vec3 = vec3.create(); // Ensure that orientation is normalized; quaternion: quat = quat.create(); constructor(pos: vec3, orient: vec3, q: quat) { this.position = pos; this.direction = orient; this.quaternion = q; } clear() { this.position = vec3.fromValues(0, 0, 0); this.direction = vec3.fromValues(0, 1, 0); this.quaternion = quat.fromValues(0, 0, 1, 0); } rotate(axis: vec3, degrees: number) { // Set up a rotation quaternion let q: quat = quat.create(); vec3.normalize(axis, axis); quat.setAxisAngle(q, axis, degrees * Math.PI / 180.0); quat.normalize(q, q); // Update the orientation direction of our turtle this.direction = vec3.transformQuat(this.direction, this.direction, q); vec3.normalize(this.direction, this.direction); // Save the current rotation in our turtle's quaternion quat.rotationTo(this.quaternion, vec3.fromValues(0, 1, 0), this.direction); } // + rotateLeft() { console.log("rotate left"); this.rotate(vec3.fromValues(0, 0, 1), -26.7); } // - rotateRight() { console.log("rotate right"); this.rotate(vec3.fromValues(0, 0, 1), 26.7); } // F moveForward() { let translate = vec3.create(); translate = vec3.multiply(translate, this.direction, vec3.fromValues(0.1, 0.1, 1.0)); vec3.add(this.position, this.position, translate); } getMatrix() { // Translate let T: mat4 = mat4.create(); mat4.fromTranslation(T, this.position); // Rotate let R: mat4 = mat4.create(); mat4.fromQuat(R, this.quaternion); // Scale, based on depth let S: mat4 = mat4.create(); mat4.fromScaling(S, vec3.fromValues(0.1, 0.1, 0.05)); // Multiply together let transformation: mat4 = mat4.create(); mat4.multiply(transformation, R, S); return mat4.multiply(transformation, T, transformation); } } <file_sep>/src/lsystem/LSystem.ts import { vec3, mat4, quat } from 'gl-matrix'; import Turtle from './Turtle'; import DrawingRule from './DrawingRule'; import ExpansionRule from './ExpansionRule'; // TODO: ask about the LSystem structure let rand1 : number = Math.random(); let rand2 : number = Math.random(); let rand3 : number = Math.random(); export default class LSystem { turtle: Turtle = new Turtle(vec3.fromValues(rand1 * 3, rand2 * 3, rand3 * 3), vec3.fromValues(1, 0, 0), quat.fromValues(0, 0, 0, 1)); // Current turtle turtleHistory: Turtle[] = []; // Stack of turtle history dr: DrawingRule = new DrawingRule(this.turtle); // Map of drawing rules er: ExpansionRule = new ExpansionRule(); grammar: string; transformHistory: mat4[] = []; leafHistory: mat4[] = []; // tempTransform: mat4; // this.transformHistory.push(tempTransform); constructor(axiom: string) { this.grammar = axiom; } // [ pushState() { console.log("push state"); let newPos: vec3 = vec3.create(); vec3.copy(newPos, this.turtle.position); let newOri: vec3 = vec3.create(); vec3.copy(newOri, this.turtle.direction); let newQuat: quat = quat.create(); quat.copy(newQuat, this.turtle.quaternion); let temp: Turtle = new Turtle(newPos, newOri, newQuat); console.log("pushing " + temp.position[0] + " " + temp.position[1] + " " + temp.position[2]); this.turtleHistory.push(temp); console.log("there are " + this.turtleHistory.length + " on turtle stack"); } // ] popState(){ console.log("pop state"); console.log("before pop there are " + this.turtleHistory.length + " on turtle stack"); var s: Turtle = this.turtleHistory.pop(); console.log("popping " + s.position[0] + " " + s.position[1] + " " + s.position[2]); console.log("after pop there are " + this.turtleHistory.length + " on turtle stack"); this.turtle.position = s.position, this.turtle.direction = s.direction; this.turtle.quaternion = s.quaternion; } expandGrammarSingle(str: string) : string { // Use the expansion rules let rand: number = Math.random(); var result = ""; result = this.er.expand(rand, str); // this expands a single char into something return result; } // Iterate over each char in the axiom and replace it with its expansion expandGrammar(texWidth: number, texHeight:number, str: string) : string { console.log("Text width " + texWidth); console.log("Text height " + texHeight); var output = this.grammar; let temp: vec3 = vec3.create(); temp = this.turtle.position; for (var i = 0; i < 200; i++) { for (var j = 0; j < str.length; j++) { console.log("current x" + temp[0]); console.log("current y" + temp[1]); output = output.concat(this.expandGrammarSingle(str.charAt(j))); } } return output; } drawGrammarSingle(str: string) : void { // Use the expansion rules let rand: number = Math.random(); var result = ""; if (str == "F") { let transMat : any = this.turtle.getMatrix(); this.transformHistory.push(transMat); } if (str == "[") { this.pushState(); } else if (str == "]") { this.popState(); } else { let func = this.dr.draw(rand, str); if (func) { func(); } } } drawGrammar(str: string) : void { for (var j = 0; j < str.length; j++) { this.drawGrammarSingle(str.charAt(j)); } } } <file_sep>/README.md # hw05-road-generation ## The Red Brick Road ## WENDY SUN (wentings) - Demo: https://wentings.github.io/hw05-road-generation/ ![](road.png) road on land vs. water ![](road1.png) road on population density ## References - [Procedural Modeling of Cities](proceduralCityGeneration.pdf) - Line Intersection: CIS 460 lectures slides - Noise functions: Previous homework 1 code ## 2D Maps I generated terrains using an fbm function that calculates the elevation of the land. The two possible views for looking at terrain elevation and simply land vs. water are colored appropriately using the height calculated. The lighter colors represents higher elevation. I generated the population density map using perlin noise, and I checked if the position is on land. If it's in the water or its height is below 0.4, the population density is automatically set to 0. The lighter red color represents higher density. ## New Pseudo L-System Classes I mostly revised the L-system classes from the past homework. This time, I started the seed in a random position on the plane. The turtle generates a path in its wake. The branching uses instance rendering to create a pseudo L-system that keeps track of the state of the turtle as it branches. The highway do branch on top of water, which is something I would like to change in the future. ## Future Improvement Currently the L-system definitely does not function like it should - it doesn't branch based on its local environment or the data generated by the texture, and I have yet to successfully populate the smaller streets that are self sensitive and snaps to the highway. ## Tunable GUI Parameters - Road Complexity <file_sep>/src/main.ts import {vec3, mat4, quat} from 'gl-matrix'; import * as Stats from 'stats-js'; import * as DAT from 'dat-gui'; import Square from './geometry/Square'; import ScreenQuad from './geometry/ScreenQuad'; import OpenGLRenderer from './rendering/gl/OpenGLRenderer'; import Camera from './Camera'; import LSystem from './lsystem/LSystem' import {setGL} from './globals'; import ShaderProgram, {Shader} from './rendering/gl/ShaderProgram'; import Mesh from './geometry/Mesh'; // Define an object with application parameters and button callbacks // This will be referred to by dat.GUI's functions that add GUI elements. const controls = { 'Show population': false, 'Show terrain elevation': false, 'Show land vs. water': false, }; const texWidth = window.innerWidth; const texHeight = window.innerHeight; let square: Square; let square1: Square; let screenQuad: ScreenQuad; let background: ScreenQuad; let time: number = 0.0; function loadScene() { square = new Square(); square.create(); screenQuad = new ScreenQuad(); background = new ScreenQuad(); screenQuad.create(); background.create(); // this sets up the map let offsetsArray = []; let colorsArray = []; let col1Array = []; let col2Array = []; let col3Array = []; let col4Array = []; let n: number = 1.0; for(let i = 0; i < n; i++) { for(let j = 0; j < n; j++) { let currTransform = [25.0, 0.0, 0.0, 0.0, 0.0, 20.0, 0.0, 0.0, 0.0, 0.0, 10.0, 0.0, 0.0, 0.0, 0.0, 1.0]; // Dummy - todo, get rid of offsets offsetsArray.push(i); offsetsArray.push(j); offsetsArray.push(0); // push column vectors back col1Array.push(currTransform[0]); col1Array.push(currTransform[1]); col1Array.push(currTransform[2]); col1Array.push(currTransform[3]); col2Array.push(currTransform[4]); col2Array.push(currTransform[5]); col2Array.push(currTransform[6]); col2Array.push(currTransform[7]); col3Array.push(currTransform[8]); col3Array.push(currTransform[9]); col3Array.push(currTransform[10]); col3Array.push(currTransform[11]); col4Array.push(currTransform[12]); col4Array.push(currTransform[13]); col4Array.push(currTransform[14]); col4Array.push(currTransform[15]); // push colors back let rand: number = Math.random(); colorsArray.push(0.1); colorsArray.push(1.0 * 0.6); colorsArray.push(0.1); colorsArray.push(1.0); } } let col1: Float32Array = new Float32Array(col1Array); let col2: Float32Array = new Float32Array(col2Array); let col3: Float32Array = new Float32Array(col3Array); let col4: Float32Array = new Float32Array(col4Array); let colors: Float32Array = new Float32Array(colorsArray); let offset: Float32Array = new Float32Array(offsetsArray); square.setInstanceVBOs(offset, colors, col1, col2, col3, col4); square.setNumInstances(n * n); // grid of "particles" // --------------------------------------------------------------- // this sets up the roads??? how do i get the black to draw square1 = new Square(); square1.create(); // initialize LSystem and a Turtle to draw var lsys = new LSystem("F"); var x = lsys.expandGrammar(texWidth, texHeight, lsys.grammar); let transformations: mat4[] = lsys.transformHistory; lsys.drawGrammar(x); let offsetsArray_1 = []; let colorsArray_1 = []; let col1Array_1 = []; let col2Array_1 = []; let col3Array_1 = []; let col4Array_1 = []; let m: number = transformations.length; for (let i = 0; i < m; i++) { let currTransform_1 = transformations[i]; // Dummy - todo, get rid of offsets offsetsArray_1.push(0); offsetsArray_1.push(0); offsetsArray_1.push(0); // push column vectors back col1Array_1.push(currTransform_1[0]); col1Array_1.push(currTransform_1[1]); col1Array_1.push(currTransform_1[2]); col1Array_1.push(currTransform_1[3]); col2Array_1.push(currTransform_1[4]); col2Array_1.push(currTransform_1[5]); col2Array_1.push(currTransform_1[6]); col2Array_1.push(currTransform_1[7]); col3Array_1.push(currTransform_1[8]); col3Array_1.push(currTransform_1[9]); col3Array_1.push(currTransform_1[10]); col3Array_1.push(currTransform_1[11]); col4Array_1.push(currTransform_1[12]); col4Array_1.push(currTransform_1[13]); col4Array_1.push(currTransform_1[14]); col4Array_1.push(currTransform_1[15]); // push colors back colorsArray_1.push(1.0); colorsArray_1.push(0.0); colorsArray_1.push(0.0); colorsArray_1.push(1.0); } let col1_1: Float32Array = new Float32Array(col1Array_1); let col2_1: Float32Array = new Float32Array(col2Array_1); let col3_1: Float32Array = new Float32Array(col3Array_1); let col4_1: Float32Array = new Float32Array(col4Array_1); let colors_1: Float32Array = new Float32Array(colorsArray_1); let offset_1: Float32Array = new Float32Array(offsetsArray_1); square1.setInstanceVBOs(offset_1, colors_1, col1_1, col2_1, col3_1, col4_1); square1.setNumInstances(m); } function main() { // Initial display for framerate const stats = Stats(); stats.setMode(0); stats.domElement.style.position = 'absolute'; stats.domElement.style.left = '0px'; stats.domElement.style.top = '0px'; document.body.appendChild(stats.domElement); // Add controls to the gui const gui = new DAT.GUI(); gui.add(controls, 'Show population'); gui.add(controls, 'Show terrain elevation'); gui.add(controls, 'Show land vs. water'); // get canvas and webgl context const canvas = <HTMLCanvasElement> document.getElementById('canvas'); const gl = <WebGL2RenderingContext> canvas.getContext('webgl2'); if (!gl) { alert('WebGL 2 not supported!'); } // `setGL` is a function imported above which sets the value of `gl` in the `globals.ts` module. // Later, we can import `gl` from `globals.ts` to access it setGL(gl); // Initial call to load scene loadScene(); const camera = new Camera(vec3.fromValues(10, 10, 10), vec3.fromValues(0, 0, 0)); const renderer = new OpenGLRenderer(canvas); renderer.setClearColor(0.2, 0.2, 0.2, 1); gl.enable(gl.BLEND); gl.blendFunc(gl.ONE, gl.ONE); // Additive blending const instancedShader = new ShaderProgram([ new Shader(gl.VERTEX_SHADER, require('./shaders/instanced-vert.glsl')), new Shader(gl.FRAGMENT_SHADER, require('./shaders/instanced-frag.glsl')), ]); const flat = new ShaderProgram([ new Shader(gl.VERTEX_SHADER, require('./shaders/flat-vert.glsl')), new Shader(gl.FRAGMENT_SHADER, require('./shaders/flat-frag.glsl')), ]); const mapShader = new ShaderProgram([ new Shader(gl.VERTEX_SHADER, require('./shaders/map-vert.glsl')), new Shader(gl.FRAGMENT_SHADER, require('./shaders/map-frag.glsl')), ]); // This function will be called every frame function tick() { camera.update(); stats.begin(); instancedShader.setTime(time); flat.setTime(time++); mapShader.setTime(time++); gl.viewport(0, 0, window.innerWidth, window.innerHeight); renderer.clear(); // Pass user input to shaders if (controls["Show population"]) { mapShader.setShowPopulation(1.0); } else { mapShader.setShowPopulation(0.0); } if (controls["Show terrain elevation"]) { mapShader.setShowTerrainGradient(1.0); } else { mapShader.setShowTerrainGradient(0.0); } if (controls["Show land vs. water"]) { mapShader.setShowTerrainBinary(1.0); } else { mapShader.setShowTerrainBinary(0.0); } renderer.render(camera, mapShader, [square]); renderer.render(camera, instancedShader, [ square1, ]); stats.end(); // Tell the browser to call `tick` again whenever it renders a new frame requestAnimationFrame(tick); } window.addEventListener('resize', function() { renderer.setSize(window.innerWidth, window.innerHeight); camera.setAspectRatio(window.innerWidth / window.innerHeight); camera.updateProjectionMatrix(); flat.setDimensions(window.innerWidth, window.innerHeight); }, false); renderer.setSize(window.innerWidth, window.innerHeight); camera.setAspectRatio(window.innerWidth / window.innerHeight); camera.updateProjectionMatrix(); flat.setDimensions(window.innerWidth, window.innerHeight); // Start the render loop tick(); } main();
8247b924182a42596ac482cd068979cccd5593f1
[ "Markdown", "TypeScript" ]
7
TypeScript
wendy-sun-07/hw05-road-generation
46911bf2dd1e51d373c2e23c46dc1d5c956141ec
470d053d67ce8c721bdd27f066f8bfefe0b7a603
refs/heads/master
<repo_name>JocelynRoul/LP011-Medic<file_sep>/src/view/reservation.java package view; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import java.sql.ResultSet; import java.sql.SQLException; import java.text.ParseException; import java.util.ArrayList; import javax.swing.JButton; import javax.swing.JComboBox; import javax.swing.JFrame; import javax.swing.JLabel; import javax.swing.JPanel; import javax.swing.JTextField; import javax.swing.border.EmptyBorder; import fonction.gestionDate; import model.Connexion; public class reservation extends JFrame{ private JPanel contentPane; private JTextField textField; private JButton btnRetour; public reservation(Connexion connexion,String nom,String demande) throws SQLException, ParseException { setTitle("reservation"); setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); setBounds(100, 100, 600, 300); contentPane = new JPanel(); contentPane.setBorder(new EmptyBorder(5, 5, 5, 5)); setContentPane(contentPane); contentPane.setLayout(null); textField = new JTextField(); textField.setBounds(204, 79, 124, 19); contentPane.add(textField); textField.setColumns(10); JLabel lblNombreDeJours = new JLabel("nombre de jours :"); lblNombreDeJours.setBounds(48, 81, 124, 15); contentPane.add(lblNombreDeJours); // btn vers accueil btnRetour = new JButton("Retour"); btnRetour.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { accueil frameAccueil; try { frameAccueil = new accueil(connexion); frameAccueil.setVisible(true); dispose(); } catch (SQLException e1) { // TODO Auto-generated catch block e1.printStackTrace(); } } }); btnRetour.setBounds(248, 119, 80, 25); contentPane.add(btnRetour); // ComboBox pour choisir l'article JComboBox comboBoxDispo = new JComboBox(); comboBoxDispo.setBounds(58, 39, 422, 24); contentPane.add(comboBoxDispo); ResultSet rs = connexion.Query("SELECT * FROM "+demande+",Article WHERE "+ demande +".articleId = Article.id AND Article.idClient = 999"); ArrayList lst = new ArrayList(); if(demande.equals("fauteuil")) { while(rs.next()) { comboBoxDispo.addItem(rs.getInt("articleId")+ " /modele: "+rs.getString("modele")+ " /Largeur: "+rs.getString("largeurAssise")+ " /poids: "+rs.getString("poids")+ " /prix: "+rs.getFloat("prix")); lst.add(rs.getInt("articleId")); }; }else if(demande.equals("lit")) { while(rs.next()) { comboBoxDispo.addItem(rs.getInt("articleId")+ " /modele: "+rs.getString("modele")+ " /Largeur: "+rs.getString("poidsMax")+ " /poids: "+rs.getString("dim")+ " /prix: "+rs.getFloat("prix")); lst.add(rs.getInt("articleId")); }; } JLabel label = new JLabel("l'article :"); label.setBounds(48, 12, 124, 15); contentPane.add(label); JButton btnReserver = new JButton("Reserver"); btnReserver.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { accueil frameAccueil; try { // Modifier le numero d'utilisateur dans la table Article try { String dateFin = gestionDate.ajoutJour(connexion,Integer.parseInt(textField.getText())); String dateDeb = gestionDate.dateToday(); int idC = connexion.Query("SELECT * FROM Client WHERE Client.nom like '%"+nom+"%'").getInt("id"); int idO = (int) lst.get(comboBoxDispo.getSelectedIndex()); int idA = connexion.Query("SELECT * FROM "+demande+" WHERE "+ demande +".articleId = "+ idO).getInt("articleId"); connexion.Update("Update Article SET idClient = "+idC+", finReservation = '"+dateFin+"', debReservation = '"+dateDeb+"' WHERE Article.id = "+ idA); int idH = connexion.Count("SELECT * FROM Historique")+1; ResultSet rsH = connexion.Query("SELECT * FROM Article WHERE Article.id = "+idA); System.out.println("INSERT INTO Historique VALUES ("+idH+","+idA+","+rsH.getString("debReservation")+","+rsH.getString("finReservation")+","+rsH.getInt("idClient")+","+rsH.getFloat("prix")); connexion.Update("INSERT INTO Historique VALUES ("+idH+","+idA+","+rsH.getString("debReservation")+","+rsH.getString("finReservation")+","+rsH.getInt("idClient")+","+rsH.getFloat("prix")+")"); frameAccueil = new accueil(connexion); frameAccueil.setVisible(true); dispose(); } catch (ParseException e1) { // TODO Auto-generated catch block e1.printStackTrace(); } } catch (SQLException sqlException) { sqlException.printStackTrace(); System.out.println("Erreur de connexion"); } } }); btnReserver.setBounds(72, 119, 124, 25); contentPane.add(btnReserver); } } <file_sep>/src/model/lit.java package model; import java.sql.Date; public class lit extends article{ private int poidsMax; private String dim; public lit(int unId, Date uneFinReservation,int unPoidsMax, String uneDim) { super(unId,uneFinReservation); this.poidsMax = unPoidsMax; this.dim = uneDim; } public int getPoidsMax() { return poidsMax; } public void setPoidsMax(int poids) { this.poidsMax = poids; } public String getDim() { return dim; } public void setDim(String dim) { this.dim = dim; } } <file_sep>/src/model/fauteuil.java package model; import java.sql.Date; public class fauteuil extends article{ private int largeurAssise; private int poids; public fauteuil(int uneLargeur,int unPoids,int unId,Date uneFinReservation) { super(unId,uneFinReservation); this.largeurAssise = uneLargeur; this.poids = unPoids; } public int getLargeurAssise() { return largeurAssise; } public void setLargeurAssise(int largeurAssise) { this.largeurAssise = largeurAssise; } public int getPoids() { return poids; } public void setPoids(int poids) { this.poids = poids; } } <file_sep>/src/view/Administration.java package view; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import javax.swing.JButton; import javax.swing.JComboBox; import javax.swing.JFrame; import javax.swing.JPanel; import javax.swing.border.EmptyBorder; import model.Connexion; import javax.swing.JLabel; public class Administration extends JFrame{ private JPanel contentPane; private JButton btnRetour; public Administration(Connexion connexion,String nom) throws SQLException{ setTitle("Administration"); setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); setBounds(100, 100, 800, 300); contentPane = new JPanel(); contentPane.setBorder(new EmptyBorder(5, 5, 5, 5)); setContentPane(contentPane); contentPane.setLayout(null); JLabel lblFauteuil = new JLabel("Fauteuil reserver"); lblFauteuil.setBounds(31, 12, 66, 15); contentPane.add(lblFauteuil); JLabel lblLit = new JLabel("Lit reserver"); lblLit.setBounds(31, 75, 66, 15); contentPane.add(lblLit); JComboBox comboBoxL = new JComboBox(); comboBoxL.setBounds(60, 102, 622, 24); contentPane.add(comboBoxL); ResultSet rsL = connexion.Query("SELECT * FROM Article,Lit,Client WHERE Article.idClient = Client.id AND Article.id = Lit.articleId AND Client.nom like '%"+nom+"%'" ); ArrayList lstL = new ArrayList(); while(rsL.next()) { comboBoxL.addItem(rsL.getInt("articleId")+ " /par: "+rsL.getString("nom")+ " /DateDebut: "+rsL.getString("debReservation")+ " /DateFin: "+rsL.getString("finReservation")+ " /prix: "+rsL.getString("prix")); lstL.add(rsL.getInt("articleId")); }; JComboBox comboBoxF = new JComboBox(); comboBoxF.setBounds(60, 39, 622, 24); contentPane.add(comboBoxF); ResultSet rsF = connexion.Query("SELECT * FROM Article,Fauteuil,Client WHERE Article.idClient = Client.id AND Article.id = Fauteuil.articleId AND Client.nom like '%"+nom+"%'" ); ArrayList lstF = new ArrayList(); while(rsL.next()) { comboBoxF.addItem(rsF.getInt("articleId")+ " /par: "+rsF.getString("nom")+ " /DateDebut: "+rsF.getString("debReservation")+ " /DateFin: "+rsF.getString("finReservation")+ " /prix: "+rsF.getString("prix")); lstF.add(rsF.getInt("articleId")); }; btnRetour = new JButton("Retour"); btnRetour.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { accueil frameAccueil; try { frameAccueil = new accueil(connexion); frameAccueil.setVisible(true); dispose(); } catch (SQLException e1) { // TODO Auto-generated catch block e1.printStackTrace(); } } }); btnRetour.setBounds(248, 159, 80, 25); contentPane.add(btnRetour); } } <file_sep>/src/view/accueil.java package view; import java.awt.EventQueue; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import java.sql.ResultSet; import java.sql.SQLException; import java.text.ParseException; import java.util.ArrayList; import javax.swing.JButton; import javax.swing.JComboBox; import javax.swing.JFrame; import javax.swing.JLabel; import javax.swing.JPanel; import javax.swing.JTextField; import javax.swing.border.EmptyBorder; import model.Connexion; public class accueil extends JFrame{ private JPanel contentPane; private JTextField textField; private JTextField textField_1; private JButton btnConnection; public static void main(String[] args) { EventQueue.invokeLater(new Runnable() { public void run() { try { Connexion connexion = new Connexion("Medic_db"); connexion.connect(); accueil frame = new accueil(connexion); frame.setVisible(true); } catch (Exception e) { e.printStackTrace(); } } }); } public accueil(Connexion connexion) throws SQLException { setTitle("Accueil"); setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); setBounds(100, 100, 406, 424); contentPane = new JPanel(); contentPane.setBorder(new EmptyBorder(5, 5, 5, 5)); setContentPane(contentPane); contentPane.setLayout(null); JLabel lblNom = new JLabel("Nom :"); lblNom.setBounds(66, 82, 66, 15); contentPane.add(lblNom); JLabel lblPrenom = new JLabel("Prenom :"); lblPrenom.setBounds(66, 115, 66, 15); contentPane.add(lblPrenom); ResultSet rs = connexion.Query("SELECT * FROM Client"); JComboBox comboBoxNom = new JComboBox(); comboBoxNom.setBounds(193, 80, 124, 19); contentPane.add(comboBoxNom); while(rs.next()) { comboBoxNom.addItem(rs.getString("nom")); System.out.println(rs.getString("nom")); } textField = new JTextField(); textField.setBounds(193, 80, 124, 19); contentPane.add(textField); textField.setColumns(10); textField_1 = new JTextField(); textField_1.setBounds(193, 113, 124, 19); contentPane.add(textField_1); textField_1.setColumns(10); JLabel lblDemande = new JLabel("demande :"); lblDemande.setBounds(66, 152, 85, 15); contentPane.add(lblDemande); JComboBox comboBoxDemande = new JComboBox(); comboBoxDemande.setBounds(193, 144, 124, 24); contentPane.add(comboBoxDemande); String[] list={"fauteuil","lit"}; comboBoxDemande.setModel(new javax.swing.DefaultComboBoxModel(list)); btnConnection = new JButton("Reserver"); btnConnection.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { reservation frameReservation; try { frameReservation = new reservation(connexion, comboBoxNom.getSelectedItem().toString(), comboBoxDemande.getSelectedItem().toString()); frameReservation.setVisible(true); dispose(); } catch (SQLException | ParseException e1) { e1.printStackTrace(); } dispose(); } }); btnConnection.setBounds(203, 185, 114, 25); contentPane.add(btnConnection); JButton btnAdm = new JButton("Administration"); btnAdm.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { Administration frameAdmin; try { frameAdmin = new Administration(connexion,comboBoxNom.getSelectedItem().toString()); frameAdmin.setVisible(true); dispose(); }catch(Exception e2) { System.out.println(e2); } } }); btnAdm.setBounds(203, 261, 114, 25); contentPane.add(btnAdm); contentPane.add(btnConnection); } }
cd12d0d80285462826b0b8416deeef2e72d97135
[ "Java" ]
5
Java
JocelynRoul/LP011-Medic
6783784deb31351244c82027a5c9d6b2873d5cc9
f1f59dd995c88dfe6edd3dce1b020dc442384b1e